2 * arch/arm/mach-imx/pcie.c
4 * PCIe host controller driver for IMX6 SOCs
6 * Copyright (C) 2012 Freescale Semiconductor, Inc. All Rights Reserved.
7 * Copyright (C) 2013 Tim Harvey <tharvey@gateworks.com>
9 * Bits taken from arch/arm/mach-dove/pcie.c
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/pci.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/gpio.h>
35 #include <linux/platform_device.h>
37 #include <linux/of_platform.h>
38 #include <linux/of_address.h>
39 #include <linux/of_gpio.h>
41 #include <asm/signal.h>
42 #include <asm/mach/pci.h>
43 #include <asm/sizes.h>
48 #define PCIE_ARB_BASE_ADDR 0x01000000
49 #define PCIE_ARB_END_ADDR 0x01FFFFFF
50 #define PCIE_RC_IOBLSSR 0x1c
52 /* Register Definitions */
53 #define PRT_LOG_R_BaseAddress 0x700
56 /* Debug Register 0 */
57 #define DB_R0 (PRT_LOG_R_BaseAddress + 0x28)
58 #define DB_R0_RegisterSize 32
59 #define DB_R0_RegisterResetValue 0x0
60 #define DB_R0_RegisterResetMask 0xFFFFFFFF
61 /* End of Register Definition for DB_R0 */
64 /* Debug Register 1 */
65 #define DB_R1 (PRT_LOG_R_BaseAddress + 0x2c)
66 #define DB_R1_RegisterSize 32
67 #define DB_R1_RegisterResetValue 0x0
68 #define DB_R1_RegisterResetMask 0xFFFFFFFF
69 /* End of Register Definition for DB_R1 */
71 #define PCIE_PL_MSICA 0x820
72 #define PCIE_PL_MSICUA 0x824
73 #define PCIE_PL_MSIC_INT 0x828
75 #define MSIC_INT_EN 0x0
76 #define MSIC_INT_MASK 0x4
77 #define MSIC_INT_STATUS 0x8
79 #define ATU_R_BaseAddress 0x900
80 #define ATU_VIEWPORT_R (ATU_R_BaseAddress + 0x0)
81 #define ATU_REGION_CTRL1_R (ATU_R_BaseAddress + 0x4)
82 #define ATU_REGION_CTRL2_R (ATU_R_BaseAddress + 0x8)
83 #define ATU_REGION_LOWBASE_R (ATU_R_BaseAddress + 0xC)
84 #define ATU_REGION_UPBASE_R (ATU_R_BaseAddress + 0x10)
85 #define ATU_REGION_LIMIT_ADDR_R (ATU_R_BaseAddress + 0x14)
86 #define ATU_REGION_LOW_TRGT_ADDR_R (ATU_R_BaseAddress + 0x18)
87 #define ATU_REGION_UP_TRGT_ADDR_R (ATU_R_BaseAddress + 0x1C)
90 #define IOMUXC_GPR_BASE_ADDR 0x020E0000
91 #define IOMUXC_GPR1 (imx_pcie.gpr_base + 0x04)
92 #define IOMUXC_GPR8 (imx_pcie.gpr_base + 0x20)
93 #define IOMUXC_GPR12 (imx_pcie.gpr_base + 0x30)
94 /* GPR1: iomuxc_gpr1_pcie_ref_clk_en(iomuxc_gpr1[16]) */
95 #define iomuxc_gpr1_pcie_ref_clk_en (1 << 16)
96 /* GPR1: iomuxc_gpr1_test_powerdown(iomuxc_gpr1_18) */
97 #define iomuxc_gpr1_test_powerdown (1 << 18)
98 /* GPR12: iomuxc_gpr12_los_level(iomuxc_gpr12[8:4]) */
99 #define iomuxc_gpr12_los_level (0x1F << 4)
100 /* GPR12: iomuxc_gpr12_app_ltssm_enable(iomuxc_gpr12[10]) */
101 #define iomuxc_gpr12_app_ltssm_enable (1 << 10)
102 /* GPR12: iomuxc_gpr12_device_type(iomuxc_gpr12[15:12]) */
103 #define iomuxc_gpr12_device_type (0xF << 12)
104 /* GPR8: iomuxc_gpr8_tx_deemph_gen1(iomuxc_gpr8[5:0]) */
105 #define iomuxc_gpr8_tx_deemph_gen1 (0x3F << 0)
106 /* GPR8: iomuxc_gpr8_tx_deemph_gen2_3p5db(iomuxc_gpr8[11:6]) */
107 #define iomuxc_gpr8_tx_deemph_gen2_3p5db (0x3F << 6)
108 /* GPR8: iomuxc_gpr8_tx_deemph_gen2_6db(iomuxc_gpr8[17:12]) */
109 #define iomuxc_gpr8_tx_deemph_gen2_6db (0x3F << 12)
110 /* GPR8: iomuxc_gpr8_tx_swing_full(iomuxc_gpr8[24:18]) */
111 #define iomuxc_gpr8_tx_swing_full (0x7F << 18)
112 /* GPR8: iomuxc_gpr8_tx_swing_low(iomuxc_gpr8[31:25]) */
113 #define iomuxc_gpr8_tx_swing_low (0x7F << 25)
115 /* Registers of PHY */
116 /* Register PHY_STS_R */
117 /* PHY Status Register */
118 #define PHY_STS_R (PRT_LOG_R_BaseAddress + 0x110)
120 /* Register PHY_CTRL_R */
121 /* PHY Control Register */
122 #define PHY_CTRL_R (PRT_LOG_R_BaseAddress + 0x114)
124 #define SSP_CR_SUP_DIG_MPLL_OVRD_IN_LO 0x0011
125 /* FIELD: RES_ACK_IN_OVRD [15:15]
126 // FIELD: RES_ACK_IN [14:14]
127 // FIELD: RES_REQ_IN_OVRD [13:13]
128 // FIELD: RES_REQ_IN [12:12]
129 // FIELD: RTUNE_REQ_OVRD [11:11]
130 // FIELD: RTUNE_REQ [10:10]
131 // FIELD: MPLL_MULTIPLIER_OVRD [9:9]
132 // FIELD: MPLL_MULTIPLIER [8:2]
133 // FIELD: MPLL_EN_OVRD [1:1]
134 // FIELD: MPLL_EN [0:0]
137 #define SSP_CR_SUP_DIG_ATEOVRD 0x0010
138 /* FIELD: ateovrd_en [2:2]
139 // FIELD: ref_usb2_en [1:1]
140 // FIELD: ref_clkdiv2 [0:0]
143 #define SSP_CR_LANE0_DIG_RX_OVRD_IN_LO 0x1005
144 /* FIELD: RX_LOS_EN_OVRD [13:13]
145 // FIELD: RX_LOS_EN [12:12]
146 // FIELD: RX_TERM_EN_OVRD [11:11]
147 // FIELD: RX_TERM_EN [10:10]
148 // FIELD: RX_BIT_SHIFT_OVRD [9:9]
149 // FIELD: RX_BIT_SHIFT [8:8]
150 // FIELD: RX_ALIGN_EN_OVRD [7:7]
151 // FIELD: RX_ALIGN_EN [6:6]
152 // FIELD: RX_DATA_EN_OVRD [5:5]
153 // FIELD: RX_DATA_EN [4:4]
154 // FIELD: RX_PLL_EN_OVRD [3:3]
155 // FIELD: RX_PLL_EN [2:2]
156 // FIELD: RX_INVERT_OVRD [1:1]
157 // FIELD: RX_INVERT [0:0]
160 #define SSP_CR_LANE0_DIG_RX_ASIC_OUT 0x100D
162 // FIELD: PLL_STATE [1:1]
163 // FIELD: VALID [0:0]
166 /* control bus bit definition */
167 #define PCIE_CR_CTL_DATA_LOC 0
168 #define PCIE_CR_CTL_CAP_ADR_LOC 16
169 #define PCIE_CR_CTL_CAP_DAT_LOC 17
170 #define PCIE_CR_CTL_WR_LOC 18
171 #define PCIE_CR_CTL_RD_LOC 19
172 #define PCIE_CR_STAT_DATA_LOC 0
173 #define PCIE_CR_STAT_ACK_LOC 16
175 #define PCIE_CAP_STRUC_BaseAddress 0x70
177 /* Register LNK_CAP */
179 #define LNK_CAP (PCIE_CAP_STRUC_BaseAddress + 0xc)
181 /* End of Register Definitions */
/*
 * Per-root-port state.
 * NOTE(review): the embedded original line numbers are non-contiguous,
 * so members referenced elsewhere (e.g. pp->index, pp->root_bus_nr,
 * num_pcie_ports, imx_pcie.base) are missing from this listing.
 */
191 struct imx_pcie_port {
195 void __iomem *dbi_base; /* mapped DesignWare DBI (config/control) regs */
196 spinlock_t conf_lock; /* serialises config-space accesses */
198 char io_space_name[16]; /* resource name shown in /proc/ioports */
199 char mem_space_name[16]; /* resource name shown in /proc/iomem */
201 struct resource res[2]; /* [0] = I/O window, [1] = MEM window */
/* Driver-global state; array size 1 => a single controller instance. */
205 struct imx_pcie_info {
206 struct imx_pcie_port imx_pcie_port[1];
210 void __iomem *dbi_base; /* DBI registers, shared with the port above */
211 void __iomem *gpr_base; /* IOMUXC GPR block (PHY/LTSSM controls) */
213 unsigned int pcie_pwr_en; /* GPIO: slot power enable ("pwren-gpios") */
214 unsigned int pcie_rst; /* GPIO: PERST# to the card ("rst-gpios") */
215 unsigned int pcie_wake_up; /* GPIO: WAKE# ("wake-gpios") */
216 unsigned int pcie_dis; /* GPIO: W_DISABLE# ("dis-gpios") */
/* Single driver-wide instance; no locking around it is visible here. */
219 static struct imx_pcie_info imx_pcie;
/* Forward declarations for the PHY control-bus helpers defined below. */
221 static int pcie_phy_cr_read(int addr, int *data);
222 static int pcie_phy_cr_write(int addr, int data);
223 static void change_field(int *in, int start, int end, int val);
225 /* IMX PCIE GPR configure routines */
226 static inline void imx_pcie_clrset(u32 mask, u32 val, void __iomem *addr)
228 writel(((readl(addr) & ~mask) | (val & mask)), addr);
/*
 * imx_pcie_setup() - ARM hw_pci .setup callback.
 * Claims and registers the 1MB legacy I/O window and the 14MB MEM
 * window for port @nr out of the fixed 16MB PCIe AXI aperture that
 * starts at PCIE_ARB_BASE_ADDR, then panics if a window cannot be
 * claimed.
 * NOTE(review): the embedded original line numbers are non-contiguous;
 * the early-return path, else-branches, closing braces and the final
 * return value are missing from this listing.
 */
231 static int imx_pcie_setup(int nr, struct pci_sys_data *sys)
233 struct imx_pcie_port *pp;
/* Only ports registered by add_pcie_port() are valid. */
235 if (nr >= imx_pcie.num_pcie_ports)
238 pp = &imx_pcie.imx_pcie_port[nr];
239 pp->root_bus_nr = sys->busnr;
/* 1MB I/O window at the very start of the PCIe aperture. */
244 snprintf(pp->io_space_name, sizeof(pp->io_space_name),
245 "PCIe %d I/O", pp->index);
246 pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
247 pp->res[0].name = pp->io_space_name;
248 if (pp->index == 0) {
249 pp->res[0].start = PCIE_ARB_BASE_ADDR;
250 pp->res[0].end = pp->res[0].start + SZ_1M - 1;
252 pp->res[0].flags = IORESOURCE_IO;
253 if (request_resource(&ioport_resource, &pp->res[0]))
254 panic("Request PCIe IO resource failed\n");
255 pci_add_resource_offset(&sys->resources, &pp->res[0], sys->io_offset);
/* 14MB (16M - 2M) MEM window follows the I/O window; the last 1MB of
 * the aperture is reserved for config accesses and registers. */
260 snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
261 "PCIe %d MEM", pp->index);
262 pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
263 pp->res[1].name = pp->mem_space_name;
264 if (pp->index == 0) {
265 pp->res[1].start = PCIE_ARB_BASE_ADDR + SZ_1M;
266 pp->res[1].end = pp->res[1].start + SZ_16M - SZ_2M - 1;
268 pp->res[1].flags = IORESOURCE_MEM;
269 if (request_resource(&iomem_resource, &pp->res[1]))
270 panic("Request PCIe Memory resource failed\n");
271 pci_add_resource_offset(&sys->resources, &pp->res[1], sys->mem_offset);
/*
 * imx_pcie_link_up() - poll the DesignWare debug registers for link-up.
 * Loops up to 200 iterations (~2-3ms each) reading debug bit 36
 * (DB_R1 bit 4) as the link-up indicator, with a workaround that
 * pulses the PHY RX overrides when the LTSSM is stuck in
 * Recovery.RcvrLock (0x0D) with rx_valid deasserted during a gen2
 * transition.
 * NOTE(review): the do{} head, iteration decrement, braces and return
 * statement are missing from this listing (original line numbers are
 * non-contiguous); callers treat nonzero as "link up".
 */
276 static int imx_pcie_link_up(void __iomem *dbi_base)
278 /* Check the pcie link up or link down */
279 int iterations = 200;
280 u32 rc, ltssm, rx_valid, temp;
283 /* link is debug bit 36 debug 1 start in bit 32 */
284 rc = readl(dbi_base + DB_R1) & (0x1 << (36 - 32)) ;
286 usleep_range(2000, 3000);
288 /* From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
289 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
290 * If (MAC/LTSSM.state == Recovery.RcvrLock)
291 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
/* 0x0D == Recovery.RcvrLock; rx_valid bit 0 comes from the lane-0
 * ASIC-out register of the PHY. */
294 pcie_phy_cr_read(SSP_CR_LANE0_DIG_RX_ASIC_OUT, &rx_valid);
295 ltssm = readl(dbi_base + DB_R0) & 0x3F;
296 if ((ltssm == 0x0D) && ((rx_valid & 0x01) == 0)) {
297 pr_info("Transition to gen2 is stuck, reset PHY!\n");
/* Set the RX_PLL_EN_OVRD (bit 3) and RX_DATA_EN_OVRD (bit 5)
 * override bits, then clear them ~2ms later to reset the RX path. */
298 pcie_phy_cr_read(SSP_CR_LANE0_DIG_RX_OVRD_IN_LO, &temp);
299 change_field(&temp, 3, 3, 0x1);
300 change_field(&temp, 5, 5, 0x1);
301 pcie_phy_cr_write(SSP_CR_LANE0_DIG_RX_OVRD_IN_LO,
303 usleep_range(2000, 3000);
304 pcie_phy_cr_read(SSP_CR_LANE0_DIG_RX_OVRD_IN_LO, &temp);
305 change_field(&temp, 3, 3, 0x0);
306 change_field(&temp, 5, 5, 0x0);
307 pcie_phy_cr_write(SSP_CR_LANE0_DIG_RX_OVRD_IN_LO,
/* Dump both debug registers once the retry budget is exhausted. */
311 if ((iterations < 0))
312 pr_info("link up failed, DB_R0:0x%08x, DB_R1:0x%08x!\n"
313 , readl(dbi_base + DB_R0)
314 , readl(dbi_base + DB_R1));
315 } while (!rc && iterations);
/*
 * imx_pcie_regions_setup() - program the RC header, the outbound iATU
 * regions used for type-0/type-1 config cycles, and the MSI capture
 * address / per-bank MSI interrupt registers.
 * NOTE(review): original line numbers are non-contiguous; variable
 * declarations (bus, i), braces and the definitions of CfgRdWr0,
 * CfgRdWr1 and MSI_MATCH_ADDR are missing from this listing.
 */
322 static void imx_pcie_regions_setup(void __iomem *dbi_base)
326 unsigned untranslated_base = PCIE_ARB_END_ADDR +1 - SZ_1M;
327 void __iomem *p = dbi_base + PCIE_PL_MSIC_INT;
329 * i.MX6 defines 16MB in the AXI address map for PCIe.
331 * That address space, except for the pcie registers, is
332 * split and defined into different regions by iATU,
333 * with sizes and offsets as follows:
335 * 0x0100_0000 --- 0x010F_FFFF 1MB IORESOURCE_IO
336 * 0x0110_0000 --- 0x01EF_FFFF 14MB IORESOURCE_MEM
337 * 0x01F0_0000 --- 0x01FF_FFFF 1MB Cfg + Registers
340 /* CMD reg:I/O space, MEM space, and Bus Master Enable */
341 writel(readl(dbi_base + PCI_COMMAND)
344 | PCI_COMMAND_MASTER,
345 dbi_base + PCI_COMMAND);
347 /* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI */
348 writel(readl(dbi_base + PCI_CLASS_REVISION)
349 | (PCI_CLASS_BRIDGE_PCI << 16),
350 dbi_base + PCI_CLASS_REVISION);
353 * region0-3 outbound used to access target cfg
/* One 256KB (1 << 18) iATU window per downstream bus 1..4, carved out
 * of the last 1MB of the aperture; the final 16KB is kept for the
 * controller's own registers. */
355 for (bus = 1; bus <= 4; bus++) {
356 writel(bus - 1, dbi_base + ATU_VIEWPORT_R);
357 writel(untranslated_base, dbi_base + ATU_REGION_LOWBASE_R);
358 untranslated_base += (1 << 18);
360 untranslated_base -= (1 << 14); //(remove registers)
361 writel(untranslated_base - 1, dbi_base + ATU_REGION_LIMIT_ADDR_R);
362 writel(0, dbi_base + ATU_REGION_UPBASE_R);
/* Target address encodes bus in [31:24]; bus 1 gets type-0 config
 * cycles, buses behind bridges get type-1. */
364 writel(bus << 24, dbi_base + ATU_REGION_LOW_TRGT_ADDR_R);
365 writel(0, dbi_base + ATU_REGION_UP_TRGT_ADDR_R);
366 writel((bus > 1) ? CfgRdWr1 : CfgRdWr0,
367 dbi_base + ATU_REGION_CTRL1_R);
368 writel((1<<31), dbi_base + ATU_REGION_CTRL2_R); /* region enable */
/* MSI capture address, then mask/ack all 8 banks of 32 vectors. */
371 writel(MSI_MATCH_ADDR, dbi_base + PCIE_PL_MSICA);
372 writel(0, dbi_base + PCIE_PL_MSICUA);
373 for (i = 0; i < 8 ; i++) {
374 writel(0, p + MSIC_INT_EN);
375 writel(0xffffffff, p + MSIC_INT_MASK);
376 writel(0xffffffff, p + MSIC_INT_STATUS);
/*
 * imx_pcie_mask_irq() - mask (@set != 0) or unmask one MSI vector.
 * Vectors are grouped 32 per bank; banks stride 12 bytes apart.
 * NOTE(review): original line numbers are non-contiguous; the read of
 * the mask register into 'val', the @set branch and the final writel
 * are missing from this listing - only the clear-bit half survives.
 */
381 void imx_pcie_mask_irq(unsigned pos, int set)
383 unsigned mask = 1 << (pos & 0x1f);
384 unsigned val, newval;
385 void __iomem *p = imx_pcie.dbi_base + PCIE_PL_MSIC_INT + MSIC_INT_MASK + ((pos >> 5) * 12);
392 newval = val & ~mask;
/*
 * imx_pcie_enable_irq() - enable (@set != 0) or disable one MSI vector
 * in the per-bank enable register, then unmask it when newly enabled.
 * NOTE(review): original line numbers are non-contiguous; the read
 * into 'val', the @set branch and the writel back are missing from
 * this listing.
 */
397 void imx_pcie_enable_irq(unsigned pos, int set)
399 unsigned mask = 1 << (pos & 0x1f);
400 unsigned val, newval;
401 void __iomem *p = imx_pcie.dbi_base + PCIE_PL_MSIC_INT + MSIC_INT_EN + ((pos >> 5) * 12);
408 newval = val & ~mask;
/* A vector that is being enabled must also be unmasked to fire. */
411 if (set && (val != newval))
412 imx_pcie_mask_irq(pos, 0); /* unmask when enabled */
/*
 * imx_pcie_msi_pending() - read-and-acknowledge pending MSI vectors of
 * bank @index (write-1-to-clear on MSIC_INT_STATUS).
 * NOTE(review): declarations of val/mask and the return statement
 * (presumably the pending-and-unmasked set) are missing from this
 * listing.
 */
415 unsigned imx_pcie_msi_pending(unsigned index)
418 void __iomem *p = imx_pcie.dbi_base + PCIE_PL_MSIC_INT + (index * 12);
421 val = readl(p + MSIC_INT_STATUS);
422 mask = readl(p + MSIC_INT_MASK);
/* Ack everything we observed so it can re-trigger later. */
424 writel(val, p + MSIC_INT_STATUS);
/*
 * master_abort() - check and clear error bits latched in the RC's
 * I/O Base/Limit + Secondary Status register (PCIE_RC_IOBLSSR) after a
 * config access; logs which error class was seen.
 * NOTE(review): original line numbers are non-contiguous; the if/else
 * ladder selecting between the pr_err() calls and the return value
 * (nonzero on abort, judging by the callers) are missing from this
 * listing.
 */
428 static char master_abort(struct pci_bus *bus, u32 devfn, int where)
431 void __iomem *dbi_base = imx_pcie.dbi_base;
/* 0x71000000 covers the latched error status bits in the upper byte. */
434 reg = readl(dbi_base + PCIE_RC_IOBLSSR);
435 if (reg & 0x71000000) {
437 pr_err("%d:%02d.%d 0x%04x: parity error\n", bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where);
439 pr_err("%d:%02d.%d 0x%04x: master abort\n", bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where);
443 pr_err("%d:%02d.%d 0x%04x: target abort\n", bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where);
445 pr_err("%d:%02d.%d 0x%04x: master data parity error\n", bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where);
/* Write-back clears the latched (write-1-to-clear) error bits. */
446 writel(reg, dbi_base + PCIE_RC_IOBLSSR);
447 udelay(1500); // without this delay subsequent reads through bridge can erroneously return 0???
/*
 * get_cfg_addr() - compute the virtual address to use for a config
 * access to bus/devfn/where.
 * Bus 0 maps straight onto the RC's own DBI registers; buses 1..3 with
 * devfn <= 3 use the pre-programmed 256KB iATU windows; anything else
 * reprograms iATU region 3 on the fly to target the requested BDF.
 * NOTE(review): original line numbers are non-contiguous; the bus==0
 * test, the busnum declaration, the NULL-return for out-of-range BDFs
 * and closing braces are missing from this listing.  imx_pcie.base
 * (the config aperture mapping) is also not visible in the struct
 * listing above - presumably set up in probe; confirm.
 */
452 static volatile void *get_cfg_addr(struct pci_bus *bus, u32 devfn, int where)
455 void __iomem *base = imx_pcie.base;
456 void __iomem *dbi_base = imx_pcie.dbi_base;
/* Bus 0 == the root complex itself: access its DBI config header. */
461 return (imx_pcie.dbi_base) + (where & 0x0ffc);
/* Out-of-range device/bus numbers cannot be encoded below. */
463 if ((devfn > 0xff) || (bus->number > 15))
465 busnum = bus->number - 1;
/* Fast path: buses 1..3, devices 0..3 hit the static iATU regions. */
466 if ((busnum < 3) && (devfn <= 3)) {
467 return (base) + (busnum << 18) + (devfn << 16) + (where & 0xfffc);
/* Slow path: retarget iATU region 3 at the requested function. */
469 writel(3, dbi_base + ATU_VIEWPORT_R);
470 writel((bus->number << 24) | (devfn << 16),
471 dbi_base + ATU_REGION_LOW_TRGT_ADDR_R);
472 writel((bus->number > 1) ? CfgRdWr1 : CfgRdWr0,
473 dbi_base + ATU_REGION_CTRL1_R);
474 return (base) + (3 << 18) + (where & 0xfffc);
/*
 * imx_pcie_rd_conf() - pci_ops .read: 32-bit read of the containing
 * dword, then extract the requested 1/2/4-byte field.
 * Returns PCIBIOS_DEVICE_NOT_FOUND on unmappable BDFs or on a master
 * abort, PCIBIOS_BAD_REGISTER_NUMBER for an unsupported size.
 * NOTE(review): original line numbers are non-contiguous; the 'u32 v'
 * declaration, the debug-print guards around the pr_info() calls, the
 * NULL check on va_address and the size == 4 branch are missing from
 * this listing.
 */
477 static int imx_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
480 const volatile void *va_address;
484 pr_info("%s: bus=%x, devfn=%x, where=%x size=%x\n", __func__, bus->number, devfn, where, size);
485 va_address = get_cfg_addr(bus, devfn, where);
488 return PCIBIOS_DEVICE_NOT_FOUND;
/* Always read the full aligned dword; sub-dword sizes shift below. */
490 v = readl(va_address);
491 if (master_abort(bus, devfn, where)) {
492 return PCIBIOS_DEVICE_NOT_FOUND;
495 pr_info("%s: bus=%x, devfn=%x, where=%x size=%x v=%x\n", __func__, bus->number, devfn, where, size, v);
498 } else if (size == 1) {
499 *val = (v >> (8 * (where & 3))) & 0xFF;
500 } else if (size == 2) {
501 *val = (v >> (8 * (where & 3))) & 0xFFFF;
504 return PCIBIOS_BAD_REGISTER_NUMBER;
506 return PCIBIOS_SUCCESSFUL;
/*
 * imx_pcie_wr_conf() - pci_ops .write: direct 32-bit write for
 * size == 4, read-modify-write of the containing dword for 1/2-byte
 * writes.  Master aborts are detected after the write and reported as
 * PCIBIOS_DEVICE_NOT_FOUND.
 * NOTE(review): original line numbers are non-contiguous; mask/tmp
 * declarations, the NULL check on va_address, the size-dispatch
 * if/else heads and closing braces are missing from this listing.
 */
509 static int imx_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
510 int where, int size, u32 val)
512 volatile void *va_address;
516 pr_info("%s: bus=%x, devfn=%x, where=%x size=%x val=%x\n", __func__, bus->number, devfn, where, size, val);
517 va_address = get_cfg_addr(bus, devfn, where);
519 return PCIBIOS_DEVICE_NOT_FOUND;
/* Full-dword write: no read-modify-write needed. */
521 writel(val, va_address);
522 return (master_abort(bus, devfn, where))
523 ?PCIBIOS_DEVICE_NOT_FOUND:PCIBIOS_SUCCESSFUL;
/* Sub-dword write: mask off the target byte lane(s) and merge. */
526 mask = ~(0xFFFF << ((where & 0x3) * 8));
528 mask = ~(0xFF << ((where & 0x3) * 8));
530 return PCIBIOS_BAD_REGISTER_NUMBER;
532 tmp = readl(va_address) & mask;
533 tmp |= val << ((where & 0x3) * 8);
534 writel(tmp, va_address);
535 return (master_abort(bus, devfn, where))
536 ?PCIBIOS_DEVICE_NOT_FOUND:PCIBIOS_SUCCESSFUL;
/* Config-space accessors handed to the ARM PCI core via hw_pci.ops. */
539 static struct pci_ops imx_pcie_ops = {
540 .read = imx_pcie_rd_conf,
541 .write = imx_pcie_wr_conf,
/*
 * Legacy INTA..INTD to MXC interrupt mapping, indexed by (pin - 1).
 * NOTE(review): not declared static in this listing - presumably it
 * should be; confirm there are no external users.
 */
544 signed short irq_map[] = {
546 MXC_INT_PCIE_3, /* int a */
547 MXC_INT_PCIE_2, /* int b */
548 MXC_INT_PCIE_1, /* int c */
549 MXC_INT_PCIE_0, /* int d/MSI */
/*
 * imx_pcie_map_irq() - hw_pci .map_irq callback.
 * NOTE(review): the entire body (original lines 553-558) is missing
 * from this listing; presumably it indexes irq_map[] by @pin - confirm
 * against the original source.
 */
552 static int imx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
/* ARM PCI bios descriptor passed to pci_common_init() in probe.
 * NOTE(review): the nr_controllers initialiser is missing from this
 * listing (original line numbers are non-contiguous). */
560 static struct hw_pci imx_pci __initdata = {
562 .setup = imx_pcie_setup,
563 .ops = &imx_pcie_ops,
564 .map_irq = imx_pcie_map_irq,
567 /* PHY CR bus access routines */
/*
 * pcie_phy_cr_ack_polling() - spin until the PHY control-bus ack bit
 * (PHY_STS_R bit 16) equals @exp_val, or @max_iterations is exhausted.
 * NOTE(review): the do{} head, the wait_counter increment and the
 * return statements (success/failure values used by the callers'
 * "if (!...)" tests) are missing from this listing.
 */
568 static int pcie_phy_cr_ack_polling(int max_iterations, int exp_val)
570 u32 temp_rd_data, wait_counter = 0;
573 temp_rd_data = readl(imx_pcie.dbi_base + PHY_STS_R);
574 temp_rd_data = (temp_rd_data >> PCIE_CR_STAT_ACK_LOC) & 0x1;
576 } while ((wait_counter < max_iterations) && (temp_rd_data != exp_val));
578 if (temp_rd_data != exp_val)
/*
 * pcie_phy_cr_cap_addr() - latch @addr into the PHY control bus:
 * write the address, pulse cap_addr (bit 16 of PHY_CTRL_R), and
 * handshake on the ack bit in both directions.
 * Returns 0 on handshake failure (per the callers' "if (!...)" use);
 * NOTE(review): braces and the success-return line are missing from
 * this listing.
 */
583 static int pcie_phy_cr_cap_addr(int addr)
586 void __iomem *dbi_base = imx_pcie.dbi_base;
/* Present the address on the data lines first, then assert cap_addr. */
589 temp_wr_data = addr << PCIE_CR_CTL_DATA_LOC ;
590 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
593 temp_wr_data |= (0x1 << PCIE_CR_CTL_CAP_ADR_LOC);
594 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
597 if (!pcie_phy_cr_ack_polling(100, 1))
600 /* deassert cap addr */
601 temp_wr_data = addr << PCIE_CR_CTL_DATA_LOC;
602 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
604 /* wait for ack de-assertion */
605 if (!pcie_phy_cr_ack_polling(100, 0))
/*
 * pcie_phy_cr_read() - read one 16-bit PHY register over the control
 * bus: latch the address, assert rd, wait for ack, sample PHY_STS_R,
 * deassert rd, wait for ack de-assertion.
 * @addr: PHY register address (e.g. SSP_CR_* constants)
 * @data: out: 16-bit register contents
 * NOTE(review): braces, failure returns and the final success return
 * are missing from this listing.
 */
611 static int pcie_phy_cr_read(int addr , int *data)
613 u32 temp_rd_data, temp_wr_data;
614 void __iomem *dbi_base = imx_pcie.dbi_base;
618 if (!pcie_phy_cr_cap_addr(addr))
621 /* assert rd signal */
622 temp_wr_data = 0x1 << PCIE_CR_CTL_RD_LOC;
623 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
626 if (!pcie_phy_cr_ack_polling(100, 1))
629 /* after got ack return data */
630 temp_rd_data = readl(dbi_base + PHY_STS_R);
631 *data = (temp_rd_data & (0xffff << PCIE_CR_STAT_DATA_LOC)) ;
633 /* deassert rd signal */
635 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
637 /* wait for ack de-assertion */
638 if (!pcie_phy_cr_ack_polling(100, 0))
/*
 * pcie_phy_cr_write() - write one 16-bit PHY register over the control
 * bus: latch the address, present and capture the data (cap_dat
 * pulse), then pulse wr, handshaking on ack at every step.
 * NOTE(review): braces, failure returns and the final success return
 * are missing from this listing (original line numbers are
 * non-contiguous).
 */
645 static int pcie_phy_cr_write(int addr, int data)
648 void __iomem *dbi_base = imx_pcie.dbi_base;
652 if (!pcie_phy_cr_cap_addr(addr))
/* Present the data, then assert cap_dat (bit 17) to capture it. */
655 temp_wr_data = data << PCIE_CR_CTL_DATA_LOC;
656 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
659 temp_wr_data |= (0x1 << PCIE_CR_CTL_CAP_DAT_LOC);
660 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
663 if (!pcie_phy_cr_ack_polling(100, 1))
666 /* deassert cap data */
667 temp_wr_data = data << PCIE_CR_CTL_DATA_LOC;
668 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
670 /* wait for ack de-assertion */
671 if (!pcie_phy_cr_ack_polling(100, 0))
674 /* assert wr signal */
675 temp_wr_data = 0x1 << PCIE_CR_CTL_WR_LOC;
676 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
679 if (!pcie_phy_cr_ack_polling(100, 1))
682 /* deassert wr signal */
683 temp_wr_data = data << PCIE_CR_CTL_DATA_LOC;
684 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
686 /* wait for ack de-assertion */
687 if (!pcie_phy_cr_ack_polling(100, 0))
691 writel(temp_wr_data, dbi_base + PHY_CTRL_R);
/*
 * change_field() - replace the bit field [end:start] of *in with @val.
 * @in:    value to modify in place
 * @start: lowest bit of the field (0-based)
 * @end:   highest bit of the field, inclusive
 * @val:   new field value; caller must pass a value that fits the field
 *
 * Used by imx_pcie_link_up() to flip individual PHY override bits.
 */
static void change_field(int *in, int start, int end, int val)
{
	unsigned int mask;

	/*
	 * Build a mask covering bits start..end inclusive.  The shifts
	 * are done on a 64-bit operand so that end == 31 (shift count
	 * 32) is well defined; the original 32-bit expression
	 * "0xFFFFFFFF << (end + 1)" was undefined behavior in that case.
	 */
	mask = (unsigned int)((~0ULL << start) ^ (~0ULL << (end + 1)));
	*in = (*in & ~mask) | (val << start);
}
/*
 * imx_pcie_enable_controller() - power-up sequence: drive the slot
 * power GPIO high, take the PHY out of test_powerdown, enable the PCIe
 * bus clock, and finally enable the PHY reference clock in GPR1.
 * NOTE(review): braces, the 'struct clk *clk' declaration, the
 * IS_ERR() checks around the clk lookups, error returns and the final
 * return are missing from this listing.  The gpio_request() return
 * value is not checked in what is visible.
 */
704 static int imx_pcie_enable_controller(struct device *dev)
707 struct device_node *np = dev->of_node;
709 if (gpio_is_valid(imx_pcie.pcie_pwr_en)) {
710 /* Enable PCIE power */
711 gpio_request(imx_pcie.pcie_pwr_en, "PCIE POWER_EN");
713 /* activate PCIE_PWR_EN */
714 gpio_direction_output(imx_pcie.pcie_pwr_en, 1);
/* Clear GPR1[18] to bring the PHY out of test power-down. */
718 imx_pcie_clrset(iomuxc_gpr1_test_powerdown, 0 << 18, IOMUXC_GPR1);
720 /* enable the clks */
/* DT clock first, named "pcie_clk" lookup as fallback. */
722 clk = of_clk_get(np, 0);
724 clk = devm_clk_get(dev, "pcie_clk");
726 pr_err("no pcie clock.\n");
730 if (clk_prepare_enable(clk)) {
731 pr_err("can't enable pcie clock.\n");
736 // Enable PCIe PHY ref clock
737 imx_pcie_clrset(iomuxc_gpr1_pcie_ref_clk_en, 1 << 16, IOMUXC_GPR1);
/*
 * card_reset() - pulse PERST# to the external card: drive the reset
 * GPIO low, hold it (the delay line is missing from this listing),
 * then release it.
 * NOTE(review): gpio_request() return value is not checked in what is
 * visible; @dev appears unused in the surviving lines.
 */
742 static void card_reset(struct device *dev)
744 if (gpio_is_valid(imx_pcie.pcie_rst)) {
746 gpio_request(imx_pcie.pcie_rst, "PCIE RESET");
748 /* assert PERST_B (active low) */
749 gpio_direction_output(imx_pcie.pcie_rst, 0);
751 /* Add one reset to the pcie external device */
754 /* deassert PERST_B, releasing the card from reset */
755 gpio_direction_output(imx_pcie.pcie_rst, 1);
/*
 * add_pcie_port() - if the link trained, register the port in
 * imx_pcie.imx_pcie_port[]; otherwise power everything back down
 * (disable clocks, PHY ref clock, slot power, and put the PHY into
 * test power-down) so a dead slot does not burn power.
 * NOTE(review): braces, the 'struct clk *clk' declaration, pp->index /
 * pp->base initialisation and IS_ERR checks are missing from this
 * listing.  Note the link-down path uses clk_get() while
 * enable_controller used devm_clk_get() - verify the pairing.
 */
759 static void add_pcie_port(struct platform_device *pdev, void __iomem *base, void __iomem *dbi_base)
762 struct device *dev = &pdev->dev;
763 struct device_node *np = dev->of_node;
765 if (imx_pcie_link_up(dbi_base)) {
766 struct imx_pcie_port *pp = &imx_pcie.imx_pcie_port[imx_pcie.num_pcie_ports++];
768 pr_info("IMX PCIe port: link up.\n");
771 pp->root_bus_nr = -1;
773 pp->dbi_base = dbi_base;
774 spin_lock_init(&pp->conf_lock);
775 memset(pp->res, 0, sizeof(pp->res));
777 pr_info("IMX PCIe port: link down!\n");
778 /* Release the clocks, and disable the power */
781 clk = of_clk_get(np, 0);
783 clk = clk_get(NULL, "pcie_clk");
785 pr_err("no pcie clock.\n");
789 clk_disable_unprepare(clk);
792 // Disable the PCIE PHY Ref Clock
793 imx_pcie_clrset(iomuxc_gpr1_pcie_ref_clk_en, 0 << 16, IOMUXC_GPR1);
795 if (gpio_is_valid(imx_pcie.pcie_pwr_en)) {
796 /* Disable PCIE power */
797 gpio_request(imx_pcie.pcie_pwr_en, "PCIE POWER_EN");
799 /* deassert PCIE_PWR_EN (slot power off) */
800 gpio_direction_output(imx_pcie.pcie_pwr_en, 0);
803 // Power down PCIE PHY
804 imx_pcie_clrset(iomuxc_gpr1_test_powerdown, 1 << 18, IOMUXC_GPR1);
/*
 * imx_pcie_abort_handler() - ARM imprecise-external-abort hook
 * (installed via hook_fault_code in probe).  For an aborted LDR it
 * fakes an all-ones read result and skips the instruction, mimicking
 * PCI master-abort semantics.
 * NOTE(review): braces and return statements are missing from this
 * listing; the DSB special-case (0xf57ff04f) appears to re-read the
 * instruction - presumably after adjusting pc; confirm against the
 * original source.  Thumb mode is not handled (pc - 4 and 4-byte
 * decode assume ARM state).
 */
808 static int imx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
809 struct pt_regs *regs)
/* The faulting instruction is one word behind the reported PC. */
812 unsigned long pc = instruction_pointer(regs) - 4;
814 instr = *(unsigned long *)pc;
815 /* imprecise aborts are no longer enabled in 3.7+ during init it would appear.
816 * We now using PCIE_RC_IOBLSSR to detect master abort however we will still get
817 * at least one imprecise abort and need to have a handler.
/* 0xf57ff04f is the DSB SY encoding. */
820 if (instr == 0xf57ff04f) {
823 instr = *(unsigned long *)pc;
825 pr_info("PCIe abort: address = 0x%08lx fsr = 0x%03x PC = 0x%08lx LR = 0x%08lx instr=%08lx\n",
826 addr, fsr, regs->ARM_pc, regs->ARM_lr, instr);
830 * If the instruction being executed was a read,
831 * make it look like it read all-ones.
833 if ((instr & 0x0c500000) == 0x04100000) {
834 /* LDR instruction */
835 int reg = (instr >> 12) & 15;
/* Fake 0xFFFFFFFF into the destination register and step past. */
837 regs->uregs[reg] = -1;
838 regs->ARM_pc = pc + 4;
843 pr_info("PCIe abort: address = 0x%08lx fsr = 0x%03x PC = 0x%08lx LR = 0x%08lx instr=%08lx\n",
844 addr, fsr, regs->ARM_pc, regs->ARM_lr, instr);
/*
 * imx_pcie_pltfm_probe() - platform probe: map the config aperture,
 * DBI registers and IOMUXC GPR block; program the GPR PHY/LTSSM
 * settings; power up the controller; reset the card; set up iATU
 * regions; force gen1; start link training; install the abort handler;
 * and register the port with the ARM PCI core.
 * NOTE(review): braces, 'struct resource res', 'int ret' declarations,
 * error-path cleanup and return statements are missing from this
 * listing (original line numbers are non-contiguous).  'mem' appears
 * to be used for the dbi/gpr mappings but its assignment from 'res'
 * is among the missing lines - confirm.
 */
851 static int imx_pcie_pltfm_probe(struct platform_device *pdev)
853 struct resource *mem;
854 struct device *dev = &pdev->dev;
855 struct device_node *np = pdev->dev.of_node;
860 dev_err(&pdev->dev, "No of data found\n");
864 res.start = res.end = 0;
865 ret = of_address_to_resource(np, 0, &res);
/* Optional board GPIOs; gpio_is_valid() gates each use later. */
869 imx_pcie.pcie_pwr_en = of_get_named_gpio(np, "pwren-gpios", 0);
870 imx_pcie.pcie_rst = of_get_named_gpio(np, "rst-gpios", 0);
871 imx_pcie.pcie_wake_up = of_get_named_gpio(np, "wake-gpios", 0);
872 imx_pcie.pcie_dis = of_get_named_gpio(np, "dis-gpios", 0);
873 //pdev->dev.platform_data = pdata;
/* Map the last 1MB of the PCIe aperture (minus the 16KB register
 * window) for outbound config cycles. */
875 imx_pcie.base = ioremap_nocache(PCIE_ARB_END_ADDR - SZ_1M + 1, SZ_1M - SZ_16K);
876 if (!imx_pcie.base) {
877 pr_err("error with ioremap in function %s\n", __func__);
881 imx_pcie.dbi_base = devm_ioremap(dev, mem->start, resource_size(mem));
882 if (!imx_pcie.dbi_base) {
883 dev_err(dev, "can't map %pR\n", mem);
887 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc-gpr");
889 dev_err(dev, "can't find iomux\n");
892 ret = of_address_to_resource(np, 0, &res);
897 imx_pcie.gpr_base = devm_ioremap(dev, mem->start, resource_size(mem));
898 if (!imx_pcie.gpr_base) {
899 dev_err(dev, "can't map %pR\n", mem);
903 // hold LTSSM in detect state
904 imx_pcie_clrset(iomuxc_gpr12_app_ltssm_enable, 0 << 10, IOMUXC_GPR12);
906 /* configure constant input signal to the pcie ctrl and phy */
907 // set device type to RC (PCI_EXP_TYPE_ROOT_PORT=4 is from pcie_regs.h)
908 imx_pcie_clrset(iomuxc_gpr12_device_type, PCI_EXP_TYPE_ROOT_PORT << 12, IOMUXC_GPR12);
909 // loss of signal detect sensitivity function - must be 0x9
910 imx_pcie_clrset(iomuxc_gpr12_los_level, 9 << 4, IOMUXC_GPR12);
911 // not clear what values these should have from RM
912 imx_pcie_clrset(iomuxc_gpr8_tx_deemph_gen1, 0 << 0, IOMUXC_GPR8);
913 imx_pcie_clrset(iomuxc_gpr8_tx_deemph_gen2_3p5db, 0 << 6, IOMUXC_GPR8);
914 imx_pcie_clrset(iomuxc_gpr8_tx_deemph_gen2_6db, 20 << 12, IOMUXC_GPR8);
915 imx_pcie_clrset(iomuxc_gpr8_tx_swing_full, 127 << 18, IOMUXC_GPR8);
916 imx_pcie_clrset(iomuxc_gpr8_tx_swing_low, 127 << 25, IOMUXC_GPR8);
918 /* Enable the pwr, clks and so on */
919 ret = imx_pcie_enable_controller(dev);
923 /* toggle the external card's reset */
926 usleep_range(3000, 4000);
927 imx_pcie_regions_setup(imx_pcie.dbi_base);
928 usleep_range(3000, 4000);
931 * Force to GEN1 because of PCIE2USB storage stress tests
932 * would be failed when GEN2 is enabled
/* Clamp the link-capability max speed field (LNK_CAP[3:0]) to gen1. */
934 writel(((readl(imx_pcie.dbi_base + LNK_CAP) & 0xfffffff0) | 0x1),
935 imx_pcie.dbi_base + LNK_CAP);
/* Release the LTSSM: link training starts here. */
938 imx_pcie_clrset(iomuxc_gpr12_app_ltssm_enable, 1 << 10, IOMUXC_GPR12);
940 hook_fault_code(16 + 6, imx_pcie_abort_handler, SIGBUS, 0,
941 "imprecise external abort");
943 /* add the pcie port */
944 add_pcie_port(pdev, imx_pcie.base, imx_pcie.dbi_base);
946 pci_common_init(&imx_pci);
954 static int imx_pcie_pltfm_remove(struct platform_device *pdev)
957 struct device *dev = &pdev->dev;
958 struct device_node *np = dev->of_node;
959 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
961 /* Release clocks, and disable power */
963 clk = of_clk_get(np, 0);
965 clk = devm_clk_get(dev, "pcie_clk");
967 pr_err("no pcie clock.\n");
970 clk_disable_unprepare(clk);
974 // disable PCIe PHY clock ref
975 imx_pcie_clrset(iomuxc_gpr1_pcie_ref_clk_en, 0 << 16, IOMUXC_GPR1);
977 if (gpio_is_valid(imx_pcie.pcie_pwr_en)) {
978 /* Disable PCIE power */
979 gpio_request(imx_pcie.pcie_pwr_en, "PCIE POWER_EN");
981 /* activate PCIE_PWR_EN */
982 gpio_direction_output(imx_pcie.pcie_pwr_en, 0);
985 // power down PCIe PHY
986 imx_pcie_clrset(iomuxc_gpr1_test_powerdown, 1 << 18, IOMUXC_GPR1);
988 iounmap(imx_pcie.base);
989 iounmap(imx_pcie.dbi_base);
990 iounmap(imx_pcie.gpr_base);
991 release_mem_region(iomem->start, resource_size(iomem));
992 //platform_set_drvdata(pdev, NULL);
/* Device-tree match table; bound against "fsl,pcie" nodes. */
997 static const struct of_device_id of_imx_pcie_match[] = {
998 { .compatible = "fsl,pcie" },
1001 MODULE_DEVICE_TABLE(of, of_imx_pcie_match);
/* Platform driver glue.
 * NOTE(review): the .driver.name initialiser is among the missing
 * lines (original line numbers are non-contiguous). */
1003 static struct platform_driver imx_pcie_pltfm_driver = {
1006 .owner = THIS_MODULE,
1007 .of_match_table = of_imx_pcie_match,
1009 .probe = imx_pcie_pltfm_probe,
1010 .remove = imx_pcie_pltfm_remove,
1013 /*****************************************************************************\
1015 * Driver init/exit *
1017 \*****************************************************************************/
/*
 * imx_pcie_drv_init() - late_initcall entry: clear the minimum PCI
 * memory base (the windows are claimed explicitly in imx_pcie_setup)
 * and register the platform driver.
 */
1019 static int __init imx_pcie_drv_init(void)
1022 pcibios_min_mem = 0;
1024 return platform_driver_register(&imx_pcie_pltfm_driver);
/* Module exit: unregister the platform driver.
 * NOTE(review): unused while registration goes through late_initcall
 * below (module_exit is commented out). */
1027 static void __exit imx_pcie_drv_exit(void)
1029 platform_driver_unregister(&imx_pcie_pltfm_driver);
/* Registered via late_initcall (not module_init) so the driver comes
 * up after the clock and IOMUXC infrastructure it depends on. */
1032 //module_init(imx_pcie_drv_init);
1033 //module_exit(imx_pcie_drv_exit);
1034 late_initcall(imx_pcie_drv_init);
1036 MODULE_DESCRIPTION("i.MX PCIE platform driver");
1037 MODULE_LICENSE("GPL v2");