2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright 2007, Broadcom Corporation
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
/* Debug print macros: SB_ERROR is compiled out in this build; SB_MSG always prints. */
32 #define SB_ERROR(args)
35 #define SB_MSG(args) printf args
/* Client-driver interrupt callbacks: turn interrupts off, restore them,
 * and query whether they are currently enabled (see INTR_OFF/INTR_RESTORE).
 */
40 typedef uint32(*sb_intrsoff_t) (void *intr_arg);
41 typedef void (*sb_intrsrestore_t) (void *intr_arg, uint32 arg);
42 typedef bool(*sb_intrsenabled_t) (void *intr_arg);
/* Singly-linked list node for registered GPIO event handlers.
 * NOTE(review): listing appears truncated here — some fields and the
 * closing "} gpioh_item_t;" are not visible in this view.
 */
44 typedef struct gpioh_item {
47 gpio_handler_t handler;
49 struct gpioh_item *next;
/* Private per-attachment state for the SB utility layer.  The public
 * "struct sb_pub sb" must remain the first field so a sb_t* handed to
 * callers can be cast back to sb_info_t* (see SB_INFO()).
 * NOTE(review): listing appears truncated — the closing "} sb_info_t;"
 * (and possibly more fields) are not visible in this view.
 */
52 /* misc sb info needed by some of the routines */
53 typedef struct sb_info {
55 struct sb_pub sb; /* back plane public state (must be first field) */
57 void *osh; /* osl os handle */
58 void *sdh; /* bcmsdh handle */
60 void *curmap; /* current regs va */
61 void *regs[SB_MAXCORES]; /* other regs va */
63 uint curidx; /* current core index */
64 uint dev_coreid; /* the core provides driver functions */
66 bool memseg; /* flag to toggle MEM_SEG register */
68 uint gpioidx; /* gpio control core index */
69 uint gpioid; /* gpio control coretype */
71 uint numcores; /* # discovered cores */
72 uint coreid[SB_MAXCORES]; /* id of each core */
74 void *intr_arg; /* interrupt callback function arg */
75 sb_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
76 sb_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
77 sb_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
79 uint8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
82 bool pcie_war_ovr; /* Override ASPM/Clkreq settings */
84 uint8 pmecap_offset; /* PM Capability offset in the config space */
85 bool pmecap; /* Capable of generating PME */
87 gpioh_item_t *gpioh_head; /* GPIO event handlers list */
/* Forward declarations for this file's internal (static) helpers.
 * NOTE(review): the mdio read/write prototypes (listing lines 106-109)
 * appear truncated — their trailing parameter lines are missing here.
 */
93 /* local prototypes */
94 static sb_info_t *sb_doattach(sb_info_t * si, uint devid, osl_t * osh,
95 void *regs, uint bustype, void *sdh,
96 char **vars, uint * varsz);
97 static void sb_scan(sb_info_t * si);
98 static uint _sb_coreidx(sb_info_t * si);
99 static uint sb_pcidev2chip(uint pcidev);
100 static uint sb_chip2numcores(uint chip);
101 static bool sb_ispcie(sb_info_t * si);
102 static uint8 sb_find_pci_capability(sb_info_t * si, uint8 req_cap_id,
103 uchar * buf, uint32 * buflen);
104 static int sb_pci_fixcfg(sb_info_t * si);
105 /* routines to access mdio slave device registers */
106 static int sb_pcie_mdiowrite(sb_info_t * si, uint physmedia, uint readdr,
108 static int sb_pcie_mdioread(sb_info_t * si, uint physmedia, uint readdr,
111 /* dev path concatenation util */
112 static char *sb_devpathvar(sb_t * sbh, char *var, int len, const char *name);
/* Hardware workaround (WAR) routines, named after internal bug numbers. */
115 static void sb_war43448(sb_t * sbh);
116 static void sb_war43448_aspm(sb_t * sbh);
117 static void sb_war32414_forceHT(sb_t * sbh, bool forceHT);
118 static void sb_war30841(sb_info_t * si);
119 static void sb_war42767(sb_t * sbh);
120 static void sb_war42767_clkreq(sb_t * sbh);
122 /* delay needed between the mdio control/ mdiodata register data access */
123 #define PR28829_DELAY() OSL_DELAY(10)
125 /* size that can take bitfielddump */
126 #define BITFIELD_DUMP_SIZE 32
128 /* global variable to indicate reservation/release of gpio's */
129 static uint32 sb_gpioreservation = 0;
131 /* global flag to prevent shared resources from being initialized multiple times in sb_attach() */
132 static bool sb_onetimeinit = FALSE;
/* Convert the opaque public handle (sb_t*) back to the private sb_info_t*;
 * valid because sb_pub is the first field of sb_info_t.
 */
134 #define SB_INFO(sbh) (sb_info_t*)(uintptr)sbh
135 #define SET_SBREG(si, r, mask, val) \
136 W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))
/* Sanity checks: a core's backplane address must lie in the enumeration
 * window and be aligned to the per-core register window size.
 */
137 #define GOODCOREADDR(x) (((x) >= SB_ENUM_BASE) && ((x) <= SB_ENUM_LIM) && \
138 ISALIGNED((x), SB_CORE_SIZE))
139 #define GOODREGS(regs) ((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
140 #define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
141 #define BADCOREADDR 0
142 #define GOODIDX(idx) (((uint)idx) < SB_MAXCORES)
143 #define BADIDX (SB_MAXCORES+1)
144 #define NOREV -1 /* Invalid rev */
/* Bus-type predicates for the current attachment. */
146 #define PCI(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
147 #define PCIE(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))
148 #define PCMCIA(si) ((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->memseg == TRUE))
151 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
152 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
/* Backplane register accessors; reads/writes go through the PCMCIA-aware
 * helpers below so the MEM_SEG 12th-address-bit handling is centralized.
 */
154 #define R_SBREG(si, sbr) sb_read_sbreg((si), (sbr))
155 #define W_SBREG(si, sbr, v) sb_write_sbreg((si), (sbr), (v))
156 #define AND_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
157 #define OR_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))
160 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
161 * after core switching to avoid invalid register accesss inside ISR.
/*
 * Disable/restore the client driver's interrupts around core switches so an
 * ISR cannot touch registers while the BAR0 window points at another core.
 * The callbacks fire only when the currently-selected core is the client
 * driver's own core (dev_coreid) and a callback was registered.
 *
 * Wrapped in do { } while (0) so each macro expands to exactly one
 * statement: the original bare-if form could capture a following "else"
 * at the call site (classic multi-statement-macro hazard, CERT PRE10-C).
 */
#define INTR_OFF(si, intr_val) \
	do { \
		if ((si)->intrsoff_fn && \
		    (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
			intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); \
		} \
	} while (0)
#define INTR_RESTORE(si, intr_val) \
	do { \
		if ((si)->intrsrestore_fn && \
		    (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
			(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); \
		} \
	} while (0)
170 /* dynamic clock control defines */
171 #define LPOMINFREQ 25000 /* low power oscillator min */
172 #define LPOMAXFREQ 43000 /* low power oscillator max */
173 #define XTALMINFREQ 19800000 /* 20 MHz - 1% */
174 #define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
175 #define PCIMINFREQ 25000000 /* 25 MHz */
176 #define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
178 #define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
179 #define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
/* WAR32414: chips/revs that must be forced into high-throughput clock mode. */
181 /* force HT war check */
182 #define FORCEHT_WAR32414(si) \
183 (((PCIE(si)) && (si->sb.chip == BCM4311_CHIP_ID) && ((si->sb.chiprev <= 1))) || \
184 ((PCI(si) || PCIE(si)) && (si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3)))
/* PCIe core revs 3-5 need the ASPM/CLKREQ workarounds in this file. */
186 #define PCIE_ASPMWARS(si) \
187 ((PCIE(si)) && ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)))
189 /* GPIO Based LED powersave defines */
190 #define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
191 #define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */
193 #define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
/* Read a 32-bit backplane (sbconfig) register, with the PCMCIA CF quirk:
 * CF provides only 11 address bits, so the 12th bit is supplied via the
 * MEM_SEG attribute register around the access.
 * NOTE(review): listing appears truncated — the PCMCIA/memseg conditionals,
 * the local 'tmp' declaration, the return statement and the closing brace
 * are not visible in this view.
 */
195 static uint32 sb_read_sbreg(sb_info_t * si, volatile uint32 * sbr)
198 uint32 val, intr_val = 0;
201 * compact flash only has 11 bits address, while we needs 12 bits address.
202 * MEM_SEG will be OR'd with other 11 bits address in hardware,
203 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
204 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
207 INTR_OFF(si, intr_val);
209 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
210 sbr = (volatile uint32 *)((uintptr) sbr & ~(1 << 11)); /* mask out bit 11 */
213 val = R_REG(si->osh, sbr);
217 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
218 INTR_RESTORE(si, intr_val);
/* Write a 32-bit backplane (sbconfig) register.  Same PCMCIA MEM_SEG quirk
 * as sb_read_sbreg(); on PCMCIA the 32-bit value is written as two 16-bit
 * halves, in an order that depends on endianness, with dummy reads between
 * the halves (presumably to flush posted writes — TODO confirm).
 * NOTE(review): listing appears truncated — the '#ifdef IL_BIGENDIAN' that
 * matches the '#endif' below, the else branch, the memseg conditionals and
 * the closing brace are not visible in this view.
 */
224 static void sb_write_sbreg(sb_info_t * si, volatile uint32 * sbr, uint32 v)
227 volatile uint32 dummy;
231 * compact flash only has 11 bits address, while we needs 12 bits address.
232 * MEM_SEG will be OR'd with other 11 bits address in hardware,
233 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
234 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
237 INTR_OFF(si, intr_val);
239 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
240 sbr = (volatile uint32 *)((uintptr) sbr & ~(1 << 11)); /* mask out bit 11 */
243 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
245 dummy = R_REG(si->osh, sbr);
246 W_REG(si->osh, ((volatile uint16 *)sbr + 1),
247 (uint16) ((v >> 16) & 0xffff));
248 dummy = R_REG(si->osh, sbr);
249 W_REG(si->osh, (volatile uint16 *)sbr, (uint16) (v & 0xffff));
251 dummy = R_REG(si->osh, sbr);
252 W_REG(si->osh, (volatile uint16 *)sbr, (uint16) (v & 0xffff));
253 dummy = R_REG(si->osh, sbr);
254 W_REG(si->osh, ((volatile uint16 *)sbr + 1),
255 (uint16) ((v >> 16) & 0xffff));
256 #endif /* IL_BIGENDIAN */
258 W_REG(si->osh, sbr, v);
262 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
263 INTR_RESTORE(si, intr_val);
268 * Allocate a sb handle.
269 * devid - pci device id (used to determine chip#)
270 * osh - opaque OS handle
271 * regs - virtual address of initial core registers
272 * bustype - pci/pcmcia/sb/sdio/etc
273 * vars - pointer to a pointer area for "environment" variables
274 * varsz - pointer to int to return the size of the vars
/* Public attach entry point: allocates the private sb_info_t, delegates
 * real work to sb_doattach(), and frees the allocation on failure.
 * NOTE(review): listing appears truncated — error-path closing braces,
 * the return statements and the final brace are not visible here.
 */
276 sb_t *sb_attach(uint devid, osl_t * osh, void *regs,
277 uint bustype, void *sdh, char **vars,
281 /* alloc sb_info_t */
282 if ((si = MALLOC(osh, sizeof(sb_info_t))) == NULL) {
283 SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n",
288 if (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, varsz) ==
290 MFREE(osh, si, sizeof(sb_info_t));
293 si->vars = vars ? *vars : NULL;
294 si->varsz = varsz ? *varsz : 0;
299 /* Using sb_kattach depends on SB_BUS support, either implicit */
300 /* no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
301 #if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
303 /* global kernel resource */
304 static sb_info_t ksi;
/* Kernel (SB_BUS) variant of sb_attach(): attaches once to the static
 * 'ksi' using the fixed enumeration base, applying a 4712 slow-clock
 * fixup first.
 * NOTE(review): listing appears truncated — the ksi_attached early-return,
 * parts of the 4712 fixup, and the success/return paths are not visible.
 */
306 /* generic kernel variant of sb_attach() */
307 sb_t *BCMINITFN(sb_kattach) (osl_t * osh) {
308 static bool ksi_attached = FALSE;
314 regs = (uint32 *) REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
315 cid = R_REG(osh, (uint32 *) regs);
316 if (((cid & CID_ID_MASK) == BCM4712_CHIP_ID) &&
317 ((cid & CID_PKG_MASK) != BCM4712LARGE_PKG_ID) &&
318 ((cid & CID_REV_MASK) <= (3 << CID_REV_SHIFT))) {
322 (uint32 *) ((uchar *) regs +
323 OFFSETOF(chipcregs_t, slow_clk_ctl));
324 val = R_REG(osh, scc);
325 SB_ERROR((" initial scc = 0x%x\n", val));
327 W_REG(osh, scc, val);
330 if (sb_doattach(&ksi, BCM4710_DEVICE_ID, osh, (void *)regs, SB_BUS, NULL,
331 osh != SB_OSH ? &ksi.vars : NULL,
332 osh != SB_OSH ? &ksi.varsz : NULL) == NULL)
339 #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
/* Core attach routine: zeroes the state, identifies the bus, scans and
 * identifies the chip and its cores, initializes NVRAM/SROM variables,
 * reads board identity, and applies the chip/bus-specific hardware
 * workarounds.  Returns 'si' on success, NULL on failure (per visible
 * callers in sb_attach/sb_kattach).
 * NOTE(review): this listing is heavily truncated — many statements,
 * local declarations, braces and returns from the original are missing,
 * so control flow below cannot be read as complete code.
 */
341 static sb_info_t *BCMINITFN(sb_doattach) (sb_info_t * si, uint devid,
342 osl_t * osh, void *regs,
343 uint bustype, void *sdh,
344 char **vars, uint * varsz) {
351 ASSERT(GOODREGS(regs));
353 bzero((uchar *) si, sizeof(sb_info_t));
354 si->sb.buscoreidx = si->gpioidx = BADIDX;
360 /* check to see if we are a sb core mimic'ing a pci core */
361 if (bustype == PCI_BUS) {
362 if (OSL_PCI_READ_CONFIG
363 (si->osh, PCI_SPROM_CONTROL,
364 sizeof(uint32)) == 0xffffffff) {
365 SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB " "devid:0x%x\n", __FUNCTION__, devid));
369 si->sb.bustype = bustype;
370 if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
371 SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n", si->sb.bustype, BUSTYPE(si->sb.bustype)));
375 /* need to set memseg flag for CF card first before any sb registers access */
376 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
379 /* kludge to enable the clock on the 4306 which lacks a slowclock */
380 if (BUSTYPE(si->sb.bustype) == PCI_BUS && !sb_ispcie(si))
381 sb_clkctl_xtal(&si->sb, XTAL | PLL, ON);
383 if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
384 w = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
385 if (!GOODCOREADDR(w))
386 OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN,
387 sizeof(uint32), SB_ENUM_BASE);
390 /* initialize current core index value */
391 si->curidx = _sb_coreidx(si);
393 if (si->curidx == BADIDX) {
394 SB_ERROR(("sb_doattach: bad core index\n"));
398 /* get sonics backplane revision */
401 (R_SBREG(si, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
402 /* keep and reuse the initial register mapping */
403 origidx = si->curidx;
404 if (BUSTYPE(si->sb.bustype) == SB_BUS)
405 si->regs[origidx] = regs;
407 /* is core-0 a chipcommon core? */
409 cc = (chipcregs_t *) sb_setcoreidx(&si->sb, 0);
410 if (sb_coreid(&si->sb) != SB_CC)
413 /* determine chip id and rev */
415 /* chip common core found! */
416 si->sb.chip = R_REG(si->osh, &cc->chipid) & CID_ID_MASK;
418 (R_REG(si->osh, &cc->chipid) & CID_REV_MASK) >>
421 (R_REG(si->osh, &cc->chipid) & CID_PKG_MASK) >>
424 /* no chip common core -- must convert device id to chip id */
425 if ((si->sb.chip = sb_pcidev2chip(devid)) == 0) {
426 SB_ERROR(("sb_doattach: unrecognized device id 0x%04x\n", devid));
427 sb_setcoreidx(&si->sb, origidx);
432 /* get chipcommon rev */
433 si->sb.ccrev = cc ? (int)sb_corerev(&si->sb) : NOREV;
435 /* get chipcommon capabilites */
436 si->sb.cccaps = cc ? R_REG(si->osh, &cc->capabilities) : 0;
438 /* determine numcores */
439 if (cc && ((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
441 (R_REG(si->osh, &cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
443 si->numcores = sb_chip2numcores(si->sb.chip);
445 /* return to original core */
446 sb_setcoreidx(&si->sb, origidx);
454 /* fixup necessary chip/core configurations */
455 if (BUSTYPE(si->sb.bustype) == PCI_BUS && sb_pci_fixcfg(si)) {
456 SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
460 /* Init nvram from sprom/otp if they exist */
462 (&si->sb, BUSTYPE(si->sb.bustype), regs, si->osh, vars, varsz)) {
463 SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
466 pvars = vars ? *vars : NULL;
468 /* PMU specific initializations */
469 if ((si->sb.cccaps & CC_CAP_PMU) && !sb_onetimeinit) {
470 sb_pmu_init(&si->sb, si->osh);
471 /* Find out Crystal frequency and init PLL */
472 sb_pmu_pll_init(&si->sb, si->osh, getintvar(pvars, "xtalfreq"));
473 /* Initialize PMU resources (up/dn timers, dep masks, etc.) */
474 sb_pmu_res_init(&si->sb, si->osh);
478 * The chip revision number is hardwired into all
479 * of the pci function config rev fields and is
480 * independent from the individual core revision numbers.
481 * For example, the "A0" silicon of each chip is chip rev 0.
482 * For PCMCIA we get it from the CIS instead.
484 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
486 si->sb.chiprev = getintvar(*vars, "chiprev");
487 } else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
488 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_REV,
490 si->sb.chiprev = w & 0xff;
495 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
496 w = getintvar(pvars, "regwindowsz");
497 si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
499 /* gpio control core is required */
500 if (!GOODIDX(si->gpioidx)) {
501 SB_ERROR(("sb_doattach: gpio control core not found\n"));
505 /* get boardtype and boardrev */
506 switch (BUSTYPE(si->sb.bustype)) {
508 /* do a pci config read to get subsystem id and subvendor id */
509 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_SVID, sizeof(uint32));
510 /* Let nvram variables override subsystem Vend/ID */
511 if ((si->sb.boardvendor =
512 (uint16) sb_getdevpathintvar(&si->sb, "boardvendor")) == 0)
513 si->sb.boardvendor = w & 0xffff;
515 SB_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n", si->sb.boardvendor, w & 0xffff));
516 if ((si->sb.boardtype =
517 (uint16) sb_getdevpathintvar(&si->sb, "boardtype")) == 0)
518 si->sb.boardtype = (w >> 16) & 0xffff;
520 SB_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n", si->sb.boardtype, (w >> 16) & 0xffff));
524 si->sb.boardvendor = getintvar(pvars, "manfid");
525 si->sb.boardtype = getintvar(pvars, "prodid");
530 si->sb.boardvendor = VENDOR_BROADCOM;
532 || ((si->sb.boardtype = getintvar(pvars, "prodid")) == 0))
533 if ((si->sb.boardtype =
534 getintvar(NULL, "boardtype")) == 0)
535 si->sb.boardtype = 0xffff;
539 if (si->sb.boardtype == 0) {
540 SB_ERROR(("sb_doattach: unknown board type\n"));
541 ASSERT(si->sb.boardtype);
544 si->sb.boardflags = getintvar(pvars, "boardflags");
546 /* setup the GPIO based LED powersave register */
547 if (si->sb.ccrev >= 16) {
548 if ((pvars == NULL) || ((w = getintvar(pvars, "leddc")) == 0))
549 w = DEFAULT_GPIOTIMERVAL;
550 sb_corereg(&si->sb, SB_CC_IDX,
551 OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
554 /* Determine if this board needs override */
555 if (PCIE(si) && (si->sb.chip == BCM4321_CHIP_ID))
556 si->pcie_war_ovr = ((si->sb.boardvendor == VENDOR_APPLE) &&
557 ((uint8) getintvar(pvars, "sromrev") == 4)
558 && ((uint8) getintvar(pvars, "boardrev") <=
560 || ((uint32) getintvar(pvars, "boardflags2") &
563 if (PCIE_ASPMWARS(si)) {
564 sb_war43448_aspm((void *)si);
565 sb_war42767_clkreq((void *)si);
568 if (FORCEHT_WAR32414(si)) {
569 si->sb.pr32414 = TRUE;
570 sb_clkctl_init(&si->sb);
571 sb_war32414_forceHT(&si->sb, 1);
574 if (PCIE(si) && ((si->sb.buscorerev == 6) || (si->sb.buscorerev == 7)))
575 si->sb.pr42780 = TRUE;
577 if (PCIE_ASPMWARS(si))
578 sb_pcieclkreq(&si->sb, 1, 0);
581 (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
582 ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
583 sb_set_initiator_to(&si->sb, 0x3,
584 sb_findcoreidx(&si->sb, SB_D11, 0));
586 /* Disable gpiopullup and gpiopulldown */
587 if (!sb_onetimeinit && si->sb.ccrev >= 20) {
588 cc = (chipcregs_t *) sb_setcore(&si->sb, SB_CC, 0);
589 W_REG(osh, &cc->gpiopullup, 0);
590 W_REG(osh, &cc->gpiopulldown, 0);
591 sb_setcoreidx(&si->sb, origidx);
594 /* clear any previous epidiag-induced target abort */
599 sb_onetimeinit = TRUE;
/* Toggle PCIe CLKREQ for 4311B0/4321B1, but only after the serdes PLL
 * workaround (pr42767) has been applied on ASPM-WAR parts.
 * NOTE(review): listing truncated — the SB_INFO() conversion, the early
 * return and the closing brace are not visible here.
 */
605 /* Enable/Disable clkreq for PCIE (4311B0/4321B1) */
606 void sb_war42780_clkreq(sb_t * sbh, bool clkreq) {
611 /* Don't change clkreq value if serdespll war has not yet been applied */
612 if (!si->pr42767_war && PCIE_ASPMWARS(si))
615 sb_pcieclkreq(sbh, 1, (int32) clkreq);
/* WAR43448: restore the saved SERDES RX polarity on affected PCIe cores.
 * NOTE(review): listing truncated — the SB_INFO() conversion, returns and
 * closing brace are not visible here.
 */
618 static void BCMINITFN(sb_war43448) (sb_t * sbh) {
623 /* if not pcie bus, we're done */
624 if (!PCIE(si) || !PCIE_ASPMWARS(si))
627 /* Restore the polarity */
628 if (si->pcie_polarity != 0)
629 sb_pcie_mdiowrite((void *)(uintptr) & si->sb, MDIODATA_DEV_RX,
630 SERDES_RX_CTRL, si->pcie_polarity);
/* WAR43448 (ASPM half): enable or disable ASPM both in the shadow SROM
 * area of the PCIe core and in the PCI config Link Control register,
 * honoring the per-board override flag pcie_war_ovr.  Skipped entirely
 * on simulation packages.
 * NOTE(review): listing truncated — early returns, the ASPM_ENAB set in
 * the config-space branch, core-index restore and closing brace are not
 * visible here.
 */
633 static void BCMINITFN(sb_war43448_aspm) (sb_t * sbh) {
635 uint16 val16, *reg16;
636 sbpcieregs_t *pcieregs;
641 /* if not pcie bus, we're done */
642 if (!PCIE(si) || !PCIE_ASPMWARS(si))
645 /* no ASPM stuff on QT or VSIM */
646 if (si->sb.chippkg == HDLSIM_PKG_ID || si->sb.chippkg == HWSIM_PKG_ID)
649 pcieregs = (sbpcieregs_t *) sb_setcoreidx(sbh, si->sb.buscoreidx);
651 /* Enable ASPM in the shadow SROM and Link control */
652 reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
653 val16 = R_REG(si->osh, reg16);
654 if (!si->pcie_war_ovr)
655 val16 |= SRSH_ASPM_ENB;
657 val16 &= ~SRSH_ASPM_ENB;
658 W_REG(si->osh, reg16, val16);
660 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset,
662 if (!si->pcie_war_ovr)
665 w &= ~PCIE_ASPM_ENAB;
666 OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32),
/* WAR32414: force (or release) high-throughput clock mode via the
 * chipcommon system clock control register.
 * NOTE(review): listing truncated — locals, the mask/val computation and
 * the closing brace are not visible here.
 */
670 static void BCMINITFN(sb_war32414_forceHT) (sb_t * sbh, bool forceHT) {
676 ASSERT(FORCEHT_WAR32414(si));
680 sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, system_clk_ctl),
/* Return the core-id field of the current core's sbidhigh register. */
684 uint sb_coreid(sb_t * sbh)
690 sb = REGS2SB(si->curmap);
692 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
/* Return the current core's target-state flag number (sbtpsflag). */
695 uint sb_flag(sb_t * sbh)
701 sb = REGS2SB(si->curmap);
703 return R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
/* Return the index of the currently-selected core.
 * NOTE(review): the function body is missing from this truncated listing;
 * presumably it wraps _sb_coreidx() below — verify against full source.
 */
706 uint sb_coreidx(sb_t * sbh)
/* Derive the current core index from the bus-specific window address:
 * SB bus reads sbadmatch0, PCI reads BAR0_WIN, PCMCIA reassembles the
 * address from three attribute registers, and plain SB mapping uses the
 * current map VA.  Returns BADIDX for an address outside the enumeration
 * window (per GOODCOREADDR check).
 * NOTE(review): listing truncated — case labels, locals and the closing
 * brace are not visible here.
 */
714 static uint _sb_coreidx(sb_info_t * si)
722 switch (BUSTYPE(si->sb.bustype)) {
724 sb = REGS2SB(si->curmap);
725 sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
730 OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
736 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
737 sbaddr = (uint) tmp << 12;
738 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
739 sbaddr |= (uint) tmp << 16;
740 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
741 sbaddr |= (uint) tmp << 24;
747 sbaddr = (uint32) si->curmap;
755 if (!GOODCOREADDR(sbaddr))
758 return ((sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);
/* Return the vendor-code field of the current core's sbidhigh register. */
761 uint sb_corevendor(sb_t * sbh)
767 sb = REGS2SB(si->curmap);
769 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
/* Return the current core's revision, decoded from sbidhigh via SBCOREREV(). */
772 uint sb_corerev(sb_t * sbh)
779 sb = REGS2SB(si->curmap);
780 sbidh = R_SBREG(si, &sb->sbidhigh);
782 return (SBCOREREV(sbidh));
/* Accessor for the attachment's OS handle.
 * NOTE(review): the function body is missing from this truncated listing.
 */
785 void *sb_osh(sb_t * sbh)
/* Install the OS handle; logs (and presumably refuses — the return path
 * is not visible in this truncated listing) if one is already set.
 */
793 void sb_setosh(sb_t * sbh, osl_t * osh)
798 if (si->osh != NULL) {
799 SB_ERROR(("osh is already set....\n"));
805 /* set sbtmstatelow core-specific flags */
/* Write-only variant: masked read-modify-write of sbtmstatelow with no
 * read-back of the result (contrast sb_coreflags below).
 */
806 void sb_coreflags_wo(sb_t * sbh, uint32 mask, uint32 val)
813 sb = REGS2SB(si->curmap);
815 ASSERT((val & ~mask) == 0);
818 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
819 W_SBREG(si, &sb->sbtmstatelow, w);
822 /* set/clear sbtmstatelow core-specific flags */
/* Masked read-modify-write of sbtmstatelow; the trailing read both
 * returns the new value and flushes the posted write.
 */
823 uint32 sb_coreflags(sb_t * sbh, uint32 mask, uint32 val)
830 sb = REGS2SB(si->curmap);
832 ASSERT((val & ~mask) == 0);
836 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
837 W_SBREG(si, &sb->sbtmstatelow, w);
840 /* return the new value
841 * for write operation, the following readback ensures the completion of write opration.
843 return (R_SBREG(si, &sb->sbtmstatelow));
846 /* set/clear sbtmstatehigh core-specific flags */
/* Same masked read-modify-write pattern as sb_coreflags(), applied to
 * sbtmstatehigh; mask must stay within the SBTMH flag bits.
 */
847 uint32 sb_coreflagshi(sb_t * sbh, uint32 mask, uint32 val)
854 sb = REGS2SB(si->curmap);
856 ASSERT((val & ~mask) == 0);
857 ASSERT((mask & ~SBTMH_FL_MASK) == 0);
861 w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
862 W_SBREG(si, &sb->sbtmstatehigh, w);
865 /* return the new value */
866 return (R_SBREG(si, &sb->sbtmstatehigh));
869 /* Run bist on current core. Caller needs to take care of core-specific bist hazards */
/* Kick built-in self test by setting FGC|BE in sbtmstatelow, spin-wait for
 * BIST-done in sbtmstatehigh, report failure if the BISTF bit is set, then
 * restore the original sbtmstatelow.
 * NOTE(review): listing truncated — locals, the result assignment and
 * return are not visible here.
 */
870 int sb_corebist(sb_t * sbh)
878 sb = REGS2SB(si->curmap);
880 sblo = R_SBREG(si, &sb->sbtmstatelow);
881 W_SBREG(si, &sb->sbtmstatelow, (sblo | SBTML_FGC | SBTML_BE));
883 SPINWAIT(((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTD) == 0),
886 if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTF)
889 W_SBREG(si, &sb->sbtmstatelow, sblo);
/* A core is "up" when its clock is on and it is neither in reset nor
 * rejecting transactions.
 */
894 bool sb_iscoreup(sb_t * sbh)
900 sb = REGS2SB(si->curmap);
902 return ((R_SBREG(si, &sb->sbtmstatelow) &
903 (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
907 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
908 * switch back to the original core, and return the new value.
910 * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
912 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
913 * and (on newer pci cores) chipcommon registers.
/* NOTE(review): listing truncated throughout — locals, several closing
 * braces, the fast=TRUE assignments and the return statement are not
 * visible in this view; the fast-path/slow-path structure below must be
 * read against the full source.
 */
915 uint sb_corereg(sb_t * sbh, uint coreidx, uint regoff, uint mask, uint val)
926 ASSERT(GOODIDX(coreidx));
927 ASSERT(regoff < SB_CORE_SIZE);
928 ASSERT((val & ~mask) == 0);
931 if (BUSTYPE(si->sb.bustype) == SB_BUS) {
932 /* If internal bus, we can always get at everything */
934 /* map if does not exist */
935 if (!si->regs[coreidx]) {
937 (void *)REG_MAP(si->coresba[coreidx], SB_CORE_SIZE);
938 ASSERT(GOODREGS(si->regs[coreidx]));
940 r = (uint32 *) ((uchar *) si->regs[coreidx] + regoff);
941 } else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
942 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
944 if ((si->coreid[coreidx] == SB_CC) &&
945 ((si->sb.buscoretype == SB_PCIE)
946 || (si->sb.buscorerev >= 13))) {
947 /* Chipc registers are mapped at 12KB */
950 r = (uint32 *) ((char *)si->curmap +
951 PCI_16KB0_CCREGS_OFFSET + regoff);
952 } else if (si->sb.buscoreidx == coreidx) {
953 /* pci registers are at either in the last 2KB of an 8KB window
954 * or, in pcie and pci rev 13 at 8KB
957 if ((si->sb.buscoretype == SB_PCIE)
958 || (si->sb.buscorerev >= 13))
959 r = (uint32 *) ((char *)si->curmap +
960 PCI_16KB0_PCIREGS_OFFSET +
963 r = (uint32 *) ((char *)si->curmap +
964 ((regoff >= SBCONFIGOFF) ?
965 PCI_BAR0_PCISBR_OFFSET :
966 PCI_BAR0_PCIREGS_OFFSET)
/* Slow path: mask interrupts, switch the window to the target core,
 * do the read-modify-write, then restore core and interrupts.
 */
973 INTR_OFF(si, intr_val);
975 /* save current core index */
976 origidx = sb_coreidx(&si->sb);
979 r = (uint32 *) ((uchar *) sb_setcoreidx(&si->sb, coreidx) +
986 if (regoff >= SBCONFIGOFF) {
987 w = (R_SBREG(si, r) & ~mask) | val;
990 w = (R_REG(si->osh, r) & ~mask) | val;
991 W_REG(si->osh, r, w);
996 if (regoff >= SBCONFIGOFF)
/* 5354 chipcommon watchdog: read back the register (special case). */
999 if ((si->sb.chip == BCM5354_CHIP_ID) &&
1000 (coreidx == SB_CC_IDX) &&
1001 (regoff == OFFSETOF(chipcregs_t, watchdog))) {
1004 w = R_REG(si->osh, r);
1008 /* restore core index */
1009 if (origidx != coreidx)
1010 sb_setcoreidx(&si->sb, origidx);
1012 INTR_RESTORE(si, intr_val);
/*
 * Helpers for extracting byte/word fields from 32-bit PCI config dwords.
 * All macro parameters are fully parenthesized so compound arguments such
 * as DWORD_ALIGN(cap_ptr + 1) or BYTE_POS(off | 2) expand correctly; the
 * original definitions left the parameters bare (operator-precedence bug,
 * CERT PRE01-C).  Behavior is unchanged for simple (single-token) args.
 */
#define DWORD_ALIGN(x) ((x) & ~(0x03))	/* round config offset down to a dword */
#define BYTE_POS(x) ((x) & 0x3)	/* byte index within that dword */
#define WORD_POS(x) ((x) & 0x1)	/* 16-bit word index within that dword */

#define BYTE_SHIFT(x) (8 * BYTE_POS(x))
#define WORD_SHIFT(x) (16 * WORD_POS(x))

#define BYTE_VAL(a, x) (((a) >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x) (((a) >> WORD_SHIFT(x)) & 0xFFFF)

/* Read one byte/word of PCI config space at offset 'a'; a local 'si'
 * (sb_info_t *) must be in scope at the point of use.
 */
#define read_pci_cfg_byte(a) \
	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)

#define read_pci_cfg_word(a) \
	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
1034 /* return cap_offset if requested capability exists in the PCI config space */
/* Walk the PCI capability linked list (header type 0 only): verify the
 * capabilities-present status bit, start at the capability pointer, and
 * follow 'next' pointers until req_cap_id is found.  Optionally copies up
 * to *buflen bytes of the capability body (after the 2-byte ID/next header)
 * into 'buf'.
 * NOTE(review): listing truncated — the return type line, locals, the
 * loop bound, the copy loop and final return are not visible here.
 */
1036 sb_find_pci_capability(sb_info_t * si, uint8 req_cap_id, uchar * buf,
1044 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
1047 /* check for Header type 0 */
1048 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
1049 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
1052 /* check if the capability pointer field exists */
1053 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
1054 if (!(byte_val & PCI_CAPPTR_PRESENT))
1057 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
1058 /* check if the capability pointer is 0x00 */
1059 if (cap_ptr == 0x00)
1062 /* loop thr'u the capability list and see if the pcie capabilty exists */
1064 cap_id = read_pci_cfg_byte(cap_ptr);
1066 while (cap_id != req_cap_id) {
1067 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
1068 if (cap_ptr == 0x00)
1070 cap_id = read_pci_cfg_byte(cap_ptr);
1072 if (cap_id != req_cap_id) {
1075 /* found the caller requested capability */
1076 if ((buf != NULL) && (buflen != NULL)) {
1083 /* copy the cpability data excluding cap ID and next ptr */
1084 cap_data = cap_ptr + 2;
1085 if ((bufsize + cap_data) > SZPCR)
1086 bufsize = SZPCR - cap_data;
1089 *buf = read_pci_cfg_byte(cap_data);
/* Set (mask != 0) or just query the CLKREQ enable bit in the PCIe Link
 * Control register; returns the bit's state after the operation.
 * NOTE(review): listing truncated — the SB_INFO() conversion, the mask
 * test and return statements are not visible here.
 */
1098 uint8 sb_pcieclkreq(sb_t * sbh, uint32 mask, uint32 val)
1106 offset = si->pciecap_lcreg_offset;
1110 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1114 reg_val |= PCIE_CLKREQ_ENAB;
1116 reg_val &= ~PCIE_CLKREQ_ENAB;
1117 OSL_PCI_WRITE_CONFIG(si->osh, offset, sizeof(uint32), reg_val);
1118 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1120 if (reg_val & PCIE_CLKREQ_ENAB)
/* Read (and, when mask != 0, write) the raw PCIe Link Control register;
 * returns the register value read back after any write.
 * NOTE(review): listing truncated — locals, the mask test and the return
 * are not visible here.
 */
1128 uint32 sb_pcielcreg(sb_t * sbh, uint32 mask, uint32 val)
1139 offset = si->pciecap_lcreg_offset;
1145 OSL_PCI_WRITE_CONFIG(si->osh, offset, sizeof(uint32), val);
1147 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
/* Allow the PLL to power down in PCIe L1 (buscorerev 3/4 only).  Requires
 * CLKREQ enabled and ASPM L0s disabled; then clears the DLLP PCIE11
 * register while temporarily switched to the PCIe core.
 * NOTE(review): listing truncated — locals, early returns and the final
 * return value are not visible here.
 */
1152 uint8 sb_pcieL1plldown(sb_t * sbh)
1163 if (!((si->sb.buscorerev == 3) || (si->sb.buscorerev == 4)))
1166 if (!sb_pcieclkreq((void *)(uintptr) sbh, 0, 0)) {
1167 SB_ERROR(("PCIEL1PLLDOWN requires Clkreq be enabled, so enable it\n"));
1168 sb_pcieclkreq((void *)(uintptr) sbh, 1, 1);
1170 reg_val = sb_pcielcreg((void *)(uintptr) sbh, 0, 0);
1171 if (reg_val & PCIE_CAP_LCREG_ASPML0s) {
1172 SB_ERROR(("PCIEL1PLLDOWN requires L0s to be disabled\n"));
1173 reg_val &= ~PCIE_CAP_LCREG_ASPML0s;
1174 sb_pcielcreg((void *)(uintptr) sbh, 1, reg_val);
1176 SB_ERROR(("PCIEL1PLLDOWN: L0s is already disabled\n"));
1178 /* turnoff intrs, change core, set original back, turn on intrs back on */
1179 origidx = si->curidx;
1180 INTR_OFF(si, intr_val);
1181 sb_setcore(sbh, SB_PCIE, 0);
1183 sb_pcie_writereg((void *)(uintptr) sbh, (void *)PCIE_PCIEREGS,
1184 PCIE_DLLP_PCIE11, 0);
1186 sb_setcoreidx(sbh, origidx);
1187 INTR_RESTORE(si, intr_val);
1192 /* return TRUE if PCIE capability exists in the pci config space */
/* Side effect: caches the Link Control register offset in
 * si->pciecap_lcreg_offset when the PCIe capability is found.
 * NOTE(review): listing truncated — the not-found return and final
 * return TRUE are not visible here.
 */
1193 static bool sb_ispcie(sb_info_t * si)
1197 cap_ptr = sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL);
1201 si->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
1206 /* Wake-on-wireless-LAN (WOWL) support functions */
1207 /* return TRUE if PM capability exists in the pci config space */
/* Lazily locates and caches the PCI Power Management capability offset,
 * then reports whether any power state can generate PME.
 * NOTE(review): listing truncated — the not-found early return and parts
 * of the caching branch are not visible here.
 */
1208 bool sb_pci_pmecap(sb_t * sbh)
1216 if (si == NULL || !(PCI(si) || PCIE(si)))
1219 if (!si->pmecap_offset) {
1221 sb_find_pci_capability(si, PCI_CAP_POWERMGMTCAP_ID, NULL,
1226 si->pmecap_offset = cap_ptr;
1229 OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset,
1232 /* At least one state can generate PME */
1233 si->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
1236 return (si->pmecap);
1239 /* Enable PME generation and disable clkreq */
/* Sets PME_EN in the PM control/status register, then adjusts CLKREQ
 * according to which PCIe workaround (pr42767 vs pr42780) applies.
 * NOTE(review): listing truncated — locals and closing braces are not
 * visible here.
 */
1240 void sb_pci_pmeen(sb_t * sbh)
1246 /* if not pmecapable return */
1247 if (!sb_pci_pmecap(sbh))
1250 w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET,
1252 w |= (PME_CSR_PME_EN);
1253 OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET,
1256 /* Disable clkreq */
1257 if (si->pr42767_war) {
1258 sb_pcieclkreq(sbh, 1, 0);
1259 si->pr42767_war = FALSE;
1260 } else if (si->sb.pr42780) {
1261 sb_pcieclkreq(sbh, 1, 1);
1265 /* Disable PME generation, clear the PME status bit if set and
1266 * return TRUE if PME status set
/* NOTE(review): listing truncated — locals, the write-back value and the
 * return of 'ret' are not visible here.  PME status is write-1-to-clear,
 * so the (elided) write-back presumably keeps PME_CSR_PME_STAT set.
 */
1268 bool sb_pci_pmeclr(sb_t * sbh)
1276 if (!sb_pci_pmecap(sbh))
1279 w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET,
1282 SB_ERROR(("sb_pci_pmeclr PMECSR : 0x%x\n", w));
1283 ret = (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
1285 /* PMESTAT is cleared by writing 1 to it */
1286 w &= ~(PME_CSR_PME_EN);
1288 OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET,
1294 /* use pci dev id to determine chip id for chips not having a chipcommon core */
/* NOTE(review): listing truncated — the final "unrecognized" return
 * (which callers treat as 0, per sb_doattach) and the closing brace are
 * not visible here.
 */
1295 static uint BCMINITFN(sb_pcidev2chip) (uint pcidev) {
1296 if ((pcidev >= BCM4710_DEVICE_ID) && (pcidev <= BCM47XX_USB_ID))
1297 return (BCM4710_CHIP_ID);
1298 if ((pcidev >= BCM4402_ENET_ID) && (pcidev <= BCM4402_V90_ID))
1299 return (BCM4402_CHIP_ID);
1300 if (pcidev == BCM4401_ENET_ID)
1301 return (BCM4402_CHIP_ID);
1302 if (pcidev == SDIOH_FPGA_ID)
1303 return (SDIOH_FPGA_ID);
1308 /* Scan the enumeration space to find all cores starting from the given
1309 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
1310 * is the default core address at chip POR time and 'regs' is the virtual
1311 * address that the default core is mapped at. 'ncores' is the number of
1312 * cores expected on bus 'sbba'. It returns the total number of cores
1313 * starting from bus 'sbba', inclusive.
/* Identify every core's id/rev, record which core serves as the host bus
 * interface (PCI/PCIE/PCMCIA, preferring per the elided selection logic),
 * and pick the GPIO-controlling core (chipcommon, else PCI rev>=2, else
 * EXTIF).
 * NOTE(review): listing truncated — locals, 'sbh', the pci/pcie index
 * assignments and the PCI-vs-PCIE selection conditionals are not visible.
 */
1316 static void BCMINITFN(sb_scan) (sb_info_t * si) {
1329 /* numcores should already be set */
1330 ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));
1332 /* save current core index */
1333 origidx = sb_coreidx(&si->sb);
1335 si->sb.buscorerev = NOREV;
1336 si->sb.buscoreidx = BADIDX;
1338 si->gpioidx = BADIDX;
1341 pcirev = pcierev = NOREV;
1342 pciidx = pcieidx = BADIDX;
1344 for (i = 0; i < si->numcores; i++) {
1345 sb_setcoreidx(&si->sb, i);
1346 si->coreid[i] = sb_coreid(&si->sb);
1348 if (si->coreid[i] == SB_PCI) {
1350 pcirev = sb_corerev(&si->sb);
1352 } else if (si->coreid[i] == SB_PCIE) {
1354 pcierev = sb_corerev(&si->sb);
1356 } else if (si->coreid[i] == SB_PCMCIA) {
1357 si->sb.buscorerev = sb_corerev(&si->sb);
1358 si->sb.buscoretype = si->coreid[i];
1359 si->sb.buscoreidx = i;
1369 si->sb.buscoretype = SB_PCI;
1370 si->sb.buscorerev = pcirev;
1371 si->sb.buscoreidx = pciidx;
1373 si->sb.buscoretype = SB_PCIE;
1374 si->sb.buscorerev = pcierev;
1375 si->sb.buscoreidx = pcieidx;
1379 * Find the gpio "controlling core" type and index.
1381 * - if there's a chip common core - use that
1382 * - else if there's a pci core (rev >= 2) - use that
1383 * - else there had better be an extif core (4710 only)
1385 if (GOODIDX(sb_findcoreidx(sbh, SB_CC, 0))) {
1386 si->gpioidx = sb_findcoreidx(sbh, SB_CC, 0);
1388 } else if (PCI(si) && (si->sb.buscorerev >= 2)) {
1389 si->gpioidx = si->sb.buscoreidx;
1390 si->gpioid = SB_PCI;
1391 } else if (sb_findcoreidx(sbh, SB_EXTIF, 0)) {
1392 si->gpioidx = sb_findcoreidx(sbh, SB_EXTIF, 0);
1393 si->gpioid = SB_EXTIF;
1395 ASSERT(si->gpioidx != BADIDX);
1397 /* return to original core index */
1398 sb_setcoreidx(&si->sb, origidx);
1401 /* may be called with core in reset */
/* Tear down the attachment: unmap any per-core register mappings made on
 * SB_BUS, then free the sb_info_t — unless it is the static kernel 'ksi'
 * (the elided lines between the #if/#endif presumably guard that case —
 * verify against full source).
 */
1402 void sb_detach(sb_t * sbh)
1412 if (BUSTYPE(si->sb.bustype) == SB_BUS)
1413 for (idx = 0; idx < SB_MAXCORES; idx++)
1414 if (si->regs[idx]) {
1415 REG_UNMAP(si->regs[idx]);
1416 si->regs[idx] = NULL;
1418 #if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
1420 #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
1421 MFREE(si->osh, si, sizeof(sb_info_t));
1425 /* convert chip number to number of i/o cores */
1426 static uint BCMINITFN(sb_chip2numcores) (uint chip) {
/*
 * Fixed lookup for chips that cannot report their core count; logs an error
 * for unknown chips.  NOTE(review): the per-chip return values are on
 * elided lines of this listing.
 */
1427 if (chip == BCM4710_CHIP_ID)
1429 if (chip == BCM4402_CHIP_ID)
1431 if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
1433 if (chip == BCM4704_CHIP_ID)
1435 if (chip == BCM5365_CHIP_ID)
1437 if (chip == SDIOH_FPGA_ID)
1440 SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
1445 /* return index of coreid or BADIDX if not found */
1446 uint sb_findcoreidx(sb_t * sbh, uint coreid, uint coreunit)
/* Linear scan of the cached coreid[] table for the coreunit'th instance of
 * coreid.  NOTE(review): declarations of si/i/found are on elided lines. */
1456 for (i = 0; i < si->numcores; i++)
1457 if (si->coreid[i] == coreid) {
1458 if (found == coreunit)
1467 * this function changes logical "focus" to the indiciated core,
1468 * must be called with interrupt off.
1469 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1471 void *sb_setcoreidx(sb_t * sbh, uint coreidx)
/*
 * Point the current register window at core 'coreidx' and return its
 * (virtual) register address, dispatching on bus type:
 *  - SB_BUS: map (and cache) the core's backplane address
 *  - PCI: reprogram the BAR0 window
 *  - PCMCIA: program the three address attribute registers
 *  - JTAG-ish path: store the raw backplane address
 * Returns the mapping via si->curmap.  Out-of-range coreidx is rejected
 * (return on the elided line after the check).
 */
1479 if (coreidx >= si->numcores)
1483 * If the user has provided an interrupt mask enabled function,
1484 * then assert interrupts are disabled before switching the core.
1486 ASSERT((si->intrsenabled_fn == NULL)
1487 || !(*(si)->intrsenabled_fn) ((si)->intr_arg));
/* backplane address of the selected core's register space */
1489 sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);
1491 switch (BUSTYPE(si->sb.bustype)) {
1494 if (!si->regs[coreidx]) {
1496 (void *)REG_MAP(sbaddr, SB_CORE_SIZE);
1497 ASSERT(GOODREGS(si->regs[coreidx]));
1499 si->curmap = si->regs[coreidx];
1503 /* point bar0 window */
1504 OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
/* PCMCIA: window address is split across three attribute registers */
1508 tmp = (sbaddr >> 12) & 0x0f;
1509 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
1510 tmp = (sbaddr >> 16) & 0xff;
1511 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
1512 tmp = (sbaddr >> 24) & 0xff;
1513 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
1518 if (!si->regs[coreidx]) {
1519 si->regs[coreidx] = (void *)sbaddr;
1520 ASSERT(GOODREGS(si->regs[coreidx]));
1522 si->curmap = si->regs[coreidx];
1524 #endif /* BCMJTAG */
1527 si->curidx = coreidx;
1529 return (si->curmap);
1533 * this function changes logical "focus" to the indiciated core,
1534 * must be called with interrupt off.
1535 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1537 void *sb_setcore(sb_t * sbh, uint coreid, uint coreunit)
/* Convenience wrapper: resolve (coreid, coreunit) to an index, then switch
 * focus via sb_setcoreidx().  The BADIDX check is on an elided line. */
1541 idx = sb_findcoreidx(sbh, coreid, coreunit);
1545 return (sb_setcoreidx(sbh, idx));
1548 /* return chip number */
1549 uint BCMINITFN(sb_chip) (sb_t * sbh) {
/* simple accessor for the cached chip id (si setup is on an elided line) */
1553 return (si->sb.chip);
1556 /* return chip revision number */
1557 uint BCMINITFN(sb_chiprev) (sb_t * sbh) {
/* simple accessor for the cached chip revision */
1561 return (si->sb.chiprev);
1564 /* return chip common revision number */
1565 uint BCMINITFN(sb_chipcrev) (sb_t * sbh) {
/* simple accessor for the cached chipcommon core revision */
1569 return (si->sb.ccrev);
1572 /* return chip package option */
1573 uint BCMINITFN(sb_chippkg) (sb_t * sbh) {
/* simple accessor for the cached package option */
1577 return (si->sb.chippkg);
1580 /* return PCI core rev. */
1581 uint BCMINITFN(sb_pcirev) (sb_t * sbh) {
/* returns the bus core revision (valid as PCI rev when the bus core is PCI) */
1585 return (si->sb.buscorerev);
1588 bool BCMINITFN(sb_war16165) (sb_t * sbh) {
/* WAR 16165 applies only on a PCI bus core with corerev <= 10 */
1593 return (PCI(si) && (si->sb.buscorerev <= 10));
1596 static void BCMINITFN(sb_war30841) (sb_info_t * si) {
/* WAR 30841: retune the PCIE SERDES receiver via three MDIO writes
 * (timer, CDR, CDR bandwidth) - magic values are from the workaround */
1597 sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
1598 sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
1599 sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
1602 /* return PCMCIA core rev. */
1603 uint BCMINITFN(sb_pcmciarev) (sb_t * sbh) {
/* returns the bus core revision (valid as PCMCIA rev when bus core is PCMCIA) */
1607 return (si->sb.buscorerev);
1610 /* return board vendor id */
1611 uint BCMINITFN(sb_boardvendor) (sb_t * sbh) {
/* simple accessor for the cached board vendor id */
1615 return (si->sb.boardvendor);
1618 /* return boardtype */
1619 uint BCMINITFN(sb_boardtype) (sb_t * sbh) {
/*
 * Return the board type; on SB_BUS with an unset (0xffff) boardtype,
 * lazily resolve it from the "boardtype" nvram variable, falling back to
 * matching legacy board-name strings for very old boards.
 */
1625 if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
1626 /* boardtype format is a hex string */
1627 si->sb.boardtype = getintvar(NULL, "boardtype");
1629 /* backward compatibility for older boardtype string format */
1630 if ((si->sb.boardtype == 0)
1631 && (var = getvar(NULL, "boardtype"))) {
1632 if (!strcmp(var, "bcm94710dev"))
1633 si->sb.boardtype = BCM94710D_BOARD;
1634 else if (!strcmp(var, "bcm94710ap"))
1635 si->sb.boardtype = BCM94710AP_BOARD;
1636 else if (!strcmp(var, "bu4710"))
1637 si->sb.boardtype = BU4710_BOARD;
1638 else if (!strcmp(var, "bcm94702mn"))
1639 si->sb.boardtype = BCM94702MN_BOARD;
1640 else if (!strcmp(var, "bcm94710r1"))
1641 si->sb.boardtype = BCM94710R1_BOARD;
1642 else if (!strcmp(var, "bcm94710r4"))
1643 si->sb.boardtype = BCM94710R4_BOARD;
1644 else if (!strcmp(var, "bcm94702cpci"))
1645 si->sb.boardtype = BCM94702CPCI_BOARD;
1646 else if (!strcmp(var, "bcm95380_rr"))
1647 si->sb.boardtype = BCM95380RR_BOARD;
1651 return (si->sb.boardtype);
1654 /* return bus type of sbh device */
1655 uint sb_bus(sb_t * sbh)
/* simple accessor for the cached bus type */
1660 return (si->sb.bustype);
1663 /* return bus core type */
1664 uint sb_buscoretype(sb_t * sbh)
/* simple accessor for the cached bus core type */
1670 return (si->sb.buscoretype);
1673 /* return bus core revision */
1674 uint sb_buscorerev(sb_t * sbh)
/* simple accessor for the cached bus core revision */
1679 return (si->sb.buscorerev);
1682 /* return list of found cores */
1683 uint sb_corelist(sb_t * sbh, uint coreid[])
/* Copy the cached coreid[] table into the caller's array; caller must
 * provide room for si->numcores entries.  Returns the core count. */
1689 bcopy((uchar *) si->coreid, (uchar *) coreid,
1690 (si->numcores * sizeof(uint)));
1691 return (si->numcores);
1694 /* return current register mapping */
1695 void *sb_coreregs(sb_t * sbh)
/* returns the register mapping of the currently-focused core */
1700 ASSERT(GOODREGS(si->curmap));
1702 return (si->curmap);
1705 #if defined(BCMDBG_ASSERT)
1706 /* traverse all cores to find and clear source of serror */
1707 static void sb_serr_clear(sb_info_t * si)
/*
 * Debug-build helper: with interrupts masked, visit every core and clear
 * any pending SError indication in sbtmstatehigh, logging the offender.
 * Restores the originally-focused core before returning.
 */
1711 uint i, intr_val = 0;
1712 void *corereg = NULL;
1714 INTR_OFF(si, intr_val);
1715 origidx = sb_coreidx(&si->sb);
1717 for (i = 0; i < si->numcores; i++) {
1718 corereg = sb_setcoreidx(&si->sb, i);
1719 if (NULL != corereg) {
1720 sb = REGS2SB(corereg);
1721 if ((R_SBREG(si, &sb->sbtmstatehigh)) & SBTMH_SERR) {
1722 AND_SBREG(si, &sb->sbtmstatehigh, ~SBTMH_SERR);
1723 SB_ERROR(("sb_serr_clear: SError at core 0x%x\n", sb_coreid(&si->sb)));
1728 sb_setcoreidx(&si->sb, origidx);
1729 INTR_RESTORE(si, intr_val);
1733 * Check if any inband, outband or timeout errors has happened and clear them.
1734 * Must be called with chip clk on !
1736 bool sb_taclear(sb_t * sbh)
/*
 * Detect and clear target-abort / SError / timeout conditions, dispatching
 * on bus type (PCI vs PCMCIA).  Returns whether anything was cleared
 * (the final return is on an elided line).
 * NOTE(review): listing is elided - several locals (si, sb, stcmd reads,
 * origidx, intr_val) and some closing braces are not visible here.
 */
1743 uint32 inband = 0, serror = 0, timeout = 0;
1744 void *corereg = NULL;
1745 volatile uint32 imstate, tmstate;
1749 if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
1750 volatile uint32 stcmd;
1752 /* inband error is Target abort for PCI */
1754 OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32));
1755 inband = stcmd & PCI_CFG_CMD_STAT_TA;
1758 SB_ERROR(("inband:\n"));
1759 sb_viewall((void *)si);
/* write status back to clear the TA bit (write-1-to-clear) */
1761 OSL_PCI_WRITE_CONFIG(si->osh, PCI_CFG_CMD,
1762 sizeof(uint32), stcmd);
1767 OSL_PCI_READ_CONFIG(si->osh, PCI_INT_STATUS,
1769 serror = stcmd & PCI_SBIM_STATUS_SERR;
1772 SB_ERROR(("serror:\n"));
1773 sb_viewall((void *)si);
1776 OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_STATUS,
1777 sizeof(uint32), stcmd);
/* read/clear inband-error and timeout bits in the bus core's sbimstate */
1781 imstate = sb_corereg(sbh, si->sb.buscoreidx,
1782 SBCONFIGOFF + OFFSETOF(sbconfig_t,
1784 if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
1785 sb_corereg(sbh, si->sb.buscoreidx,
1786 SBCONFIGOFF + OFFSETOF(sbconfig_t,
1788 (imstate & ~(SBIM_IBE | SBIM_TO)));
1789 /* inband = imstate & SBIM_IBE; same as TA above */
1790 timeout = imstate & SBIM_TO;
1793 SB_ERROR(("timeout:\n"));
1794 sb_viewall((void *)si);
1800 /* dump errlog for sonics >= 2.3 */
/* NOTE(review): the trailing ';' makes this an empty statement - the errlog
 * dump is in the (partly elided) else-arm, i.e. it runs for any sonicsrev
 * other than 2.2.  Comment above says ">= 2.3" - confirm intent. */
1801 if (si->sb.sonicsrev == SONICS_2_2) ;
1803 uint32 imerrlog, imerrloga;
1805 sb_corereg(sbh, si->sb.buscoreidx,
1807 if (imerrlog & SBTMEL_EC) {
1809 sb_corereg(sbh, si->sb.buscoreidx,
1812 sb_corereg(sbh, si->sb.buscoreidx,
1814 SB_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n", imerrlog, imerrloga));
1819 } else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
1821 INTR_OFF(si, intr_val);
1822 origidx = sb_coreidx(sbh);
1824 corereg = sb_setcore(sbh, SB_PCMCIA, 0);
1825 if (NULL != corereg) {
1826 sb = REGS2SB(corereg);
1828 imstate = R_SBREG(si, &sb->sbimstate);
1829 /* handle surprise removal */
1830 if ((imstate != 0xffffffff)
1831 && (imstate & (SBIM_IBE | SBIM_TO))) {
1832 AND_SBREG(si, &sb->sbimstate,
1833 ~(SBIM_IBE | SBIM_TO));
1834 inband = imstate & SBIM_IBE;
1835 timeout = imstate & SBIM_TO;
1837 tmstate = R_SBREG(si, &sb->sbtmstatehigh);
1838 if ((tmstate != 0xffffffff)
1839 && (tmstate & SBTMH_INT_STATUS)) {
/* ack and clear the target-state interrupt */
1844 OR_SBREG(si, &sb->sbtmstatelow, SBTML_INT_ACK);
1845 AND_SBREG(si, &sb->sbtmstatelow,
1849 sb_setcoreidx(sbh, origidx);
1850 INTR_RESTORE(si, intr_val);
1854 if (inband | timeout | serror) {
1856 SB_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n", inband, serror, timeout));
1863 /* do buffered registers update */
1864 void sb_commit(sb_t * sbh)
/*
 * Flush buffered backplane register writes by broadcasting the commit
 * address/data pair through chipcommon (preferred) or the PCI core.
 * Interrupts are held off and core focus is restored before returning.
 */
1872 origidx = si->curidx;
1873 ASSERT(GOODIDX(origidx));
1875 INTR_OFF(si, intr_val);
1877 /* switch over to chipcommon core if there is one, else use pci */
1878 if (si->sb.ccrev != NOREV) {
1879 chipcregs_t *ccregs = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0);
1881 /* do the buffer registers update */
1882 W_REG(si->osh, &ccregs->broadcastaddress, SB_COMMIT);
1883 W_REG(si->osh, &ccregs->broadcastdata, 0x0);
1884 } else if (PCI(si)) {
1885 sbpciregs_t *pciregs =
1886 (sbpciregs_t *) sb_setcore(sbh, SB_PCI, 0);
1888 /* do the buffer registers update */
1889 W_REG(si->osh, &pciregs->bcastaddr, SB_COMMIT);
1890 W_REG(si->osh, &pciregs->bcastdata, 0x0);
1894 /* restore core index */
1895 sb_setcoreidx(sbh, origidx);
1896 INTR_RESTORE(si, intr_val);
1899 /* reset and re-enable a core
1901 * bits - core specific bits that are set during and after reset sequence
1902 * resetbits - core specific bits that are set only during reset sequence
1904 void sb_core_reset(sb_t * sbh, uint32 bits, uint32 resetbits)
/*
 * Standard SB core reset: disable first (works from any core state), then
 * assert reset with forced clocks, clear error state, release reset, and
 * finally drop the forced-gated-clock bit leaving the clock enabled.
 * The dummy reads after each write flush/settle the backplane write.
 */
1908 volatile uint32 dummy;
1911 ASSERT(GOODREGS(si->curmap));
1912 sb = REGS2SB(si->curmap);
1915 * Must do the disable sequence first to work for arbitrary current core state.
1917 sb_core_disable(sbh, (bits | resetbits));
1920 * Now do the initialization sequence.
1923 /* set reset while enabling the clock and forcing them on throughout the core */
1924 W_SBREG(si, &sb->sbtmstatelow,
1925 (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits | resetbits));
1926 dummy = R_SBREG(si, &sb->sbtmstatelow);
/* clear any latched SError / inband-error / timeout state left over */
1929 if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
1930 W_SBREG(si, &sb->sbtmstatehigh, 0);
1932 if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
1933 AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
1936 /* clear reset and allow it to propagate throughout the core */
1937 W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
1938 dummy = R_SBREG(si, &sb->sbtmstatelow);
1941 /* leave clock enabled */
1942 W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
1943 dummy = R_SBREG(si, &sb->sbtmstatelow);
1947 void sb_core_tofixup(sb_t * sbh)
/*
 * Adjust the current core's initiator request/service timeouts
 * (sbimconfiglow RTO/STO fields).  Skipped entirely for PCIE, and for PCI
 * rev >= 5 on a PCI bus; SB_BUS and PCI cores get specific timeout values,
 * everything else gets the fields cleared.  A sb_commit() presumably
 * follows on an elided line - confirm.
 */
1954 if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
1955 (PCI(si) && (si->sb.buscorerev >= 5)))
1958 ASSERT(GOODREGS(si->curmap));
1959 sb = REGS2SB(si->curmap);
1961 if (BUSTYPE(si->sb.bustype) == SB_BUS) {
1962 SET_SBREG(si, &sb->sbimconfiglow,
1963 SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1964 (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
1966 if (sb_coreid(sbh) == SB_PCI) {
1967 SET_SBREG(si, &sb->sbimconfiglow,
1968 SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1969 (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
1971 SET_SBREG(si, &sb->sbimconfiglow,
1972 (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
1980 * Set the initiator timeout for the "master core".
1981 * The master core is defined to be the core in control
1982 * of the chip and so it issues accesses to non-memory
1983 * locations (Because of dma *any* core can access memeory).
1985 * The routine uses the bus to decide who is the master:
1988 * PCI_BUS => pci or pcie
1989 * PCMCIA_BUS => pcmcia
1990 * SDIO_BUS => pcmcia
1992 * This routine exists so callers can disable initiator
1993 * timeouts so accesses to very slow devices like otp
1994 * won't cause an abort. The routine allows arbitrary
1995 * settings of the service and request timeouts, though.
1997 * Returns the timeout state before changing it or -1
/* combined mask of the request- and service-timeout fields */
2001 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
2003 uint32 sb_set_initiator_to(sb_t * sbh, uint32 to, uint idx)
/*
 * Write 'to' into the master core's sbimconfiglow timeout fields and
 * return the previous field value (0xffffffff on invalid input).
 * idx == BADIDX means "find the master core from the bus type".
 */
2008 uint32 tmp, ret = 0xffffffff;
/* reject values with bits outside the timeout fields */
2013 if ((to & ~TO_MASK) != 0)
2016 /* Figure out the master core */
2017 if (idx == BADIDX) {
2018 switch (BUSTYPE(si->sb.bustype)) {
2020 idx = si->sb.buscoreidx;
2027 idx = sb_findcoreidx(sbh, SB_PCMCIA, 0);
2030 if ((idx = sb_findcoreidx(sbh, SB_MIPS33, 0)) == BADIDX)
2031 idx = sb_findcoreidx(sbh, SB_MIPS, 0);
2040 INTR_OFF(si, intr_val);
2041 origidx = sb_coreidx(sbh);
2043 sb = REGS2SB(sb_setcoreidx(sbh, idx));
2045 tmp = R_SBREG(si, &sb->sbimconfiglow);
2046 ret = tmp & TO_MASK;
2047 W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
/* restore focus and interrupts before returning the old timeout bits */
2050 sb_setcoreidx(sbh, origidx);
2051 INTR_RESTORE(si, intr_val);
2055 void sb_core_disable(sb_t * sbh, uint32 bits)
/*
 * Put the current core into reset following the documented SB sequence:
 * assert target reject, wait for target busy to clear, optionally assert
 * initiator reject and wait for initiator busy, then latch reset with
 * clocks forced, clear the initiator reject, and finally leave reset and
 * reject asserted with clocks off.  'bits' are core-specific state bits
 * to keep asserted throughout.
 */
2058 volatile uint32 dummy;
2064 ASSERT(GOODREGS(si->curmap));
2065 sb = REGS2SB(si->curmap);
2067 /* if core is already in reset, just return */
2068 if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
2071 /* reject value changed between sonics 2.2 and 2.3 */
2072 if (si->sb.sonicsrev == SONICS_2_2)
2073 rej = (1 << SBTML_REJ_SHIFT);
2075 rej = (2 << SBTML_REJ_SHIFT);
2077 /* if clocks are not enabled, put into reset and return */
2078 if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
2081 /* set target reject and spin until busy is clear (preserve core-specific bits) */
2082 OR_SBREG(si, &sb->sbtmstatelow, rej);
2083 dummy = R_SBREG(si, &sb->sbtmstatelow);
2085 SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
2086 if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY)
2087 SB_ERROR(("%s: target state still busy\n", __FUNCTION__));
/* if the core is also an initiator, reject and drain its transactions too */
2089 if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
2090 OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
2091 dummy = R_SBREG(si, &sb->sbimstate);
2093 SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
2096 /* set reset and reject while enabling the clocks */
2097 W_SBREG(si, &sb->sbtmstatelow,
2098 (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
2099 dummy = R_SBREG(si, &sb->sbtmstatelow);
2102 /* don't forget to clear the initiator reject bit */
2103 if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
2104 AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);
2107 /* leave reset and reject asserted */
2108 W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
2112 /* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
2113 void sb_watchdog(sb_t * sbh, uint ticks)
2115 sb_info_t *si = SB_INFO(sbh);
2117 /* make sure we come up in fast clock mode; or if clearing, clear clock */
/* arming (ticks != 0, presumably) forces fast clock; clearing goes dynamic */
2119 sb_clkctl_clk(sbh, CLK_FAST);
2121 sb_clkctl_clk(sbh, CLK_DYNAMIC);
/* 4328 quirk: keep the ROM switch resource powered while the watchdog runs */
2123 if (sbh->chip == BCM4328_CHIP_ID && ticks != 0)
2124 sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, min_res_mask),
2125 PMURES_BIT(RES4328_ROM_SWITCH),
2126 PMURES_BIT(RES4328_ROM_SWITCH));
/* program the watchdog in whichever core controls it (chipcommon or extif) */
2129 switch (si->gpioid) {
2131 sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0,
2135 sb_corereg(sbh, si->gpioidx, OFFSETOF(extifregs_t, watchdog),
2141 /* initialize the pcmcia core */
2142 void sb_pcmcia_init(sb_t * sbh)
/* Read-modify-write the function 0 Configuration Option Register to enable
 * the function and its interrupt delivery. */
2149 /* enable d11 mac interrupts */
2150 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
2151 cor |= COR_IRQEN | COR_FUNEN;
2152 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
2156 void BCMINITFN(sb_pci_up) (sb_t * sbh) {
2157 sb_info_t *si = SB_INFO(sbh);
/* EXTIF-controlled boards have no PCI work to do (body of this early-out
 * is on an elided line - presumably a return; confirm) */
2158 if (si->gpioid == SB_EXTIF)
2161 /* if not pci bus, we're done */
2162 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
/* apply power-up workarounds: forced HT clock, clkreq enable, and a D11
 * initiator-timeout bump for specific 4311/4312 revisions */
2165 if (FORCEHT_WAR32414(si))
2166 sb_war32414_forceHT(sbh, 1);
2168 if (PCIE_ASPMWARS(si) || si->sb.pr42780)
2169 sb_pcieclkreq(sbh, 1, 0);
2172 (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
2173 ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
2174 sb_set_initiator_to((void *)si, 0x3,
2175 sb_findcoreidx((void *)si, SB_D11, 0));
2178 /* Unconfigure and/or apply various WARs when system is going to sleep mode */
2179 void BCMUNINITFN(sb_pci_sleep) (sb_t * sbh) {
2180 sb_info_t *si = SB_INFO(sbh);
/* EXTIF-controlled boards: early-out (body elided; presumably a return) */
2181 if (si->gpioid == SB_EXTIF)
2185 /* if not pci bus, we're done */
2186 if (!PCIE(si) || !PCIE_ASPMWARS(si))
/* disable ASPM L1 in the PCIE Link Control register before sleeping */
2189 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset,
2191 w &= ~PCIE_CAP_LCREG_ASPML1;
2192 OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32),
2196 /* Unconfigure and/or apply various WARs when going down */
2197 void BCMINITFN(sb_pci_down) (sb_t * sbh) {
2198 sb_info_t *si = SB_INFO(sbh);
/* EXTIF-controlled boards: early-out (body elided; presumably a return) */
2199 if (si->gpioid == SB_EXTIF)
2202 /* if not pci bus, we're done */
2203 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
2206 if (FORCEHT_WAR32414(si))
2207 sb_war32414_forceHT(sbh, 0);
/* release the clkreq workaround state taken at up/init time */
2209 if (si->pr42767_war) {
2210 sb_pcieclkreq(sbh, 1, 1);
2211 si->pr42767_war = FALSE;
2212 } else if (si->sb.pr42780) {
2213 sb_pcieclkreq(sbh, 1, 1);
2217 static void BCMINITFN(sb_war42767_clkreq) (sb_t * sbh) {
/*
 * WAR 42767: toggle the CLKREQ-enable bit in the PCIE core's shadow SPROM
 * region.  Unless the override flag is set, advertise clkreq support and
 * record that the workaround is active (pr42767/pr42780 flags).
 */
2218 sbpcieregs_t *pcieregs;
2219 uint16 val16, *reg16;
2224 /* if not pcie bus, we're done */
2225 if (!PCIE(si) || !PCIE_ASPMWARS(si))
2228 pcieregs = (sbpcieregs_t *) sb_setcoreidx(sbh, si->sb.buscoreidx);
2229 reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
2230 val16 = R_REG(si->osh, reg16);
2231 /* if clockreq is not advertized advertize it */
2232 if (!si->pcie_war_ovr) {
2233 val16 |= SRSH_CLKREQ_ENB;
2234 si->pr42767_war = TRUE;
2236 si->sb.pr42780 = TRUE;
2238 val16 &= ~SRSH_CLKREQ_ENB;
2239 W_REG(si->osh, reg16, val16);
2242 static void BCMINITFN(sb_war42767) (sb_t * sbh) {
/* WAR 42767: if the SERDES PLL frequency-detect enable bit is set, clear
 * it via MDIO.  PCIE-with-ASPM-WARs only. */
2248 /* if not pcie bus, we're done */
2249 if (!PCIE(si) || !PCIE_ASPMWARS(si))
2252 sb_pcie_mdioread(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
2253 if (w & PLL_CTRL_FREQDET_EN) {
2254 w &= ~PLL_CTRL_FREQDET_EN;
2255 sb_pcie_mdiowrite(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
2260 * Configure the pci core for pci client (NIC) action
2261 * coremask is the bitvec of cores by index to be enabled.
2263 void BCMINITFN(sb_pci_setup) (sb_t * sbh, uint coremask) {
/*
 * NIC-mode bus-core setup: route sb->pci interrupts for the cores in
 * 'coremask', enable prefetch/burst on the sb-to-pci window, and apply the
 * revision-specific PCI/PCIE workarounds.  Core focus is saved/restored.
 * NOTE(review): listing is elided - declarations of si/sb/w/idx/sbflag and
 * some branch heads are not visible here.
 */
2266 sbpciregs_t *pciregs;
2273 /* if not pci bus, we're done */
2274 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
2277 ASSERT(PCI(si) || PCIE(si));
2278 ASSERT(si->sb.buscoreidx != BADIDX);
2280 /* get current core index */
2283 /* we interrupt on this backplane flag number */
2284 ASSERT(GOODREGS(si->curmap));
2285 sb = REGS2SB(si->curmap);
2286 sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
2288 /* switch over to pci core */
2289 pciregs = (sbpciregs_t *) sb_setcoreidx(sbh, si->sb.buscoreidx);
2290 sb = REGS2SB(pciregs);
2293 * Enable sb->pci interrupts. Assume
2294 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
2296 if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
2297 /* pci config write to set this core bit in PCIIntMask */
2298 w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
2299 w |= (coremask << PCI_SBIM_SHIFT);
2300 OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
2302 /* set sbintvec bit for our flag number */
2303 OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
/* PCI-core path: enable prefetch/burst, read-multiple on newer revs, and
 * tighten initiator timeouts on very old revs */
2307 OR_REG(si->osh, &pciregs->sbtopci2,
2308 (SBTOPCI_PREF | SBTOPCI_BURST));
2309 if (si->sb.buscorerev >= 11)
2310 OR_REG(si->osh, &pciregs->sbtopci2,
2311 SBTOPCI_RC_READMULTI);
2312 if (si->sb.buscorerev < 5) {
2313 SET_SBREG(si, &sb->sbimconfiglow,
2314 SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
2315 (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
2320 /* PCIE workarounds */
2322 if ((si->sb.buscorerev == 0) || (si->sb.buscorerev == 1)) {
2323 w = sb_pcie_readreg((void *)(uintptr) sbh,
2324 (void *)(uintptr) PCIE_PCIEREGS,
2325 PCIE_TLP_WORKAROUNDSREG);
2327 sb_pcie_writereg((void *)(uintptr) sbh,
2328 (void *)(uintptr) PCIE_PCIEREGS,
2329 PCIE_TLP_WORKAROUNDSREG, w);
2332 if (si->sb.buscorerev == 1) {
2333 w = sb_pcie_readreg((void *)(uintptr) sbh,
2334 (void *)(uintptr) PCIE_PCIEREGS,
2337 sb_pcie_writereg((void *)(uintptr) sbh,
2338 (void *)(uintptr) PCIE_PCIEREGS,
2339 PCIE_DLLP_LCREG, w);
2342 if (si->sb.buscorerev == 0)
/* revs 3..5: raise the L1 entry threshold in the DLLP PM threshold reg */
2345 if ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)) {
2346 w = sb_pcie_readreg((void *)(uintptr) sbh,
2347 (void *)(uintptr) PCIE_PCIEREGS,
2348 PCIE_DLLP_PMTHRESHREG);
2349 w &= ~(PCIE_L1THRESHOLDTIME_MASK);
2350 w |= (PCIE_L1THRESHOLD_WARVAL <<
2351 PCIE_L1THRESHOLDTIME_SHIFT);
2352 sb_pcie_writereg((void *)(uintptr) sbh,
2353 (void *)(uintptr) PCIE_PCIEREGS,
2354 PCIE_DLLP_PMTHRESHREG, w);
2360 sb_war43448_aspm(sbh);
2361 sb_war42767_clkreq(sbh);
2365 /* switch back to previous core */
2366 sb_setcoreidx(sbh, idx);
2369 uint32 sb_base(uint32 admatch)
/*
 * Decode the base address out of an SB address-match register value.
 * The two type bits select which mask applies; negative decode (ADNEG)
 * is not supported for types 1 and 2.
 */
2374 type = admatch & SBAM_TYPE_MASK;
2380 base = admatch & SBAM_BASE0_MASK;
2381 } else if (type == 1) {
2382 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2383 base = admatch & SBAM_BASE1_MASK;
2384 } else if (type == 2) {
2385 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2386 base = admatch & SBAM_BASE2_MASK;
2392 uint32 sb_size(uint32 admatch)
/*
 * Decode the region size out of an SB address-match register value:
 * size = 2^(encoded-exponent + offset), with the exponent field position
 * selected by the type bits.  ADNEG unsupported for types 1 and 2.
 */
2397 type = admatch & SBAM_TYPE_MASK;
2404 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) +
2406 } else if (type == 1) {
2407 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2409 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) +
2411 } else if (type == 2) {
2412 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2414 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) +
2421 /* return the core-type instantiation # of the current core */
2422 uint sb_coreunit(sb_t * sbh)
/* Count how many earlier cores share the current core's id - that count
 * is this core's unit number (returned on an elided line). */
2435 ASSERT(GOODREGS(si->curmap));
2436 coreid = sb_coreid(sbh);
2438 /* count the cores of our type */
2439 for (i = 0; i < idx; i++)
2440 if (si->coreid[i] == coreid)
/* factor6: map a PLL type-6 field encoding to its multiplier/divisor
 * (entire body is on elided lines of this listing) */
2446 static uint32 BCMINITFN(factor6) (uint32 x) {
2465 /* calculate the speed the SB would run at given a set of clockcontrol values */
2466 uint32 BCMINITFN(sb_clock_rate) (uint32 pll_type, uint32 n, uint32 m) {
/*
 * Compute the backplane clock in Hz from the clockcontrol N and M register
 * values, per PLL type.  First derive the reference * N product, then
 * apply the M divisors selected by the MC (mode control) field.
 * NOTE(review): the per-mode return expressions for type 2/5/6 and the
 * n1/n2 factor adjustments are partly on elided lines.
 */
2467 uint32 n1, n2, clock, m1, m2, m3, mc;
2469 n1 = n & CN_N1_MASK;
2470 n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
2472 if (pll_type == PLL_TYPE6) {
2473 if (m & CC_T6_MMASK)
2477 } else if ((pll_type == PLL_TYPE1) ||
2478 (pll_type == PLL_TYPE3) ||
2479 (pll_type == PLL_TYPE4) || (pll_type == PLL_TYPE7)) {
2482 } else if (pll_type == PLL_TYPE2) {
2485 ASSERT((n1 >= 2) && (n1 <= 7));
2486 ASSERT((n2 >= 5) && (n2 <= 23));
2487 } else if (pll_type == PLL_TYPE5) {
2491 /* PLL types 3 and 7 use BASE2 (25Mhz) */
2492 if ((pll_type == PLL_TYPE3) || (pll_type == PLL_TYPE7)) {
2493 clock = CC_CLOCK_BASE2 * n1 * n2;
2495 clock = CC_CLOCK_BASE1 * n1 * n2;
/* zero product means invalid inputs - bail (return on elided line) */
2500 m1 = m & CC_M1_MASK;
2501 m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
2502 m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
2503 mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
2505 if ((pll_type == PLL_TYPE1) ||
2506 (pll_type == PLL_TYPE3) ||
2507 (pll_type == PLL_TYPE4) || (pll_type == PLL_TYPE7)) {
2509 if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
/* divisor combination chosen by the MC field */
2519 return (clock / m1);
2521 return (clock / (m1 * m2));
2523 return (clock / (m1 * m2 * m3));
2525 return (clock / (m1 * m3));
2530 ASSERT(pll_type == PLL_TYPE2);
2535 ASSERT((m1 >= 2) && (m1 <= 7));
2536 ASSERT((m2 >= 3) && (m2 <= 10));
2537 ASSERT((m3 >= 2) && (m3 <= 7));
/* type 2: each M stage can be individually bypassed via MC bits */
2539 if ((mc & CC_T2MC_M1BYP) == 0)
2541 if ((mc & CC_T2MC_M2BYP) == 0)
2543 if ((mc & CC_T2MC_M3BYP) == 0)
2550 /* returns the current speed the SB is running at */
2551 uint32 BCMINITFN(sb_clock) (sb_t * sbh) {
/*
 * Read the clockcontrol registers from the extif or chipcommon core (with
 * interrupts off) and convert them to Hz via sb_clock_rate().  PMU-equipped
 * chips are special-cased (5354 fixed 120MHz; 4328 handled on elided
 * lines).  Core focus and interrupt state are restored before returning.
 */
2557 uint32 cap, pll_type, rate;
2562 pll_type = PLL_TYPE1;
2564 INTR_OFF(si, intr_val);
2566 /* switch to extif or chipc core */
2567 if ((eir = (extifregs_t *) sb_setcore(sbh, SB_EXTIF, 0))) {
2568 n = R_REG(si->osh, &eir->clockcontrol_n);
2569 m = R_REG(si->osh, &eir->clockcontrol_sb);
2570 } else if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0))) {
2572 cap = R_REG(si->osh, &cc->capabilities);
2574 if (cap & CC_CAP_PMU) {
2576 if (sb_chip(sbh) == BCM5354_CHIP_ID) {
2577 /* 5354 has a constant sb clock of 120MHz */
2581 if (sb_chip(sbh) == BCM4328_CHIP_ID) {
2588 pll_type = cap & CC_CAP_PLL_MASK;
2589 if (pll_type == PLL_NONE) {
2590 INTR_RESTORE(si, intr_val);
2593 n = R_REG(si->osh, &cc->clockcontrol_n);
/* which M register holds the SB divisor depends on PLL type (and 5365) */
2594 if (pll_type == PLL_TYPE6)
2595 m = R_REG(si->osh, &cc->clockcontrol_m3);
2596 else if (pll_type == PLL_TYPE3
2597 && !(BCMINIT(sb_chip) (sbh) == 0x5365))
2598 m = R_REG(si->osh, &cc->clockcontrol_m2);
2600 m = R_REG(si->osh, &cc->clockcontrol_sb);
2602 INTR_RESTORE(si, intr_val);
2606 /* calculate rate */
2607 if (BCMINIT(sb_chip) (sbh) == 0x5365)
2610 rate = sb_clock_rate(pll_type, n, m);
/* type 3 rate adjustment is on an elided line */
2612 if (pll_type == PLL_TYPE3)
2617 /* switch back to previous core */
2618 sb_setcoreidx(sbh, idx);
2620 INTR_RESTORE(si, intr_val);
2625 uint32 BCMINITFN(sb_alp_clock) (sb_t * sbh) {
/* Return the ALP clock: the PMU-reported value when a PMU is present,
 * otherwise the compile-time default ALP_CLOCK. */
2626 uint32 clock = ALP_CLOCK;
2628 if (sbh->cccaps & CC_CAP_PMU)
2629 clock = sb_pmu_alp_clock(sbh, sb_osh(sbh));
2634 /* change logical "focus" to the gpio core for optimized access */
2635 void *sb_gpiosetcore(sb_t * sbh)
/* focuses the core selected as gpio controller during sb_scan() */
2641 return (sb_setcoreidx(sbh, si->gpioidx));
2644 /* mask&set gpio control bits */
2645 uint32 sb_gpiocontrol(sb_t * sbh, uint32 mask, uint32 val, uint8 priority)
/*
 * Read-modify-write the gpiocontrol register of the gpio-controlling core.
 * On SB_BUS, non-high-priority callers have their mask filtered against
 * the global gpio reservation bitmap so shared pins are not clobbered.
 */
2653 /* gpios could be shared on router platforms
2654 * ignore reservation if it's high priority (e.g., test apps)
2656 if ((priority != GPIO_HI_PRIORITY) &&
2657 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2658 mask = priority ? (sb_gpioreservation & mask) :
2659 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
/* register offset differs per controlling core type */
2663 switch (si->gpioid) {
2665 regoff = OFFSETOF(chipcregs_t, gpiocontrol);
2669 regoff = OFFSETOF(sbpciregs_t, gpiocontrol);
2676 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
2679 /* mask&set gpio output enable bits */
2680 uint32 sb_gpioouten(sb_t * sbh, uint32 mask, uint32 val, uint8 priority)
/*
 * Read-modify-write the gpio output-enable register of the controlling
 * core, honoring the shared-pin reservation rules (see sb_gpiocontrol).
 */
2688 /* gpios could be shared on router platforms
2689 * ignore reservation if it's high priority (e.g., test apps)
2691 if ((priority != GPIO_HI_PRIORITY) &&
2692 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2693 mask = priority ? (sb_gpioreservation & mask) :
2694 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2698 switch (si->gpioid) {
2700 regoff = OFFSETOF(chipcregs_t, gpioouten);
2704 regoff = OFFSETOF(sbpciregs_t, gpioouten);
2708 regoff = OFFSETOF(extifregs_t, gpio[0].outen);
2712 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
2715 /* mask&set gpio output bits */
2716 uint32 sb_gpioout(sb_t * sbh, uint32 mask, uint32 val, uint8 priority)
/*
 * Read-modify-write the gpio output register of the controlling core,
 * honoring the shared-pin reservation rules (see sb_gpiocontrol).
 */
2724 /* gpios could be shared on router platforms
2725 * ignore reservation if it's high priority (e.g., test apps)
2727 if ((priority != GPIO_HI_PRIORITY) &&
2728 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2729 mask = priority ? (sb_gpioreservation & mask) :
2730 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2734 switch (si->gpioid) {
2736 regoff = OFFSETOF(chipcregs_t, gpioout);
2740 regoff = OFFSETOF(sbpciregs_t, gpioout);
2744 regoff = OFFSETOF(extifregs_t, gpio[0].out);
2748 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
2751 /* reserve one gpio */
2752 uint32 sb_gpioreserve(sb_t * sbh, uint32 gpio_bitmask, uint8 priority)
/*
 * Mark a single gpio as reserved in the global bitmap.  Only meaningful on
 * SB_BUS with a nonzero priority; gpio_bitmask must be a single bit.
 * Returns the updated reservation bitmap (error returns are on elided
 * lines after each validation block).
 */
2758 /* only cores on SB_BUS share GPIO's and only applcation users need to
2759 * reserve/release GPIO
2761 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2762 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2765 /* make sure only one bit is set */
2766 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2767 ASSERT((gpio_bitmask)
2768 && !((gpio_bitmask) & (gpio_bitmask - 1)));
2772 /* already reserved */
2773 if (sb_gpioreservation & gpio_bitmask)
2775 /* set reservation */
2776 sb_gpioreservation |= gpio_bitmask;
2778 return sb_gpioreservation;
2781 /* release one gpio */
2783 * releasing the gpio doesn't change the current value on the GPIO last write value
2784 * persists till some one overwrites it
2787 uint32 sb_gpiorelease(sb_t * sbh, uint32 gpio_bitmask, uint8 priority)
/*
 * Clear a single gpio's reservation bit (mirror of sb_gpioreserve).
 * Same SB_BUS / priority / single-bit validation rules apply.
 */
2793 /* only cores on SB_BUS share GPIO's and only applcation users need to
2794 * reserve/release GPIO
2796 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2797 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2800 /* make sure only one bit is set */
2801 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2802 ASSERT((gpio_bitmask)
2803 && !((gpio_bitmask) & (gpio_bitmask - 1)));
2807 /* already released */
2808 if (!(sb_gpioreservation & gpio_bitmask))
2811 /* clear reservation */
2812 sb_gpioreservation &= ~gpio_bitmask;
2814 return sb_gpioreservation;
2817 /* return the current gpioin register value */
2818 uint32 sb_gpioin(sb_t * sbh)
/* read-only access: sb_corereg with mask=0, val=0 just returns the register */
2826 switch (si->gpioid) {
2828 regoff = OFFSETOF(chipcregs_t, gpioin);
2832 regoff = OFFSETOF(sbpciregs_t, gpioin);
2836 regoff = OFFSETOF(extifregs_t, gpioin);
2840 return (sb_corereg(sbh, si->gpioidx, regoff, 0, 0));
2843 /* mask&set gpio interrupt polarity bits */
2844 uint32 sb_gpiointpolarity(sb_t * sbh, uint32 mask, uint32 val, uint8 priority)
/*
 * Read-modify-write the gpio interrupt-polarity register.  Reservation
 * filtering applies on SB_BUS; the PCI gpio block has no polarity register
 * (that case is handled on an elided line).
 */
2852 /* gpios could be shared on router platforms */
2853 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2854 mask = priority ? (sb_gpioreservation & mask) :
2855 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2859 switch (si->gpioid) {
2861 regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
2865 /* pci gpio implementation does not support interrupt polarity */
2870 regoff = OFFSETOF(extifregs_t, gpiointpolarity);
2874 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
2877 /* mask&set gpio interrupt mask bits */
2878 uint32 sb_gpiointmask(sb_t * sbh, uint32 mask, uint32 val, uint8 priority)
/*
 * Read-modify-write the gpio interrupt-mask register.  Reservation
 * filtering applies on SB_BUS; the PCI gpio block has no interrupt mask
 * (that case is handled on an elided line).
 */
2886 /* gpios could be shared on router platforms */
2887 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2888 mask = priority ? (sb_gpioreservation & mask) :
2889 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2893 switch (si->gpioid) {
2895 regoff = OFFSETOF(chipcregs_t, gpiointmask);
2899 /* pci gpio implementation does not support interrupt mask */
2904 regoff = OFFSETOF(extifregs_t, gpiointmask);
2908 return (sb_corereg(sbh, si->gpioidx, regoff, mask, val));
2911 /* assign the gpio to an led */
2912 uint32 sb_gpioled(sb_t * sbh, uint32 mask, uint32 val)
/* Requires chipcommon rev >= 16 (gpiotimeroutmask first appeared there);
 * the early-out return is on an elided line. */
2917 if (si->sb.ccrev < 16)
2920 /* gpio led powersave reg */
2922 (sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask,
2926 /* mask&set gpio timer val */
2927 uint32 sb_gpiotimerval(sb_t * sbh, uint32 mask, uint32 gpiotimerval)
/* Requires chipcommon rev >= 16; early-out return is on an elided line. */
2932 if (si->sb.ccrev < 16)
2935 return (sb_corereg(sbh, SB_CC_IDX,
2936 OFFSETOF(chipcregs_t, gpiotimerval), mask,
2940 uint32 sb_gpiopull(sb_t * sbh, bool updown, uint32 mask, uint32 val)
/* Mask&set the gpio pull-down (updown true) or pull-up (false) register;
 * requires chipcommon rev >= 20 where these registers were introduced. */
2946 if (si->sb.ccrev < 20)
2950 (updown ? OFFSETOF(chipcregs_t, gpiopulldown) :
2951 OFFSETOF(chipcregs_t, gpiopullup));
2952 return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2955 uint32 sb_gpioevent(sb_t * sbh, uint regtype, uint32 mask, uint32 val)
/*
 * Mask&set one of the three chipcommon gpio event registers (status,
 * interrupt mask, interrupt polarity) selected by 'regtype'.
 * Requires chipcommon rev >= 11; invalid regtype handled on elided line.
 */
2961 if (si->sb.ccrev < 11)
2964 if (regtype == GPIO_REGEVT)
2965 offs = OFFSETOF(chipcregs_t, gpioevent);
2966 else if (regtype == GPIO_REGEVT_INTMSK)
2967 offs = OFFSETOF(chipcregs_t, gpioeventintmask);
2968 else if (regtype == GPIO_REGEVT_INTPOL)
2969 offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
2973 return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2976 void *BCMINITFN(sb_gpio_handler_register) (sb_t * sbh, uint32 event,
2977 bool level, gpio_handler_t cb,
/*
 * Allocate a gpioh_item_t, populate it (field assignments are on elided
 * lines), and push it on the handler list head.  Returns the item pointer
 * as an opaque handle for sb_gpio_handler_unregister(), or NULL on
 * allocation failure / chipcommon rev < 11.
 */
2986 if (si->sb.ccrev < 11)
2989 if ((gi = MALLOC(si->osh, sizeof(gpioh_item_t))) == NULL)
2992 bzero(gi, sizeof(gpioh_item_t));
2998 gi->next = si->gpioh_head;
2999 si->gpioh_head = gi;
3001 return (void *)(gi);
3004 void BCMINITFN(sb_gpio_handler_unregister) (sb_t * sbh, void *gpioh) {
/*
 * Remove and free a handler previously returned by
 * sb_gpio_handler_register().  Head removal is explicit; the list walk for
 * interior nodes is partly on elided lines.  Asserts if not found.
 */
3006 gpioh_item_t *p, *n;
3009 if (si->sb.ccrev < 11)
3012 ASSERT(si->gpioh_head);
3013 if ((void *)si->gpioh_head == gpioh) {
3014 si->gpioh_head = si->gpioh_head->next;
3015 MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
3021 if ((void *)n == gpioh) {
3023 MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
3031 ASSERT(0); /* Not found in list */
/*
 * Dispatch pending GPIO events: sample level state (sb_gpioin) and latched
 * edge events (sb_gpioevent with mask 0 = read only), invoke every
 * registered handler whose event bits are set in the matching status word,
 * then write the edge bits back to clear the latched edge-trigger status.
 */
3034 void sb_gpio_handler_process(sb_t * sbh)
3039 uint32 level = sb_gpioin(sbh);
3040 uint32 edge = sb_gpioevent(sbh, GPIO_REGEVT, 0, 0);
3043 for (h = si->gpioh_head; h != NULL; h = h->next) {
/* level-triggered handlers see current pin levels; edge handlers see latched edges */
3045 status = (h->level ? level : edge);
3047 if (status & h->event)
3048 h->handler(status, h->arg);
3052 sb_gpioevent(sbh, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
/*
 * Enable/disable the GPIO interrupt bit (CI_GPIO) in ChipCommon's intmask
 * register via sb_corereg() mask&set.  Requires chipcommon rev >= 11.
 * NOTE(review): elided source (locals, ccrev<11 return, start of the
 * sb_corereg call) -- confirm against full file.
 */
3055 uint32 sb_gpio_int_enable(sb_t * sbh, bool enable)
3061 if (si->sb.ccrev < 11)
3064 offs = OFFSETOF(chipcregs_t, intmask);
3066 (sbh, SB_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
/*
 * Debug helper: print a one-shot summary of the sb_info_t state (chip id,
 * board ids, bus type, core revisions, PCIe workaround flags, and the
 * discovered core-id list) into the caller's bcmstrbuf 'b'.
 * Read-only; does not switch cores.
 */
3070 void sb_dump(sb_t * sbh, struct bcmstrbuf *b)
3078 "si %p chip 0x%x chiprev 0x%x boardtype 0x%x boardvendor 0x%x bus %d\n",
3079 si, si->sb.chip, si->sb.chiprev, si->sb.boardtype,
3080 si->sb.boardvendor, si->sb.bustype);
3081 bcm_bprintf(b, "osh %p curmap %p\n", si->osh, si->curmap);
3083 "sonicsrev %d ccrev %d buscoretype 0x%x buscorerev %d curidx %d\n",
3084 si->sb.sonicsrev, si->sb.ccrev, si->sb.buscoretype,
3085 si->sb.buscorerev, si->curidx);
3087 bcm_bprintf(b, "forceHT %d ASPM overflowPR42780 %d pcie_polarity %d\n",
3088 si->sb.pr32414, si->sb.pr42780, si->pcie_polarity);
3090 bcm_bprintf(b, "cores: ");
3091 for (i = 0; i < si->numcores; i++)
3092 bcm_bprintf(b, "0x%x ", si->coreid[i]);
3093 bcm_bprintf(b, "\n");
3096 /* print interesting sbconfig registers */
/*
 * For every discovered core, switch to it and print its key sbconfig
 * registers (sbtmstatelow/high, sbidhigh, sbimstate, sbimconfiglow/high)
 * into 'b'.  Interrupts are masked around the core walk and the original
 * core index is restored before returning.
 */
3102 uint curidx, i, intr_val = 0;
3105 origidx = si->curidx;
3107 INTR_OFF(si, intr_val);
3108 curidx = si->curidx;
3110 for (i = 0; i < si->numcores; i++) {
3111 sb = REGS2SB(sb_setcoreidx(sbh, i));
3113 bcm_bprintf(b, "core 0x%x: \n", si->coreid[i]);
3115 "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
3116 "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
3117 R_SBREG(si, &sb->sbtmstatelow), R_SBREG(si,
3120 R_SBREG(si, &sb->sbidhigh), R_SBREG(si,
3122 R_SBREG(si, &sb->sbimconfiglow), R_SBREG(si,
3127 sb_setcoreidx(sbh, origidx);
3128 INTR_RESTORE(si, intr_val);
/*
 * Debug dump of the CURRENT core's full sbconfig register set via
 * SB_ERROR (compiled out unless SB_ERROR is enabled -- see header).
 * The sbimerrlog/sbimerrloga pair is only present on backplanes newer
 * than SONICS_2_2, hence the sonicsrev gate.  Read-only.
 */
3131 void sb_view(sb_t * sbh)
3137 sb = REGS2SB(si->curmap);
3139 if (si->sb.sonicsrev > SONICS_2_2)
3140 SB_ERROR(("sbimerrlog 0x%x sbimerrloga 0x%x\n",
3141 sb_corereg(sbh, sb_coreidx(&si->sb), SBIMERRLOG, 0,
3142 0), sb_corereg(sbh, sb_coreidx(&si->sb),
3143 SBIMERRLOGA, 0, 0)));
3145 SB_ERROR(("sbipsflag 0x%x sbtpsflag 0x%x sbtmerrloga 0x%x sbtmerrlog 0x%x\n", R_SBREG(si, &sb->sbipsflag), R_SBREG(si, &sb->sbtpsflag), R_SBREG(si, &sb->sbtmerrloga), R_SBREG(si, &sb->sbtmerrlog)));
3146 SB_ERROR(("sbadmatch3 0x%x sbadmatch2 0x%x sbadmatch1 0x%x\n",
3147 R_SBREG(si, &sb->sbadmatch3), R_SBREG(si, &sb->sbadmatch2),
3148 R_SBREG(si, &sb->sbadmatch1)));
3149 SB_ERROR(("sbimstate 0x%x sbintvec 0x%x sbtmstatelow 0x%x sbtmstatehigh 0x%x\n", R_SBREG(si, &sb->sbimstate), R_SBREG(si, &sb->sbintvec), R_SBREG(si, &sb->sbtmstatelow), R_SBREG(si, &sb->sbtmstatehigh)));
3150 SB_ERROR(("sbbwa0 0x%x sbimconfiglow 0x%x sbimconfighigh 0x%x sbadmatch0 0x%x\n", R_SBREG(si, &sb->sbbwa0), R_SBREG(si, &sb->sbimconfiglow), R_SBREG(si, &sb->sbimconfighigh), R_SBREG(si, &sb->sbadmatch0)));
3151 SB_ERROR(("sbtmconfiglow 0x%x sbtmconfighigh 0x%x sbbconfig 0x%x sbbstate 0x%x\n", R_SBREG(si, &sb->sbtmconfiglow), R_SBREG(si, &sb->sbtmconfighigh), R_SBREG(si, &sb->sbbconfig), R_SBREG(si, &sb->sbbstate)));
3152 SB_ERROR(("sbactcnfg 0x%x sbflagst 0x%x sbidlow 0x%x sbidhigh 0x%x\n",
3153 R_SBREG(si, &sb->sbactcnfg), R_SBREG(si, &sb->sbflagst),
3154 R_SBREG(si, &sb->sbidlow), R_SBREG(si, &sb->sbidhigh)));
/*
 * Iterate over every discovered core and dump its sbconfig registers
 * (presumably via sb_view() in the elided loop body -- confirm), masking
 * interrupts around each core switch.  Restores the original core index
 * on exit.
 */
3157 void sb_viewall(sb_t * sbh)
3164 curidx = si->curidx;
3166 for (i = 0; i < si->numcores; i++) {
3167 INTR_OFF(si, intr_val);
3168 sb_setcoreidx(sbh, i);
3170 INTR_RESTORE(si, intr_val);
3173 sb_setcoreidx(sbh, curidx);
3177 /* return the slow clock source - LPO, XTAL, or PCI */
/*
 * Determine the slow-clock source for this chip.  Caller must already be
 * on the ChipCommon core (asserted).
 *  - ccrev < 6:  inferred from a PCI config-space GPIO strap (PCI vs XTAL).
 *  - ccrev 6..9: read from slow_clk_ctl's SCC_SS field.
 *  - ccrev >= 10 ("Insta-clock"): always XTAL.
 */
3178 static uint sb_slowclk_src(sb_info_t * si)
3182 ASSERT(sb_coreid(&si->sb) == SB_CC);
3184 if (si->sb.ccrev < 6) {
3185 if ((BUSTYPE(si->sb.bustype) == PCI_BUS) &&
3186 (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32))
3187 & PCI_CFG_GPIO_SCS))
3188 return (SCC_SS_PCI);
3190 return (SCC_SS_XTAL);
3191 } else if (si->sb.ccrev < 10) {
3192 cc = (chipcregs_t *) sb_setcoreidx(&si->sb, si->curidx);
3193 return (R_REG(si->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
3194 } else /* Insta-clock */
3195 return (SCC_SS_XTAL);
3198 /* return the ILP (slowclock) min or max frequency */
/*
 * Compute the ILP (slow clock) frequency in Hz: maximum when 'max_freq'
 * is TRUE, minimum otherwise.  Caller must be on ChipCommon (asserted),
 * and the chip must have dynamic clock control (CC_CAP_PWR_CTL asserted).
 * Pre-rev-6 parts use fixed dividers (/64 for PCI, /32 for XTAL); rev
 * 6..9 read the divider from slow_clk_ctl; rev >= 10 (InstaClock) derives
 * it from system_clk_ctl.
 */
3199 static uint sb_slowclk_freq(sb_info_t * si, bool max_freq)
3205 ASSERT(sb_coreid(&si->sb) == SB_CC);
3207 cc = (chipcregs_t *) sb_setcoreidx(&si->sb, si->curidx);
3209 /* shouldn't be here unless we've established the chip has dynamic clk control */
3210 ASSERT(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
3212 slowclk = sb_slowclk_src(si);
3213 if (si->sb.ccrev < 6) {
3214 if (slowclk == SCC_SS_PCI)
3215 return (max_freq ? (PCIMAXFREQ / 64)
3216 : (PCIMINFREQ / 64));
3218 return (max_freq ? (XTALMAXFREQ / 32)
3219 : (XTALMINFREQ / 32));
3220 } else if (si->sb.ccrev < 10) {
/* divider decoded from slow_clk_ctl's SCC_CD field (elided shift/scale) */
3223 (((R_REG(si->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >>
3226 if (slowclk == SCC_SS_LPO)
3227 return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
3228 else if (slowclk == SCC_SS_XTAL)
3229 return (max_freq ? (XTALMAXFREQ / div)
3230 : (XTALMINFREQ / div));
3231 else if (slowclk == SCC_SS_PCI)
3232 return (max_freq ? (PCIMAXFREQ / div)
3233 : (PCIMINFREQ / div));
3237 /* Chipc rev 10 is InstaClock */
3238 div = R_REG(si->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
3239 div = 4 * (div + 1);
3240 return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
/*
 * Program ChipCommon's pll_on_delay and fref_sel_delay registers from the
 * worst-case slow-clock frequency.  If the slow clock is not XTAL-sourced
 * the crystal power-up time (XTAL_ON_DELAY) is added to the PLL delay,
 * since dynamic clock control will also have powered the xtal down.
 * Delays are converted from microseconds to slow-clock ticks, rounded up
 * (+999999 before the /1000000).
 */
3245 static void BCMINITFN(sb_clkctl_setdelay) (sb_info_t * si, void *chipcregs) {
3247 uint slowmaxfreq, pll_delay, slowclk;
3248 uint pll_on_delay, fref_sel_delay;
3250 pll_delay = PLL_DELAY;
3252 /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
3253 * since the xtal will also be powered down by dynamic clk control logic.
3256 slowclk = sb_slowclk_src(si);
3257 if (slowclk != SCC_SS_XTAL)
3258 pll_delay += XTAL_ON_DELAY;
3260 /* Starting with 4318 it is ILP that is used for the delays */
3261 slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);
3263 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
3264 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
3266 cc = (chipcregs_t *) chipcregs;
3267 W_REG(si->osh, &cc->pll_on_delay, pll_on_delay);
3268 W_REG(si->osh, &cc->fref_sel_delay, fref_sel_delay);
3271 /* initialize power control delay registers */
/*
 * One-time clock-control init: apply the BCM4321 rev<2 chipcontrol
 * workaround, bail if the chip lacks power control (CC_CAP_PWR_CTL),
 * force InstaClock (ccrev >= 10) parts' ILP divider to 1 MHz, then
 * program the delay registers via sb_clkctl_setdelay().  Switches to
 * ChipCommon and restores the original core index on exit.
 */
3272 void BCMINITFN(sb_clkctl_init) (sb_t * sbh) {
3279 origidx = si->curidx;
3281 if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0)) == NULL)
3284 if ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev < 2))
3285 W_REG(si->osh, &cc->chipcontrol,
3287 0) ? CHIPCTRL_4321A0_DEFAULT : CHIPCTRL_4321A1_DEFAULT);
3289 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL))
3292 /* set all Instaclk chip ILP to 1 MHz */
3293 if (si->sb.ccrev >= 10)
3294 SET_REG(si->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
3295 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
3297 sb_clkctl_setdelay(si, (void *)(uintptr) cc);
3300 sb_setcoreidx(sbh, origidx);
3303 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
/*
 * Compute the fast power-up delay for the d11 core.  PMU-capable chips
 * delegate to sb_pmu_fast_pwrup_delay(); otherwise the delay is derived
 * from pll_on_delay scaled by the minimum slow-clock frequency, rounded
 * up.  Interrupts are masked and the original core restored around the
 * ChipCommon access.  NOTE(review): elided lines include locals and the
 * goto-style exit paths implied by the restore code at the end.
 */
3304 uint16 BCMINITFN(sb_clkctl_fast_pwrup_delay) (sb_t * sbh) {
3314 origidx = si->curidx;
3316 INTR_OFF(si, intr_val);
3318 if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0)) == NULL)
3321 if (sbh->cccaps & CC_CAP_PMU) {
3322 fpdelay = sb_pmu_fast_pwrup_delay(sbh, si->osh);
3326 if (!(sbh->cccaps & CC_CAP_PWR_CTL))
3329 slowminfreq = sb_slowclk_freq(si, FALSE);
3330 fpdelay = (((R_REG(si->osh, &cc->pll_on_delay) + 2) * 1000000) +
3331 (slowminfreq - 1)) / slowminfreq;
3334 sb_setcoreidx(sbh, origidx);
3335 INTR_RESTORE(si, intr_val);
3339 /* turn primary xtal and/or pll off/on */
/*
 * Control the primary crystal and/or PLL ('what' is a bitmask of XTAL
 * and, presumably, PLL) through PCI config-space GPIO registers.  Only
 * meaningful on PCI bus; the PCIe core has no mapping for xtal control.
 * On power-up the xtal is raised first, then the PLL after XTAL_ON_DELAY;
 * on power-down the PLL is powered down (note PCI_CFG_GPIO_PLL is
 * active-low per the opposite-direction writes below).
 * NOTE(review): substantial elision (switch arms, on/off branching,
 * return values) -- behavior above is inferred; confirm against full file.
 */
3340 int sb_clkctl_xtal(sb_t * sbh, uint what, bool on)
3343 uint32 in, out, outen;
3347 switch (BUSTYPE(si->sb.bustype)) {
3354 /* pcie core doesn't have any mapping to control the xtal pu */
3358 in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof(uint32));
3360 OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32));
3362 OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN,
3366 * Avoid glitching the clock if GPRS is already using it.
3367 * We can't actually read the state of the PLLPD so we infer it
3368 * by the value of XTAL_PU which *is* readable via gpioin.
3370 if (on && (in & PCI_CFG_GPIO_XTAL))
3374 outen |= PCI_CFG_GPIO_XTAL;
3376 outen |= PCI_CFG_GPIO_PLL;
3379 /* turn primary xtal on */
3381 out |= PCI_CFG_GPIO_XTAL;
3383 out |= PCI_CFG_GPIO_PLL;
3384 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3385 sizeof(uint32), out);
3386 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
3387 sizeof(uint32), outen);
3388 OSL_DELAY(XTAL_ON_DELAY);
/* PLL enable after the xtal has stabilized (PLL bit is active-low) */
3393 out &= ~PCI_CFG_GPIO_PLL;
3394 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3395 sizeof(uint32), out);
/* power-down path: drop xtal, assert PLL power-down */
3400 out &= ~PCI_CFG_GPIO_XTAL;
3402 out |= PCI_CFG_GPIO_PLL;
3403 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3404 sizeof(uint32), out);
3405 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
3406 sizeof(uint32), outen);
3416 /* set dynamic clk control mode (forceslow, forcefast, dynamic) */
3417 /* returns true if we are forcing fast clock */
/*
 * Select the dynamic clock-control mode.  For CLK_FAST the HT/PLL clock
 * is forced on (mechanism depends on ccrev: slow_clk_ctl for <10,
 * system_clk_ctl SYCC_HR for <20, clk_ctl_st CCS_FORCEHT otherwise) and
 * the code waits for HT availability (PMU spinwait or fixed PLL_DELAY).
 * For CLK_DYNAMIC the force bits are cleared and, pre-rev-10, the xtal
 * "force on" from sb_clkctl_xtal() is released.  Not supported below
 * ccrev 6; ccrev 10 is explicitly disallowed (EOL, lacks SYCC_HR).
 * Interrupts masked and original core restored around the register work.
 */
3418 bool sb_clkctl_clk(sb_t * sbh, uint mode)
3428 /* chipcommon cores prior to rev6 don't support dynamic clock control */
3429 if (si->sb.ccrev < 6)
3432 /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
3433 ASSERT(si->sb.ccrev != 10);
3435 INTR_OFF(si, intr_val);
3437 origidx = si->curidx;
/* elided: early-out for old MIPS cores on SB bus (WAR path) */
3439 if (sb_setcore(sbh, SB_MIPS33, 0) && (sb_corerev(&si->sb) <= 7) &&
3440 (BUSTYPE(si->sb.bustype) == SB_BUS) && (si->sb.ccrev >= 10))
3443 if (FORCEHT_WAR32414(si))
3446 cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0);
3449 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL)
3450 && (si->sb.ccrev < 20))
3454 case CLK_FAST: /* force fast (pll) clock */
3455 if (si->sb.ccrev < 10) {
3456 /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
3457 sb_clkctl_xtal(&si->sb, XTAL, ON);
3459 SET_REG(si->osh, &cc->slow_clk_ctl,
3460 (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
3461 } else if (si->sb.ccrev < 20) {
3462 OR_REG(si->osh, &cc->system_clk_ctl, SYCC_HR);
3464 OR_REG(si->osh, &cc->clk_ctl_st, CCS_FORCEHT);
3467 /* wait for the PLL */
3468 if (R_REG(si->osh, &cc->capabilities) & CC_CAP_PMU) {
3469 SPINWAIT(((R_REG(si->osh, &cc->clk_ctl_st) &
3470 CCS_HTAVAIL) == 0), PMU_MAX_TRANSITION_DLY);
3471 ASSERT(R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL);
3473 OSL_DELAY(PLL_DELAY);
3477 case CLK_DYNAMIC: /* enable dynamic clock control */
3478 if (si->sb.ccrev < 10) {
3479 scc = R_REG(si->osh, &cc->slow_clk_ctl);
3480 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
3481 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
3483 W_REG(si->osh, &cc->slow_clk_ctl, scc);
3485 /* for dynamic control, we have to release our xtal_pu "force on" */
3487 sb_clkctl_xtal(&si->sb, XTAL, OFF);
3488 } else if (si->sb.ccrev < 20) {
3490 AND_REG(si->osh, &cc->system_clk_ctl, ~SYCC_HR);
3492 AND_REG(si->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
3501 sb_setcoreidx(sbh, origidx);
3502 INTR_RESTORE(si, intr_val);
3503 return (mode == CLK_FAST);
3506 /* register driver interrupt disabling and restoring callback functions */
/*
 * Save the driver's interrupt off/restore/enabled callbacks and their
 * argument into sb_info_t (used by the INTR_OFF/INTR_RESTORE macros).
 * Also records the current core id as dev_coreid: callers must invoke
 * this while switched to the core that provides the driver functions.
 */
3508 sb_register_intr_callback(sb_t * sbh, void *intrsoff_fn,
3509 void *intrsrestore_fn, void *intrsenabled_fn,
3515 si->intr_arg = intr_arg;
3516 si->intrsoff_fn = (sb_intrsoff_t) intrsoff_fn;
3517 si->intrsrestore_fn = (sb_intrsrestore_t) intrsrestore_fn;
3518 si->intrsenabled_fn = (sb_intrsenabled_t) intrsenabled_fn;
3519 /* save current core id. when this function called, the current core
3520 * must be the core which provides driver functions(il, et, wl, etc.)
3522 si->dev_coreid = si->coreid[si->curidx];
/*
 * Disable the interrupt-callback mechanism by clearing intrsoff_fn (the
 * INTR_* macros presumably test this pointer before dispatching --
 * confirm; the other callback pointers are left as-is in visible code).
 */
3525 void sb_deregister_intr_callback(sb_t * sbh)
3530 si->intrsoff_fn = NULL;
3534 /* dump dynamic clock control related registers */
/*
 * Debug dump of the dynamic clock-control registers into 'b': delay
 * registers, slow_clk_ctl (ccrev 6..9), system_clk_ctl/clkstatestretch
 * (ccrev >= 10), and on PCI bus the GPIO out/outen config words.  No-op
 * (after restore) if the chip lacks CC_CAP_PWR_CTL or has no ChipCommon.
 * Interrupts masked and original core restored around the access.
 */
3535 void sb_clkctl_dump(sb_t * sbh, struct bcmstrbuf *b)
3544 INTR_OFF(si, intr_val);
3546 origidx = si->curidx;
3548 if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0)) == NULL) {
3549 INTR_RESTORE(si, intr_val);
3553 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL))
3556 bcm_bprintf(b, "pll_on_delay 0x%x fref_sel_delay 0x%x ",
3557 cc->pll_on_delay, cc->fref_sel_delay);
3558 if ((si->sb.ccrev >= 6) && (si->sb.ccrev < 10))
3559 bcm_bprintf(b, "slow_clk_ctl 0x%x ", cc->slow_clk_ctl);
3560 if (si->sb.ccrev >= 10) {
3561 bcm_bprintf(b, "system_clk_ctl 0x%x ", cc->system_clk_ctl);
3562 bcm_bprintf(b, "clkstatestretch 0x%x ", cc->clkstatestretch);
3564 if (BUSTYPE(si->sb.bustype) == PCI_BUS)
3565 bcm_bprintf(b, "gpioout 0x%x gpioouten 0x%x ",
3566 OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT,
3568 OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN,
3570 bcm_bprintf(b, "\n");
3573 sb_setcoreidx(sbh, origidx);
3574 INTR_RESTORE(si, intr_val);
/*
 * Resolve the PCI device id to advertise for the d11 (802.11 MAC) core.
 * Precedence: BCM4328 dual-band package fixup, then the devpath-scoped
 * "devid" nvram variable, then plain "devid" from OTP/SPROM vars, then
 * the legacy "wl0id" variable, then chip-specific fallbacks (BCM4712).
 * Note the empty-statement 'if (...) ;' chains deliberately use the
 * assignment's side effect; only the final else branches assign defaults.
 */
3578 uint16 BCMINITFN(sb_d11_devid) (sb_t * sbh) {
3579 sb_info_t *si = SB_INFO(sbh);
3582 #if defined(BCM4328)
3583 /* Fix device id for dual band BCM4328 */
3584 if (sbh->chip == BCM4328_CHIP_ID &&
3585 (sbh->chippkg == BCM4328USBDUAL_PKG_ID
3586 || sbh->chippkg == BCM4328SDIODUAL_PKG_ID))
3587 device = BCM4328_D11DUAL_ID;
3589 #endif /* BCM4328 */
3590 /* Let an nvram variable with devpath override devid */
3591 if ((device = (uint16) sb_getdevpathintvar(sbh, "devid")) != 0) ;
3592 /* Get devid from OTP/SPROM depending on where the SROM is read */
3593 else if ((device = (uint16) getintvar(si->vars, "devid")) != 0) ;
3595 * no longer support wl0id, but keep the code
3596 * here for backward compatibility.
3598 else if ((device = (uint16) getintvar(si->vars, "wl0id")) != 0) ;
3599 /* Chip specific conversion */
3600 else if (sbh->chip == BCM4712_CHIP_ID) {
3601 if (sbh->chippkg == BCM4712SMALL_PKG_ID)
3602 device = BCM4306_D11G_ID;
3604 device = BCM4306_D11DUAL_ID;
/*
 * Synthesize PCI identification (vendor/device/class/subclass/progif/
 * header type) for the current backplane core, so an sb core can be
 * presented as a PCI function.  Only the USB20H core exposes two
 * functions (OHCI on func 0, EHCI on func 1, multifunction header);
 * everything else supports func 0 only.  Unknown cores fall through to
 * class/subclass/progif 0xff with the raw core id as device id.
 * Results are returned through the out-pointers; elided lines include
 * the switch statements' case labels and the return.
 */
3614 BCMINITFN(sb_corepciid) (sb_t * sbh, uint func, uint16 * pcivendor,
3615 uint16 * pcidevice, uint8 * pciclass,
3616 uint8 * pcisubclass, uint8 * pciprogif,
3617 uint8 * pciheader) {
3618 uint16 vendor = 0xffff, device = 0xffff;
3619 uint8 class, subclass, progif = 0;
3620 uint8 header = PCI_HEADER_NORMAL;
3621 uint32 core = sb_coreid(sbh);
3623 /* Verify whether the function exists for the core */
3624 if (func >= (uint) (core == SB_USB20H ? 2 : 1))
3627 /* Known vendor translations */
3628 switch (sb_corevendor(sbh)) {
3630 vendor = VENDOR_BROADCOM;
3636 /* Determine class based on known core codes */
/* (case labels elided throughout the switch below) */
3639 class = PCI_CLASS_NET;
3640 subclass = PCI_NET_ETHER;
3641 device = BCM47XX_ILINE_ID;
3644 class = PCI_CLASS_NET;
3645 subclass = PCI_NET_ETHER;
3646 device = BCM47XX_ENET_ID;
3649 class = PCI_CLASS_NET;
3650 subclass = PCI_NET_ETHER;
3651 device = BCM47XX_GIGETH_ID;
3655 class = PCI_CLASS_MEMORY;
3656 subclass = PCI_MEMORY_RAM;
3657 device = (uint16) core;
3661 class = PCI_CLASS_BRIDGE;
3662 subclass = PCI_BRIDGE_PCI;
3663 device = (uint16) core;
3664 header = PCI_HEADER_BRIDGE;
3668 class = PCI_CLASS_CPU;
3669 subclass = PCI_CPU_MIPS;
3670 device = (uint16) core;
3673 class = PCI_CLASS_COMM;
3674 subclass = PCI_COMM_MODEM;
3675 device = BCM47XX_V90_ID;
3678 class = PCI_CLASS_SERIAL;
3679 subclass = PCI_SERIAL_USB;
3680 progif = 0x10; /* OHCI */
3681 device = BCM47XX_USB_ID;
3684 class = PCI_CLASS_SERIAL;
3685 subclass = PCI_SERIAL_USB;
3686 progif = 0x10; /* OHCI */
3687 device = BCM47XX_USBH_ID;
3690 class = PCI_CLASS_SERIAL;
3691 subclass = PCI_SERIAL_USB;
3692 progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI */
3693 device = BCM47XX_USB20H_ID;
3694 header = 0x80; /* multifunction */
3697 class = PCI_CLASS_CRYPT;
3698 subclass = PCI_CRYPT_NETWORK;
3699 device = BCM47XX_IPSEC_ID;
3702 class = PCI_CLASS_NET;
3703 subclass = PCI_NET_OTHER;
3704 device = BCM47XX_ROBO_ID;
3708 class = PCI_CLASS_MEMORY;
3709 subclass = PCI_MEMORY_FLASH;
3710 device = (uint16) core;
3713 class = PCI_CLASS_XOR;
3714 subclass = PCI_XOR_QDMA;
3715 device = BCM47XX_SATAXOR_ID;
3718 class = PCI_CLASS_DASDI;
3719 subclass = PCI_DASDI_IDE;
3720 device = BCM47XX_ATA100_ID;
3723 class = PCI_CLASS_SERIAL;
3724 subclass = PCI_SERIAL_USB;
3725 device = BCM47XX_USBD_ID;
3728 class = PCI_CLASS_SERIAL;
3729 subclass = PCI_SERIAL_USB;
3730 device = BCM47XX_USB20D_ID;
3733 class = PCI_CLASS_NET;
3734 subclass = PCI_NET_OTHER;
3735 device = sb_d11_devid(sbh);
3739 class = subclass = progif = 0xff;
3740 device = (uint16) core;
3744 *pcivendor = vendor;
3745 *pcidevice = device;
3747 *pcisubclass = subclass;
3748 *pciprogif = progif;
3749 *pciheader = header;
3754 /* use the mdio interface to read from mdio slaves */
/*
 * Read one SERDES register over the PCIe core's MDIO interface.
 * Enables preamble + divisor in mdiocontrol, issues a READ frame
 * (device addr = physmedia, register = regaddr), then polls
 * mdiocontrol for MDIOCTL_ACCESS_DONE; on completion the data word is
 * stored through *regval and MDIO access is disabled again.  On
 * timeout logs an error and disables MDIO access.
 * NOTE(review): the retry-loop header, delay, and return statements are
 * elided; return convention (0/error code) inferred -- confirm.
 */
3756 sb_pcie_mdioread(sb_info_t * si, uint physmedia, uint regaddr, uint * regval)
3760 sbpcieregs_t *pcieregs;
3762 pcieregs = (sbpcieregs_t *) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3765 /* enable mdio access to SERDES */
3766 W_REG(si->osh, (&pcieregs->mdiocontrol),
3767 MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3769 mdiodata = MDIODATA_START | MDIODATA_READ |
3770 (physmedia << MDIODATA_DEVADDR_SHF) |
3771 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA;
3773 W_REG(si->osh, &pcieregs->mdiodata, mdiodata);
3777 /* retry till the transaction is complete */
3779 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) &
3780 MDIOCTL_ACCESS_DONE) {
3783 (R_REG(si->osh, &(pcieregs->mdiodata)) &
3785 /* Disable mdio access to SERDES */
3786 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3793 SB_ERROR(("sb_pcie_mdioread: timed out\n"));
3794 /* Disable mdio access to SERDES */
3795 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3799 /* use the mdio interface to write to mdio slaves */
/*
 * Write one SERDES register over the PCIe core's MDIO interface.
 * Mirrors sb_pcie_mdioread(): enable MDIO, issue a WRITE frame carrying
 * 'val' in the low bits, poll for MDIOCTL_ACCESS_DONE, then disable
 * MDIO access.  Timeout path logs and disables MDIO access.
 * NOTE(review): loop header, delays and returns are elided -- confirm.
 */
3801 sb_pcie_mdiowrite(sb_info_t * si, uint physmedia, uint regaddr, uint val)
3805 sbpcieregs_t *pcieregs;
3807 pcieregs = (sbpcieregs_t *) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3810 /* enable mdio access to SERDES */
3811 W_REG(si->osh, (&pcieregs->mdiocontrol),
3812 MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3814 mdiodata = MDIODATA_START | MDIODATA_WRITE |
3815 (physmedia << MDIODATA_DEVADDR_SHF) |
3816 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
3818 W_REG(si->osh, (&pcieregs->mdiodata), mdiodata);
3822 /* retry till the transaction is complete */
3824 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) &
3825 MDIOCTL_ACCESS_DONE) {
3826 /* Disable mdio access to SERDES */
3827 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3834 SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
3835 /* Disable mdio access to SERDES */
3836 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3841 /* indirect way to read pcie config regs */
/*
 * Indirect read of a PCIe-core register.  'arg1' (an integer smuggled
 * through a void*) selects the address space: PCIE_CONFIGREGS goes
 * through configaddr/configdata, the other case (label elided, presumably
 * PCIE_PCIEREGS) through pcieindaddr/pcieinddata.  Returns the register
 * value, 0xFFFFFFFF if never written.  Elided lines include locals and
 * core save/restore around sb_setcore().
 */
3842 uint sb_pcie_readreg(void *sb, void *arg1, uint offset)
3846 uint retval = 0xFFFFFFFF;
3847 sbpcieregs_t *pcieregs;
3854 pcieregs = (sbpcieregs_t *) sb_setcore(sbh, SB_PCIE, 0);
3857 addrtype = (uint) ((uintptr) arg1);
3859 case PCIE_CONFIGREGS:
3860 W_REG(si->osh, (&pcieregs->configaddr), offset);
3861 retval = R_REG(si->osh, &(pcieregs->configdata));
3864 W_REG(si->osh, &(pcieregs->pcieindaddr), offset);
3865 retval = R_REG(si->osh, &(pcieregs->pcieinddata));
3874 /* indirect way to write pcie config/mdio/pciecore regs */
/*
 * Indirect write counterpart of sb_pcie_readreg(): select the address
 * space via 'arg1', write 'offset' to the address register and 'val' to
 * the data register.  Elided lines include locals, core save/restore
 * and the return value.
 */
3875 uint sb_pcie_writereg(sb_t * sbh, void *arg1, uint offset, uint val)
3878 sbpcieregs_t *pcieregs;
3884 pcieregs = (sbpcieregs_t *) sb_setcore(sbh, SB_PCIE, 0);
3887 addrtype = (uint) ((uintptr) arg1);
3890 case PCIE_CONFIGREGS:
3891 W_REG(si->osh, (&pcieregs->configaddr), offset);
3892 W_REG(si->osh, (&pcieregs->configdata), val);
3895 W_REG(si->osh, &(pcieregs->pcieindaddr), offset);
3896 W_REG(si->osh, &(pcieregs->pcieinddata), val);
3905 /* Build device path. Support SB, PCI, and JTAG for now. */
/*
 * Format a nvram device-path prefix ("sb/<idx>/", "pci/<bus>/<slot>/",
 * or "pc/1/1/" for PCMCIA where bus/slot lookups are unimplemented)
 * into 'path'.  Caller must supply at least SB_DEVPATH_BUFSZ bytes
 * (asserted).  The trailing snprintf bounds check rejects truncation;
 * return convention (0 = ok, negative = error) is inferred from callers
 * such as sb_devpathvar() -- confirm, as the returns are elided.
 */
3906 int BCMINITFN(sb_devpath) (sb_t * sbh, char *path, int size) {
3909 ASSERT(size >= SB_DEVPATH_BUFSZ);
3911 if (!path || size <= 0)
3914 switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
3917 slen = snprintf(path, (size_t) size, "sb/%u/", sb_coreidx(sbh));
3920 ASSERT((SB_INFO(sbh))->osh);
3921 slen = snprintf(path, (size_t) size, "pci/%u/%u/",
3922 OSL_PCI_BUS((SB_INFO(sbh))->osh),
3923 OSL_PCI_SLOT((SB_INFO(sbh))->osh));
3926 SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
3927 SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
3928 slen = snprintf(path, (size_t) size, "pc/1/1/");
3936 if (slen < 0 || slen >= size) {
3944 /* Get a variable, but only if it has a devpath prefix */
/*
 * Look up nvram variable "<devpath><name>" (e.g. "pci/1/1/devid") and
 * return its value string, or NULL if unset.  The prefix is built by
 * sb_devpathvar() into a stack buffer sized for path + name.
 */
3945 char *BCMINITFN(sb_getdevpathvar) (sb_t * sbh, const char *name) {
3946 char varname[SB_DEVPATH_BUFSZ + 32];
3948 sb_devpathvar(sbh, varname, sizeof(varname), name);
3950 return (getvar(NULL, varname));
3953 /* Get a variable, but only if it has a devpath prefix */
/*
 * Integer flavor of sb_getdevpathvar(): resolve "<devpath><name>" via
 * getintvar(); per the getintvar convention used by callers (see
 * sb_d11_devid), 0 indicates "not set".
 */
3954 int BCMINITFN(sb_getdevpathintvar) (sb_t * sbh, const char *name) {
3955 char varname[SB_DEVPATH_BUFSZ + 32];
3957 sb_devpathvar(sbh, varname, sizeof(varname), name);
3959 return (getintvar(NULL, varname));
3962 /* Concatenate the dev path with a varname into the given 'var' buffer
3963 * and return the 'var' pointer.
3964 * Nothing is done to the arguments if len == 0 or var is NULL, var is still returned.
3965 * On overflow, the first char will be set to '\0'.
/*
 * NOTE(review): strncpy here is bounded to len - path_len - 1 after an
 * explicit length check, so the result stays NUL-terminated only because
 * sb_devpath() wrote a terminated prefix; the overflow path ('\0' store)
 * is in elided lines -- confirm against full file.
 */
3967 static char *BCMINITFN(sb_devpathvar) (sb_t * sbh, char *var, int len,
3971 if (!var || len <= 0)
3974 if (sb_devpath(sbh, var, len) == 0) {
3975 path_len = strlen(var);
3977 if (strlen(name) + 1 > (uint) (len - path_len))
3980 strncpy(var + path_len, name, len - path_len - 1);
3987 * Fixup SROMless PCI device's configuration.
3988 * The current core may be changed upon return.
/*
 * Repair the SROM shadow "PI" (PCI core index) field so config-space
 * accesses reach the correct PCI/PCIE core, then, for chips needing the
 * PCIe ASPM workarounds (PCIE_ASPMWARS), latch the SERDES RX polarity
 * observed at attach time and sanity-check the CLKREQ advertisement
 * against the SROM shadow.  Applies war43448/war42767 before restoring
 * the original core index.  PCI bus only (asserted).
 */
3990 static int sb_pci_fixcfg(sb_info_t * si)
3992 uint origidx, pciidx;
3993 sbpciregs_t *pciregs;
3994 sbpcieregs_t *pcieregs = NULL;
3995 uint16 val16, *reg16;
3998 ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
4000 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
4001 /* save the current index */
4002 origidx = sb_coreidx(&si->sb);
4004 /* check 'pi' is correct and fix it if not */
4005 if (si->sb.buscoretype == SB_PCIE) {
4006 pcieregs = (sbpcieregs_t *) sb_setcore(&si->sb, SB_PCIE, 0);
4008 reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
4009 } else if (si->sb.buscoretype == SB_PCI) {
4010 pciregs = (sbpciregs_t *) sb_setcore(&si->sb, SB_PCI, 0);
4012 reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
/* (else branch for non-PCI/PCIE buscoretype elided) */
4017 pciidx = sb_coreidx(&si->sb);
4018 val16 = R_REG(si->osh, reg16);
4019 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16) pciidx) {
4021 (uint16) (pciidx << SRSH_PI_SHIFT) | (val16 &
4023 W_REG(si->osh, reg16, val16);
4026 if (PCIE_ASPMWARS(si)) {
4027 w = sb_pcie_readreg((void *)(uintptr) & si->sb,
4028 (void *)PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
4030 /* Detect the current polarity at attach and force that polarity and
4031 * disable changing the polarity
4033 if ((w & PCIE_PLP_POLARITYINV_STAT) == 0) {
4034 si->pcie_polarity = (SERDES_RX_CTRL_FORCE);
4036 si->pcie_polarity = (SERDES_RX_CTRL_FORCE |
4037 SERDES_RX_CTRL_POLARITY);
4040 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset,
4042 if (w & PCIE_CLKREQ_ENAB) {
4043 reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
4044 val16 = R_REG(si->osh, reg16);
4045 /* if clockreq is not advertized clkreq should not be enabled */
4046 if (!(val16 & SRSH_CLKREQ_ENB))
4047 SB_ERROR(("WARNING: CLK REQ enabled already 0x%x\n", w));
4050 sb_war43448(&si->sb);
4052 sb_war42767(&si->sb);
4056 /* restore the original index */
4057 sb_setcoreidx(&si->sb, origidx);
4062 /* Return ADDR64 capability of the backplane */
/* TRUE iff ChipCommon advertises 64-bit backplane addressing (CC_CAP_BKPLN64). */
4063 bool sb_backplane64(sb_t * sbh)
4068 return ((si->sb.cccaps & CC_CAP_BKPLN64) != 0);
/*
 * Bluetooth-coexistence GPIO workaround: set bit 0x04 (OUT2, per 16550
 * MCR layout -- TODO confirm) in ChipCommon's uart0mcr.  Only applies
 * when the chip has UART GPIO capability (CC_CAP_UARTGPIO).  sb_corereg()
 * can't be used because the UART register requires 8-bit accesses, hence
 * the manual core switch under INTR_OFF with restore afterwards.
 */
4071 void sb_btcgpiowar(sb_t * sbh)
4079 /* Make sure that there is ChipCommon core present &&
4080 * UART_TX is strapped to 1
4082 if (!(si->sb.cccaps & CC_CAP_UARTGPIO))
4085 /* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
4086 INTR_OFF(si, intr_val);
4088 origidx = sb_coreidx(sbh);
4090 cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0);
4093 W_REG(si->osh, &cc->uart0mcr, R_REG(si->osh, &cc->uart0mcr) | 0x04);
4095 /* restore the original index */
4096 sb_setcoreidx(sbh, origidx);
4098 INTR_RESTORE(si, intr_val);
4101 /* check if the device is removed */
/*
 * Detect surprise removal: on PCI-family buses, read the config-space
 * vendor id and report removal when it no longer reads VENDOR_BROADCOM
 * (a removed device typically returns all-ones).  Other bus types and
 * the return statements are in elided lines.
 */
4102 bool sb_deviceremoved(sb_t * sbh)
4109 switch (BUSTYPE(si->sb.bustype)) {
4112 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_VID, sizeof(uint32));
4113 if ((w & 0xFFFF) != VENDOR_BROADCOM)
4124 /* Return the RAM size of the SOCRAM core */
4125 uint32 BCMINITFN(sb_socram_size) (sb_t * sbh) {
4130 sbsocramregs_t *regs;
4139 /* Block ints and save current core */
4140 INTR_OFF(si, intr_val);
4141 origidx = sb_coreidx(sbh);
4143 /* Switch to SOCRAM core */
4144 if (!(regs = sb_setcore(sbh, SB_SOCRAM, 0)))
4147 /* Get info for determining size */
4148 if (!(wasup = sb_iscoreup(sbh)))
4149 sb_core_reset(sbh, 0, 0);
4150 corerev = sb_corerev(sbh);
4151 coreinfo = R_REG(si->osh, ®s->coreinfo);
4153 /* Calculate size from coreinfo based on rev */
4155 memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
4156 else if (corerev < 3) {
4157 memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
4158 memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
4160 uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
4161 uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
4162 uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
4165 memsize = nb * (1 << (bsz + SR_BSZ_BASE));
4167 memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
4169 /* Return to previous state and core */
4171 sb_core_disable(sbh, 0);
4172 sb_setcoreidx(sbh, origidx);
4175 INTR_RESTORE(si, intr_val);