2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright 2006, Broadcom Corporation
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
12 * $Id: sbutils.c,v 1.10 2006/04/08 07:12:42 honor Exp $
/* Error logging is compiled out in this build. */
#define SB_ERROR(args)

/* Callback types the owning driver registers so this layer can mask and
 * restore the driver's interrupts around core switches
 * (used by INTR_OFF / INTR_RESTORE below).
 */
typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
typedef bool (*sb_intrsenabled_t)(void *intr_arg);

/* misc sb info needed by some of the routines */
/* NOTE(review): this view of the source is elided (closing "} sb_info_t;"
 * not visible here) — confirm against the full file.
 */
typedef struct sb_info {
	struct sb_pub sb;	/* back plane public state (must be first field) */

	void *osh;		/* osl os handle */
	void *sdh;		/* bcmsdh handle */

	void *curmap;		/* current regs va */
	void *regs[SB_MAXCORES];	/* other regs va */

	uint curidx;		/* current core index */
	uint dev_coreid;	/* the core provides driver functions */

	bool memseg;		/* flag to toggle MEM_SEG register */

	uint gpioidx;		/* gpio control core index */
	uint gpioid;		/* gpio control coretype */

	uint numcores;		/* # discovered cores */
	uint coreid[SB_MAXCORES];	/* id of each core */

	void *intr_arg;		/* interrupt callback function arg */
	sb_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
	sb_intrsrestore_t intrsrestore_fn;	/* restore chip interrupts */
	sb_intrsenabled_t intrsenabled_fn;	/* check if interrupts are enabled */
/* local prototypes */
static sb_info_t * sb_doattach(sb_info_t *si, uint devid, osl_t *osh, void *regs,
                               uint bustype, void *sdh, char **vars, uint *varsz);
static void sb_scan(sb_info_t *si);
static uint sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val);
static uint _sb_coreidx(sb_info_t *si);
static uint sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit);
static uint sb_pcidev2chip(uint pcidev);
static uint sb_chip2numcores(uint chip);
static bool sb_ispcie(sb_info_t *si);
static bool sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen);
static int sb_pci_fixcfg(sb_info_t *si);

/* routines to access mdio slave device registers */
static int sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint readdr, uint val);
static void sb_war30841(sb_info_t *si);
/* delay needed between the mdio control/ mdiodata register data access */
#define PR28829_DELAY() OSL_DELAY(10)

/* size that can take bitfielddump */
#define BITFIELD_DUMP_SIZE 32

/* global variable to indicate reservation/release of gpio's */
static uint32 sb_gpioreservation = 0;

/* Recover the private sb_info_t from the opaque public handle. */
#define SB_INFO(sbh) (sb_info_t*)sbh

/* Read-modify-write of a backplane register: clear 'mask' bits, set 'val'. */
#define SET_SBREG(si, r, mask, val) \
	W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))

/* A core address is valid if it lies in the SB enumeration window and is
 * aligned to the per-core register window size.
 */
#define GOODCOREADDR(x) (((x) >= SB_ENUM_BASE) && ((x) <= SB_ENUM_LIM) && \
	ISALIGNED((x), SB_CORE_SIZE))
#define GOODREGS(regs) ((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
/* sbconfig registers live at a fixed offset inside each core's window. */
#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
#define GOODIDX(idx) (((uint)idx) < SB_MAXCORES)
#define BADIDX (SB_MAXCORES+1)
#define NOREV -1	/* Invalid rev */

/* True when the host bus is PCI and the bus core is pci / pcie respectively. */
#define PCI(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
#define PCIE(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))

/* Sonics backplane revision codes (from sbidlow). */
#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define R_SBREG(si, sbr) sb_read_sbreg((si), (sbr))
#define W_SBREG(si, sbr, v) sb_write_sbreg((si), (sbr), (v))
#define AND_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
#define OR_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))

/*
 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
 * after core switching to avoid invalid register access inside ISR.
 * Only fires when the current core is the registered driver core.
 */
#define INTR_OFF(si, intr_val) \
	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
#define INTR_RESTORE(si, intr_val) \
	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }

/* dynamic clock control defines */
#define LPOMINFREQ 25000	/* low power oscillator min */
#define LPOMAXFREQ 43000	/* low power oscillator max */
#define XTALMINFREQ 19800000	/* 20 MHz - 1% */
#define XTALMAXFREQ 20200000	/* 20 MHz + 1% */
#define PCIMINFREQ 25000000	/* 25 MHz */
#define PCIMAXFREQ 34000000	/* 33 MHz + fudge */

#define ILP_DIV_5MHZ 0	/* ILP = 5 MHz */
#define ILP_DIV_1MHZ 4	/* ILP = 1 MHz */

/* different register spaces to access thr'u pcie indirect access */
#define PCIE_CONFIGREGS 1	/* Access to config space */
#define PCIE_PCIEREGS 2	/* Access to pcie registers */

/* force HT war check: applies to PCIe 4311 rev 1 and 4321 rev <= 3 */
#define FORCEHT_WAR32414(si) \
	((PCIE(si)) && (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 1)) || \
	((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3))))

/* GPIO Based LED powersave defines */
#define DEFAULT_GPIO_ONTIME 10	/* Default: 10% on */
#define DEFAULT_GPIO_OFFTIME 90	/* Default: 90% off */

#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
/* Read a 32-bit sbconfig register, handling the PCMCIA compact-flash case
 * where the attribute-memory MEM_SEG bit supplies the 12th address bit.
 * NOTE(review): several lines of this function are elided in this view
 * (return type, return statement, conditionals) — do not edit blind.
 */
sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
	uint32 val, intr_val = 0;

	/*
	 * compact flash only has 11 bits address, while we need 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary (access sb registers).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	INTR_OFF(si, intr_val);
	OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
	sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */

	val = R_REG(si->osh, sbr);

	/* restore MEM_SEG and the caller's interrupt state */
	OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
	INTR_RESTORE(si, intr_val);
/* Write a 32-bit sbconfig register. On PCMCIA the 32-bit write is split into
 * two 16-bit halves (order depends on endianness; the #ifdef IL_BIGENDIAN
 * opener is elided in this view) with a dummy read before each half.
 */
sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
	volatile uint32 dummy;

	/*
	 * compact flash only has 11 bits address, while we need 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary (access sb registers).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	INTR_OFF(si, intr_val);
	OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
	sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */

	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
		/* big-endian order: high half first */
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		/* little-endian order: low half first */
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
#endif	/* IL_BIGENDIAN */
		/* non-PCMCIA: plain 32-bit write */
		W_REG(si->osh, sbr, v);

	OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
	INTR_RESTORE(si, intr_val);
/*
 * Allocate a sb handle.
 * devid - pci device id (used to determine chip#)
 * osh - opaque OS handle
 * regs - virtual address of initial core registers
 * bustype - pci/pcmcia/sb/sdio/etc
 * vars - pointer to a pointer area for "environment" variables
 * varsz - pointer to int to return the size of the vars
 * Returns the new handle, or NULL on failure (elided here — confirm).
 */
BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
                     uint bustype, void *sdh, char **vars, uint *varsz)
	/* alloc sb_info_t */
	if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
		SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));

	/* real work is delegated; free the handle if attach fails */
	if (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, (uint*)varsz) == NULL) {
		MFREE(osh, si, sizeof(sb_info_t));
/* Using sb_kattach depends on SB_BUS support, either implicit */
/* no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)

/* global kernel resource */
static sb_info_t ksi;
static bool ksi_attached = FALSE;	/* TRUE once ksi has been attached */

/* generic kernel variant of sb_attach() */
BCMINITFN(sb_kattach)(void)
	/* map the enumeration window and sniff the chip id register */
	regs = (uint32 *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
	cid = R_REG(osh, (uint32 *)regs);
	/* workaround for early 4712 small-package parts (rev <= 3) */
	if (((cid & CID_ID_MASK) == BCM4712_CHIP_ID) &&
	    ((cid & CID_PKG_MASK) != BCM4712LARGE_PKG_ID) &&
	    ((cid & CID_REV_MASK) <= (3 << CID_REV_SHIFT))) {
		scc = (uint32 *)((uchar*)regs + OFFSETOF(chipcregs_t, slow_clk_ctl));
		val = R_REG(osh, scc);
		SB_ERROR((" initial scc = 0x%x\n", val));
		/* modification of 'val' is elided in this view */
		W_REG(osh, scc, val);

	/* attach the static kernel instance on the native SB bus */
	if (sb_doattach(&ksi, BCM4710_DEVICE_ID, osh, (void*)regs,
	                SB_BUS, NULL, NULL, NULL) == NULL) {
#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
/* WAR32414: force/release HT clock via chipcommon system_clk_ctl on
 * affected chips (see FORCEHT_WAR32414).
 */
BCMINITFN(sb_war32414_forceHT)(sb_t *sbh, bool forceHT)
	if (FORCEHT_WAR32414(si)) {
		sb_corereg((void*)si, SB_CC_IDX, OFFSETOF(chipcregs_t, system_clk_ctl),
/* Core attach sequence: validate the bus, discover cores, read chip id/rev,
 * initialize srom vars, and apply early workarounds.
 * NOTE(review): many statements (returns, else branches, case labels) are
 * elided in this view of the file.
 */
BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
                       uint bustype, void *sdh, char **vars, uint *varsz)
	ASSERT(GOODREGS(regs));

	bzero((uchar*)si, sizeof(sb_info_t));

	si->sb.buscoreidx = si->gpioidx = BADIDX;

	/* check to see if we are a sb core mimic'ing a pci core */
	if (bustype == PCI_BUS) {
		if (OSL_PCI_READ_CONFIG(si->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff) {
			SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB "
			          "devid:0x%x\n", __FUNCTION__, devid));

	si->sb.bustype = bustype;
	if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
		SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
		          si->sb.bustype, BUSTYPE(si->sb.bustype)));

	/* need to set memseg flag for CF card first before any sb registers access */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)

	/* kludge to enable the clock on the 4306 which lacks a slowclock */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS)
		sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);

	/* make sure BAR0 points somewhere sane before the first core access */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
		if (!GOODCOREADDR(w))
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32), SB_ENUM_BASE);

	/* initialize current core index value */
	si->curidx = _sb_coreidx(si);

	if (si->curidx == BADIDX) {
		SB_ERROR(("sb_doattach: bad core index\n"));

	/* get sonics backplane revision */
	sb = REGS2SB(si->curmap);
	si->sb.sonicsrev = (R_SBREG(si, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* keep and reuse the initial register mapping */
	origidx = si->curidx;
	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		si->regs[origidx] = regs;

	/* is core-0 a chipcommon core? */
	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, 0);
	if (sb_coreid(&si->sb) != SB_CC)

	/* determine chip id and rev */
		/* chip common core found! */
		si->sb.chip = R_REG(si->osh, &cc->chipid) & CID_ID_MASK;
		si->sb.chiprev = (R_REG(si->osh, &cc->chipid) & CID_REV_MASK) >> CID_REV_SHIFT;
		si->sb.chippkg = (R_REG(si->osh, &cc->chipid) & CID_PKG_MASK) >> CID_PKG_SHIFT;

		/* no chip common core -- must convert device id to chip id */
		if ((si->sb.chip = sb_pcidev2chip(devid)) == 0) {
			SB_ERROR(("sb_doattach: unrecognized device id 0x%04x\n", devid));
			sb_setcoreidx(&si->sb, origidx);

	/* get chipcommon rev */
	si->sb.ccrev = cc ? (int)sb_corerev(&si->sb) : NOREV;

	/* determine numcores: newer chipc revs report it in the chipid register */
	if (cc && ((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
		si->numcores = (R_REG(si->osh, &cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
		si->numcores = sb_chip2numcores(si->sb.chip);

	/* return to original core */
	sb_setcoreidx(&si->sb, origidx);

	/* fixup necessary chip/core configurations */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		if (sb_pci_fixcfg(si)) {
			SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));

	/* srom_var_init() depends on sb_scan() info */
	if (srom_var_init(si, si->sb.bustype, si->curmap, si->osh, vars, varsz)) {
		SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));

	/*
	 * The chip revision number is hardwired into all
	 * of the pci function config rev fields and is
	 * independent from the individual core revision numbers.
	 * For example, the "A0" silicon of each chip is chip rev 0.
	 * For PCMCIA we get it from the CIS instead.
	 */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
		si->sb.chiprev = getintvar(*vars, "chiprev");
	} else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_REV, sizeof(uint32));
		si->sb.chiprev = w & 0xff;

	/* CF cards with a register window <= 2K need the MEM_SEG trick */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
		w = getintvar(*vars, "regwindowsz");
		si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;

	/* gpio control core is required */
	if (!GOODIDX(si->gpioidx)) {
		SB_ERROR(("sb_doattach: gpio control core not found\n"));

	/* get boardtype and boardrev */
	switch (BUSTYPE(si->sb.bustype)) {
		/* do a pci config read to get subsystem id and subvendor id */
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_SVID, sizeof(uint32));
		si->sb.boardvendor = w & 0xffff;
		si->sb.boardtype = (w >> 16) & 0xffff;

		si->sb.boardvendor = getintvar(*vars, "manfid");
		si->sb.boardtype = getintvar(*vars, "prodid");

		si->sb.boardvendor = VENDOR_BROADCOM;
		if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
			si->sb.boardtype = 0xffff;

	if (si->sb.boardtype == 0) {
		SB_ERROR(("sb_doattach: unknown board type\n"));
		ASSERT(si->sb.boardtype);

	/* setup the GPIO based LED powersave register */
	if (si->sb.ccrev >= 16) {
		if ((vars == NULL) || ((w = getintvar(*vars, "leddc")) == 0))
			w = DEFAULT_GPIOTIMERVAL;
		sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);

	if (FORCEHT_WAR32414(si)) {
		/* set proper clk setup delays before forcing HT */
		sb_clkctl_init((void *)si);
		sb_war32414_forceHT((void *)si, 1);
	/* (tail of sb_coreid) core code lives in sbidhigh */
	sb = REGS2SB(si->curmap);

	return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);

/* public accessor for the current core index (body elided in this view) */
sb_coreidx(sb_t *sbh)

/* return current index of core */
_sb_coreidx(sb_info_t *si)
	/* derive the current backplane address from the bus-specific window */
	switch (BUSTYPE(si->sb.bustype)) {
		sb = REGS2SB(si->curmap);
		sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));

		sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));

		/* PCMCIA: window address is spread over three attribute registers */
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr = (uint)tmp << 12;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint)tmp << 16;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint)tmp << 24;

		sbaddr = (uint32)si->curmap;

	if (!GOODCOREADDR(sbaddr))

	/* index = offset of the core window within the enumeration space */
	return ((sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);

/* return the vendor code of the current core (from sbidhigh) */
sb_corevendor(sb_t *sbh)
	sb = REGS2SB(si->curmap);

	return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);

/* return the revision of the current core (from sbidhigh) */
sb_corerev(sb_t *sbh)
	sb = REGS2SB(si->curmap);
	sbidh = R_SBREG(si, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
/* set the OS handle once; refuses to overwrite an existing one */
sb_setosh(sb_t *sbh, osl_t *osh)
	if (si->osh != NULL) {
		SB_ERROR(("osh is already set....\n"));

/* set/clear sbtmstatelow core-specific flags */
sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
	sb = REGS2SB(si->curmap);

	ASSERT((val & ~mask) == 0);

	w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
	W_SBREG(si, &sb->sbtmstatelow, w);

	/* return the new value */
	return (R_SBREG(si, &sb->sbtmstatelow));

/* set/clear sbtmstatehigh core-specific flags */
sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
	sb = REGS2SB(si->curmap);

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SBTMH_FL_MASK) == 0);

	w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
	W_SBREG(si, &sb->sbtmstatehigh, w);

	/* return the new value */
	return (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_FL_MASK);

/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
sb_corebist(sb_t *sbh)
	sb = REGS2SB(si->curmap);

	/* force gated clocks and bist-enable, then wait for bist-done */
	sblo = R_SBREG(si, &sb->sbtmstatelow);
	W_SBREG(si, &sb->sbtmstatelow, (sblo | SBTML_FGC | SBTML_BE));

	SPINWAIT(((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTD) == 0), 100000);

	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTF)

	/* restore the original sbtmstatelow */
	W_SBREG(si, &sb->sbtmstatelow, sblo);

/* core is "up" when clocked and neither in reset nor rejecting */
sb_iscoreup(sb_t *sbh)
	sb = REGS2SB(si->curmap);

	return ((R_SBREG(si, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches are needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val)
	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SB_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (si->sb.bustype == SB_BUS) {
		/* If internal bus, we can always get at everything */
		r = (uint32 *)((uchar *)si->regs[coreidx] + regoff);
	} else if (si->sb.bustype == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
		if ((si->coreid[coreidx] == SB_CC) &&
		    ((si->sb.buscoretype == SB_PCIE) ||
		     (si->sb.buscorerev >= 13))) {
			/* Chipc registers are mapped at 12KB */
			r = (uint32 *)((char *)si->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (si->sb.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			if ((si->sb.buscoretype == SB_PCIE) ||
			    (si->sb.buscorerev >= 13))
				r = (uint32 *)((char *)si->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
				r = (uint32 *)((char *)si->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +

	/* slow path: mask interrupts and switch cores explicitly */
	INTR_OFF(si, intr_val);

	/* save current core index */
	origidx = sb_coreidx(&si->sb);

	r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);

	/* mask and set: sbconfig offsets go through the sb accessors */
	if (regoff >= SBCONFIGOFF) {
		w = (R_SBREG(si, r) & ~mask) | val;

		w = (R_REG(si->osh, r) & ~mask) | val;
		W_REG(si->osh, r, w);

	/* readback */
	if (regoff >= SBCONFIGOFF)

		w = R_REG(si->osh, r);

	/* restore core index */
	if (origidx != coreidx)
		sb_setcoreidx(&si->sb, origidx);

	INTR_RESTORE(si, intr_val);
/* Helpers to pick a byte/word out of a 32-bit PCI config dword read. */
#define DWORD_ALIGN(x)  (x & ~(0x03))
#define BYTE_POS(x) (x & 0x3)
#define WORD_POS(x) (x & 0x1)

#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
#define WORD_SHIFT(x)  (16 * WORD_POS(x))

#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)

/* NOTE: these expand 'si->osh' — usable only where a local 'si' is in scope */
#define read_pci_cfg_byte(a) \
	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)

#define read_pci_cfg_word(a) \
	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
/* return TRUE if requested capability exists in the PCI config space */
/* Walks the standard PCI capability linked list; optionally copies the
 * capability body (minus cap-id and next-ptr) into 'buf' / '*buflen'.
 */
sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)

	/* check for Header type 0 */
	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)

	/* check if the capability pointer field exists */
	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
	if (!(byte_val & PCI_CAPPTR_PRESENT))

	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
	/* check if the capability pointer is 0x00 */

	/* loop thr'u the capability list and see if the pcie capabilty exists */
	cap_id = read_pci_cfg_byte(cap_ptr);

	while (cap_id != req_cap_id) {
		cap_ptr = read_pci_cfg_byte((cap_ptr+1));	/* next-ptr byte */
		if (cap_ptr == 0x00) break;	/* end of list */
		cap_id = read_pci_cfg_byte(cap_ptr);

	if (cap_id != req_cap_id) {

	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		if (!bufsize) goto end;

		/* copy the capability data excluding cap ID and next ptr */
		if ((bufsize + cap_ptr) > SZPCR)
			bufsize = SZPCR - cap_ptr;

		*buf = read_pci_cfg_byte(cap_ptr);

/* return TRUE if PCIE capability exists the pci config space */
sb_ispcie(sb_info_t *si)
	return (sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL));
/* scan the sb enumerated space to identify all cores */
BCMINITFN(sb_scan)(sb_info_t *si)
	/* numcores should already be set */
	ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));

	/* save current core index */
	origidx = sb_coreidx(&si->sb);

	si->sb.buscorerev = NOREV;
	si->sb.buscoreidx = BADIDX;

	si->gpioidx = BADIDX;

	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;

	/* visit every core, record its id, and note pci/pcie/pcmcia bus cores */
	for (i = 0; i < si->numcores; i++) {
		sb_setcoreidx(&si->sb, i);
		si->coreid[i] = sb_coreid(&si->sb);

		if (si->coreid[i] == SB_PCI) {
			pcirev = sb_corerev(&si->sb);

		} else if (si->coreid[i] == SB_PCIE) {
			pcierev = sb_corerev(&si->sb);

		} else if (si->coreid[i] == SB_PCMCIA) {
			si->sb.buscorerev = sb_corerev(&si->sb);
			si->sb.buscoretype = si->coreid[i];
			si->sb.buscoreidx = i;

	/* selection between a found pci vs pcie core (condition elided here) */
		si->sb.buscoretype = SB_PCI;
		si->sb.buscorerev = pcirev;
		si->sb.buscoreidx = pciidx;

		si->sb.buscoretype = SB_PCIE;
		si->sb.buscorerev = pcierev;
		si->sb.buscoreidx = pcieidx;

	/*
	 * Find the gpio "controlling core" type and index.
	 * - if there's a chip common core - use that
	 * - else if there's a pci core (rev >= 2) - use that
	 * - else there had better be an extif core (4710 only)
	 */
	if (GOODIDX(sb_findcoreidx(si, SB_CC, 0))) {
		si->gpioidx = sb_findcoreidx(si, SB_CC, 0);

	} else if (PCI(si) && (si->sb.buscorerev >= 2)) {
		si->gpioidx = si->sb.buscoreidx;

	} else if (sb_findcoreidx(si, SB_EXTIF, 0)) {
		si->gpioidx = sb_findcoreidx(si, SB_EXTIF, 0);
		si->gpioid = SB_EXTIF;

	ASSERT(si->gpioidx != BADIDX);

	/* return to original core index */
	sb_setcoreidx(&si->sb, origidx);

/* may be called with core in reset */
/* (detach: unmap per-core register windows and free the handle) */
	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		for (idx = 0; idx < SB_MAXCORES; idx++)
				REG_UNMAP(si->regs[idx]);
				si->regs[idx] = NULL;

#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
	/* the static kernel instance (ksi) is not freed — handling elided here */
#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
		MFREE(si->osh, si, sizeof(sb_info_t));
/* use pci dev id to determine chip id for chips not having a chipcommon core */
BCMINITFN(sb_pcidev2chip)(uint pcidev)
	if ((pcidev >= BCM4710_DEVICE_ID) && (pcidev <= BCM47XX_USB_ID))
		return (BCM4710_CHIP_ID);
	if ((pcidev >= BCM4402_ENET_ID) && (pcidev <= BCM4402_V90_ID))
		return (BCM4402_CHIP_ID);
	if (pcidev == BCM4401_ENET_ID)
		return (BCM4402_CHIP_ID);

/* convert chip number to number of i/o cores */
/* (per-chip return values are elided in this view) */
BCMINITFN(sb_chip2numcores)(uint chip)
	if (chip == BCM4710_CHIP_ID)
	if (chip == BCM4402_CHIP_ID)
	if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
	if (chip == BCM4704_CHIP_ID)
	if (chip == BCM5365_CHIP_ID)

	SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));

/* return index of coreid or BADIDX if not found */
/* 'coreunit' selects the Nth instance of a core type */
sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit)
	for (i = 0; i < si->numcores; i++)
		if (si->coreid[i] == coreid) {
			if (found == coreunit)
/*
 * this function changes logical "focus" to the indicated core,
 * must be called with interrupt off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
 */
sb_setcoreidx(sb_t *sbh, uint coreidx)
	if (coreidx >= si->numcores)

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));

	/* backplane address of the target core's register window */
	sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);

	switch (BUSTYPE(si->sb.bustype)) {
		/* SB_BUS: lazily map each core's window once and cache it */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
			ASSERT(GOODREGS(si->regs[coreidx]));
		si->curmap = si->regs[coreidx];

		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);

		/* PCMCIA: program the window via three attribute registers */
		tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);

		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void *)sbaddr;
			ASSERT(GOODREGS(si->regs[coreidx]));
		si->curmap = si->regs[coreidx];
#endif	/* BCMJTAG */

	si->curidx = coreidx;

	return (si->curmap);

/*
 * this function changes logical "focus" to the indicated core,
 * must be called with interrupt off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
 */
sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
	idx = sb_findcoreidx(si, coreid, coreunit);

	return (sb_setcoreidx(sbh, idx));
/* return chip number */
	return (si->sb.chip);

/* return chip revision number */
sb_chiprev(sb_t *sbh)
	return (si->sb.chiprev);

/* return chip common revision number */
sb_chipcrev(sb_t *sbh)
	return (si->sb.ccrev);

/* return chip package option */
sb_chippkg(sb_t *sbh)
	return (si->sb.chippkg);

/* return PCI core rev. */
sb_pcirev(sb_t *sbh)
	return (si->sb.buscorerev);

/* WAR16165 applies to PCI bus cores up to rev 10 */
BCMINITFN(sb_war16165)(sb_t *sbh)
	return (PCI(si) && (si->sb.buscorerev <= 10));

/* WAR30841: retune the PCIe SerDes receiver via MDIO */
BCMINITFN(sb_war30841)(sb_info_t *si)
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);

/* return PCMCIA core rev. */
BCMINITFN(sb_pcmciarev)(sb_t *sbh)
	return (si->sb.buscorerev);

/* return board vendor id */
sb_boardvendor(sb_t *sbh)
	return (si->sb.boardvendor);
/* return boardtype */
/* On SB_BUS, resolve a missing/placeholder boardtype from nvram, falling back
 * to legacy board-name strings for old images.
 */
sb_boardtype(sb_t *sbh)
	if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
		/* boardtype format is a hex string */
		si->sb.boardtype = getintvar(NULL, "boardtype");

		/* backward compatibility for older boardtype string format */
		if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
			if (!strcmp(var, "bcm94710dev"))
				si->sb.boardtype = BCM94710D_BOARD;
			else if (!strcmp(var, "bcm94710ap"))
				si->sb.boardtype = BCM94710AP_BOARD;
			else if (!strcmp(var, "bu4710"))
				si->sb.boardtype = BU4710_BOARD;
			else if (!strcmp(var, "bcm94702mn"))
				si->sb.boardtype = BCM94702MN_BOARD;
			else if (!strcmp(var, "bcm94710r1"))
				si->sb.boardtype = BCM94710R1_BOARD;
			else if (!strcmp(var, "bcm94710r4"))
				si->sb.boardtype = BCM94710R4_BOARD;
			else if (!strcmp(var, "bcm94702cpci"))
				si->sb.boardtype = BCM94702CPCI_BOARD;
			else if (!strcmp(var, "bcm95380_rr"))
				si->sb.boardtype = BCM95380RR_BOARD;

	return (si->sb.boardtype);
/* return bus type of sbh device */
	return (si->sb.bustype);

/* return bus core type */
sb_buscoretype(sb_t *sbh)
	return (si->sb.buscoretype);

/* return bus core revision */
sb_buscorerev(sb_t *sbh)
	return (si->sb.buscorerev);

/* return list of found cores */
/* copies the discovered core ids into the caller's array */
sb_corelist(sb_t *sbh, uint coreid[])
	bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof(uint)));
	return (si->numcores);

/* return current register mapping */
sb_coreregs(sb_t *sbh)
	ASSERT(GOODREGS(si->curmap));

	return (si->curmap);

/* do buffered registers update */
/* Flushes posted backplane writes via the broadcast registers of chipcommon
 * (preferred) or the pci core.
 */
sb_commit(sb_t *sbh)
	origidx = si->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(si, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (si->sb.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(si->osh, &ccregs->broadcastdata, 0x0);
	} else if (PCI(si)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(si->osh, &pciregs->bcastdata, 0x0);

	/* restore core index */
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
/* reset and re-enable a core
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
sb_core_reset(sb_t *sbh, uint32 bits, uint32 resetbits)
	volatile uint32 dummy;	/* readbacks flush posted writes */

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sbh, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits | resetbits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);

	/* clear any pending serror / inband-error / timeout state */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(si, &sb->sbtmstatehigh, 0);

	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);

	/* leave clock enabled */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);

/* set backplane timeout values for the current core (skipped on pcie and
 * pci rev >= 5, where it is not needed)
 */
sb_core_tofixup(sb_t *sbh)
	if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
	    (PCI(si) && (si->sb.buscorerev >= 5)))

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
		SET_SBREG(si, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);

		if (sb_coreid(sbh) == SB_PCI) {
			SET_SBREG(si, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);

			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (Because of dma *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 */

#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

sb_set_initiator_to(sb_t *sbh, uint32 to)
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == error/-1 */

	/* reject values outside the timeout field mask */
	if ((to & ~TO_MASK) != 0)

	/* Figure out the master core */
	switch (BUSTYPE(si->sb.bustype)) {
		idx = si->sb.buscoreidx;

		idx = sb_findcoreidx(si, SB_PCMCIA, 0);

		/* native bus: the mips core is the master */
		if ((idx = sb_findcoreidx(si, SB_MIPS33, 0)) == BADIDX)
			idx = sb_findcoreidx(si, SB_MIPS, 0);

	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(sbh);

	sb = REGS2SB(sb_setcoreidx(sbh, idx));

	/* swap in the new timeout, returning the previous one */
	tmp = R_SBREG(si, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
/*
 * Put the current core into reset, leaving 'bits' (core-specific control
 * flags) set in sbtmstatelow.  NOTE(review): return type, braces and some
 * interior lines are elided in this extract.
 */
1514 sb_core_disable(sb_t *sbh, uint32 bits)
1517 volatile uint32 dummy;
1523 ASSERT(GOODREGS(si->curmap));
1524 sb = REGS2SB(si->curmap);
1526 /* if core is already in reset, just return */
1527 if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
1530 /* reject value changed between sonics 2.2 and 2.3 */
1531 if (si->sb.sonicsrev == SONICS_2_2)
1532 rej = (1 << SBTML_REJ_SHIFT);
1534 rej = (2 << SBTML_REJ_SHIFT);
1536 /* if clocks are not enabled, put into reset and return */
1537 if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
1540 /* set target reject and spin until busy is clear (preserve core-specific bits) */
1541 OR_SBREG(si, &sb->sbtmstatelow, rej);
/* read-back forces the posted write to complete before we poll */
1542 dummy = R_SBREG(si, &sb->sbtmstatelow);
1544 SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
1545 if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY)
1546 SB_ERROR(("%s: target state still busy\n", __FUNCTION__));
/* if the core is an initiator, also assert initiator reject and drain it */
1548 if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
1549 OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
1550 dummy = R_SBREG(si, &sb->sbimstate);
1552 SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
1555 /* set reset and reject while enabling the clocks */
1556 W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
1557 dummy = R_SBREG(si, &sb->sbtmstatelow);
1560 /* don't forget to clear the initiator reject bit */
1561 if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
1562 AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);
1565 /* leave reset and reject asserted */
1566 W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
1570 /* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
1572 sb_watchdog(sb_t *sbh, uint ticks)
1574 sb_info_t *si = SB_INFO(sbh);
1576 /* make sure we come up in fast clock mode */
1577 sb_clkctl_clk(sbh, CLK_FAST);
/* the watchdog register lives either in chipcommon or in extif,
 * depending on which core owns gpio control on this chip
 */
1580 switch (si->gpioid) {
/* BCM4785 with an immediate (<=1 tick) watchdog: MIPS-specific
 * pre-reset workaround — NOTE(review): surrounding #ifdef __mips__
 * and case labels are elided in this extract
 */
1583 if (sb_chip(sbh) == BCM4785_CHIP_ID && ticks <= 1)
1584 MTC0(C0_BROADCOM, 4, (1 << 22));
1585 #endif /* __mips__ */
1586 sb_corereg(si, 0, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
1588 if (sb_chip(sbh) == BCM4785_CHIP_ID && ticks <= 1) {
1589 __asm__ __volatile__ (
1597 #endif /* __mips__ */
/* extif-based chips: program the extif watchdog instead */
1600 sb_corereg(si, si->gpioidx, OFFSETOF(extifregs_t, watchdog), ~0, ticks);
1605 /* initialize the pcmcia core */
1607 sb_pcmcia_init(sb_t *sbh)
1614 /* enable d11 mac interrupts */
/* read-modify-write the function 0 configuration option register (COR)
 * in attribute space to enable interrupts and the function
 */
1615 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
1616 cor |= COR_IRQEN | COR_FUNEN;
1617 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
1623 * Configure the pci core for pci client (NIC) action
1624 * coremask is the bitvec of cores by index to be enabled.
1627 BCMINITFN(sb_pci_setup)(sb_t *sbh, uint coremask)
1631 sbpciregs_t *pciregs;
1639 /* if not pci bus, we're done */
1640 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
1643 ASSERT(PCI(si) || PCIE(si));
1644 ASSERT(si->sb.buscoreidx != BADIDX);
1646 /* get current core index */
1649 /* we interrupt on this backplane flag number */
1650 ASSERT(GOODREGS(si->curmap));
1651 sb = REGS2SB(si->curmap);
1652 sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
1654 /* switch over to pci core */
1655 pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
1656 sb = REGS2SB(pciregs);
1659 * Enable sb->pci interrupts. Assume
1660 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
1662 if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
1663 /* pci config write to set this core bit in PCIIntMask */
1664 w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
1665 w |= (coremask << PCI_SBIM_SHIFT);
1666 OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
1668 /* set sbintvec bit for our flag number */
1669 OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
/* PCI-core-only tuning: enable prefetch/burst, and read-multiple on rev >= 11.
 * NOTE(review): the enclosing if (PCI(si)) line is elided in this extract.
 */
1673 OR_REG(si->osh, &pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
1674 if (si->sb.buscorerev >= 11)
1675 OR_REG(si->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
1676 if (si->sb.buscorerev < 5) {
/* old PCI cores need explicit initiator timeouts */
1677 SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
1678 (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
1684 /* PCIE workarounds */
1686 if ((si->sb.buscorerev == 0) || (si->sb.buscorerev == 1)) {
1687 reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS,
1688 PCIE_TLP_WORKAROUNDSREG);
1690 sb_pcie_writereg((void *)sbh, (void *)PCIE_PCIEREGS,
1691 PCIE_TLP_WORKAROUNDSREG, reg_val);
1694 if (si->sb.buscorerev == 1) {
1695 reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS,
1698 sb_pcie_writereg(sbh, (void *)PCIE_PCIEREGS, PCIE_DLLP_LCREG, reg_val);
1701 if (si->sb.buscorerev == 0)
1706 /* switch back to previous core */
1707 sb_setcoreidx(sbh, idx);
/*
 * Decode the base address encoded in an SB admatch register value.
 * The two low-order type bits select which base-field mask applies.
 * NOTE(review): return type, type-0 branch header and final return are
 * elided in this extract.
 */
1711 sb_base(uint32 admatch)
1716 type = admatch & SBAM_TYPE_MASK;
1722 base = admatch & SBAM_BASE0_MASK;
1723 } else if (type == 1) {
1724 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1725 base = admatch & SBAM_BASE1_MASK;
1726 } else if (type == 2) {
1727 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1728 base = admatch & SBAM_BASE2_MASK;
/*
 * Decode the region size encoded in an SB admatch register value:
 * size = 2^(ADINT field + 1), with the field location chosen by the
 * admatch type bits.  NOTE(review): return type, type-0 branch header and
 * final return are elided in this extract.
 */
1735 sb_size(uint32 admatch)
1740 type = admatch & SBAM_TYPE_MASK;
1746 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1747 } else if (type == 1) {
1748 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1749 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1750 } else if (type == 2) {
1751 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1752 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1758 /* return the core-type instantiation # of the current core */
1760 sb_coreunit(sb_t *sbh)
1773 ASSERT(GOODREGS(si->curmap));
1774 coreid = sb_coreid(sbh);
1776 /* count the cores of our type */
/* the unit number is how many earlier-indexed cores share our coreid;
 * NOTE(review): the accumulation and return lines are elided in this extract
 */
1777 for (i = 0; i < idx; i++)
1778 if (si->coreid[i] == coreid)
/*
 * Map a CC_F6_x clockcontrol field encoding to its integer factor.
 * NOTE(review): the function name line and the switch header/default case
 * are elided in this extract — presumably this is the type-6 PLL factor
 * decoder used by sb_clock_rate(); confirm against the full file.
 */
1784 static INLINE uint32
1788 case CC_F6_2: return 2;
1789 case CC_F6_3: return 3;
1790 case CC_F6_4: return 4;
1791 case CC_F6_5: return 5;
1792 case CC_F6_6: return 6;
1793 case CC_F6_7: return 7;
1798 /* calculate the speed the SB would run at given a set of clockcontrol values */
1800 sb_clock_rate(uint32 pll_type, uint32 n, uint32 m)
1802 uint32 n1, n2, clock, m1, m2, m3, mc;
/* split the N register into its two factor fields */
1804 n1 = n & CN_N1_MASK;
1805 n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
/* per-PLL-type adjustment of the n1/n2 factors;
 * NOTE(review): several adjustment lines are elided in this extract
 */
1807 if (pll_type == PLL_TYPE6) {
1808 if (m & CC_T6_MMASK)
1812 } else if ((pll_type == PLL_TYPE1) ||
1813 (pll_type == PLL_TYPE3) ||
1814 (pll_type == PLL_TYPE4) ||
1815 (pll_type == PLL_TYPE7)) {
1818 } else if (pll_type == PLL_TYPE2) {
1821 ASSERT((n1 >= 2) && (n1 <= 7));
1822 ASSERT((n2 >= 5) && (n2 <= 23));
1823 } else if (pll_type == PLL_TYPE5) {
1827 /* PLL types 3 and 7 use BASE2 (25Mhz) */
1828 if ((pll_type == PLL_TYPE3) ||
1829 (pll_type == PLL_TYPE7)) {
1830 clock = CC_CLOCK_BASE2 * n1 * n2;
1832 clock = CC_CLOCK_BASE1 * n1 * n2;
/* split the M register into divider fields and the divider-control field */
1837 m1 = m & CC_M1_MASK;
1838 m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
1839 m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
1840 mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
1842 if ((pll_type == PLL_TYPE1) ||
1843 (pll_type == PLL_TYPE3) ||
1844 (pll_type == PLL_TYPE4) ||
1845 (pll_type == PLL_TYPE7)) {
1847 if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
/* apply the divider combination selected by the mc field */
1854 case CC_MC_BYPASS: return (clock);
1855 case CC_MC_M1: return (clock / m1);
1856 case CC_MC_M1M2: return (clock / (m1 * m2));
1857 case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
1858 case CC_MC_M1M3: return (clock / (m1 * m3));
1859 default: return (0);
/* type 2 PLL: dividers applied sequentially, each bypassable */
1862 ASSERT(pll_type == PLL_TYPE2);
1867 ASSERT((m1 >= 2) && (m1 <= 7));
1868 ASSERT((m2 >= 3) && (m2 <= 10));
1869 ASSERT((m3 >= 2) && (m3 <= 7));
1871 if ((mc & CC_T2MC_M1BYP) == 0)
1873 if ((mc & CC_T2MC_M2BYP) == 0)
1875 if ((mc & CC_T2MC_M3BYP) == 0)
1882 /* returns the current speed the SB is running at */
/* NOTE(review): the function signature line is elided in this extract
 * (presumably sb_clock(sb_t *sbh) — confirm against the full file)
 */
1891 uint32 pll_type, rate;
1896 pll_type = PLL_TYPE1;
1898 INTR_OFF(si, intr_val);
1900 /* switch to extif or chipc core */
1901 if ((eir = (extifregs_t *) sb_setcore(sbh, SB_EXTIF, 0))) {
1902 n = R_REG(si->osh, &eir->clockcontrol_n);
1903 m = R_REG(si->osh, &eir->clockcontrol_sb);
1904 } else if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0))) {
1905 pll_type = R_REG(si->osh, &cc->capabilities) & CAP_PLL_MASK;
/* no PLL: bail out early (return value line elided in this extract) */
1906 if (pll_type == PLL_NONE) {
1907 INTR_RESTORE(si, intr_val);
/* pick the M register appropriate for this PLL type / chip */
1910 n = R_REG(si->osh, &cc->clockcontrol_n);
1911 if (pll_type == PLL_TYPE6)
1912 m = R_REG(si->osh, &cc->clockcontrol_m3);
1913 else if ((pll_type == PLL_TYPE3) && !(BCMINIT(sb_chip)(sbh) == 0x5365))
1914 m = R_REG(si->osh, &cc->clockcontrol_m2);
1916 m = R_REG(si->osh, &cc->clockcontrol_sb);
1918 INTR_RESTORE(si, intr_val);
1922 /* calculate rate */
/* BCM5365 runs at a fixed rate (value line elided in this extract) */
1923 if (BCMINIT(sb_chip)(sbh) == 0x5365)
1926 rate = sb_clock_rate(pll_type, n, m);
1928 if (pll_type == PLL_TYPE3)
1932 /* switch back to previous core */
1933 sb_setcoreidx(sbh, idx);
1935 INTR_RESTORE(si, intr_val);
1940 /* change logical "focus" to the gpio core for optimized access */
1942 sb_gpiosetcore(sb_t *sbh)
/* returns the regs va of the gpio-owning core */
1948 return (sb_setcoreidx(sbh, si->gpioidx));
1951 /* mask&set gpiocontrol bits */
1953 sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
1961 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
1963 /* gpios could be shared on router platforms */
/* restrict the mask to reserved gpios for app users, or to
 * unreserved gpios for drivers
 */
1964 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
1965 mask = priority ? (sb_gpioreservation & mask) :
1966 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
/* locate the gpiocontrol register in the owning core (chipc or pci) */
1970 switch (si->gpioid) {
1972 regoff = OFFSETOF(chipcregs_t, gpiocontrol);
1976 regoff = OFFSETOF(sbpciregs_t, gpiocontrol);
1983 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
1986 /* mask&set gpio output enable bits */
1988 sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
1996 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
1998 /* gpios could be shared on router platforms */
/* same reservation-based mask filtering as sb_gpiocontrol */
1999 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2000 mask = priority ? (sb_gpioreservation & mask) :
2001 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
/* locate the output-enable register in the owning core */
2005 switch (si->gpioid) {
2007 regoff = OFFSETOF(chipcregs_t, gpioouten);
2011 regoff = OFFSETOF(sbpciregs_t, gpioouten);
2015 regoff = OFFSETOF(extifregs_t, gpio[0].outen);
2019 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
2022 /* mask&set gpio output bits */
2024 sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2032 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2034 /* gpios could be shared on router platforms */
/* same reservation-based mask filtering as sb_gpiocontrol */
2035 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2036 mask = priority ? (sb_gpioreservation & mask) :
2037 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
/* locate the output register in the owning core */
2041 switch (si->gpioid) {
2043 regoff = OFFSETOF(chipcregs_t, gpioout);
2047 regoff = OFFSETOF(sbpciregs_t, gpioout);
2051 regoff = OFFSETOF(extifregs_t, gpio[0].out);
2055 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
2058 /* reserve one gpio */
2060 sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2066 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2068 /* only cores on SB_BUS share GPIO's and only application users need to
2069 * reserve/release GPIO
2071 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2072 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2075 /* make sure only one bit is set */
2076 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2077 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2081 /* already reserved */
2082 if (sb_gpioreservation & gpio_bitmask)
2084 /* set reservation */
2085 sb_gpioreservation |= gpio_bitmask;
/* return the updated reservation bitmap */
2087 return sb_gpioreservation;
2090 /* release one gpio */
2092 * releasing the gpio doesn't change the current value on the GPIO last write value
2093 * persists till some one overwrites it
2097 sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2103 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2105 /* only cores on SB_BUS share GPIO's and only application users need to
2106 * reserve/release GPIO
2108 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2109 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2112 /* make sure only one bit is set */
2113 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2114 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2118 /* already released */
2119 if (!(sb_gpioreservation & gpio_bitmask))
2122 /* clear reservation */
2123 sb_gpioreservation &= ~gpio_bitmask;
/* return the updated reservation bitmap */
2125 return sb_gpioreservation;
2128 /* return the current gpioin register value */
2130 sb_gpioin(sb_t *sbh)
/* locate the input register in the owning core (chipc, pci or extif) */
2138 switch (si->gpioid) {
2140 regoff = OFFSETOF(chipcregs_t, gpioin);
2144 regoff = OFFSETOF(sbpciregs_t, gpioin);
2148 regoff = OFFSETOF(extifregs_t, gpioin);
/* mask==0, val==0 makes sb_corereg a pure read */
2152 return (sb_corereg(si, si->gpioidx, regoff, 0, 0));
2155 /* mask&set gpio interrupt polarity bits */
2157 sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2165 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2167 /* gpios could be shared on router platforms */
/* same reservation-based mask filtering as sb_gpiocontrol */
2168 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2169 mask = priority ? (sb_gpioreservation & mask) :
2170 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2174 switch (si->gpioid) {
2176 regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
2180 /* pci gpio implementation does not support interrupt polarity */
2185 regoff = OFFSETOF(extifregs_t, gpiointpolarity);
2189 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
2192 /* mask&set gpio interrupt mask bits */
2194 sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2202 priority = GPIO_DRV_PRIORITY; /* compatibility hack */
2204 /* gpios could be shared on router platforms */
/* same reservation-based mask filtering as sb_gpiocontrol */
2205 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2206 mask = priority ? (sb_gpioreservation & mask) :
2207 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2211 switch (si->gpioid) {
2213 regoff = OFFSETOF(chipcregs_t, gpiointmask);
2217 /* pci gpio implementation does not support interrupt mask */
2222 regoff = OFFSETOF(extifregs_t, gpiointmask);
2226 return (sb_corereg(si, si->gpioidx, regoff, mask, val));
2229 /* assign the gpio to an led */
2231 sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
/* gpiotimeroutmask only exists on chipcommon rev >= 16 */
2236 if (si->sb.ccrev < 16)
2239 /* gpio led powersave reg */
2240 return (sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
2243 /* mask & set gpio timer val */
2245 sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
/* gpiotimerval only exists on chipcommon rev >= 16 */
2250 if (si->sb.ccrev < 16)
2253 return (sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
2257 /* return the slow clock source - LPO, XTAL, or PCI */
2259 sb_slowclk_src(sb_info_t *si)
/* caller must have already selected the chipcommon core */
2264 ASSERT(sb_coreid(&si->sb) == SB_CC);
2266 if (si->sb.ccrev < 6) {
/* ccrev < 6: source is inferred from a PCI config gpio bit */
2267 if ((BUSTYPE(si->sb.bustype) == PCI_BUS) &&
2268 (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32)) &
2270 return (SCC_SS_PCI);
2272 return (SCC_SS_XTAL);
2273 } else if (si->sb.ccrev < 10) {
/* ccrev 6..9: read the source field from slow_clk_ctl */
2274 cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
2275 return (R_REG(si->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
2276 } else /* Insta-clock */
2277 return (SCC_SS_XTAL);
2280 /* return the ILP (slowclock) min or max frequency */
2282 sb_slowclk_freq(sb_info_t *si, bool max)
/* caller must have already selected the chipcommon core */
2289 ASSERT(sb_coreid(&si->sb) == SB_CC);
2291 cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
2293 /* shouldn't be here unless we've established the chip has dynamic clk control */
2294 ASSERT(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL);
2296 slowclk = sb_slowclk_src(si);
2297 if (si->sb.ccrev < 6) {
/* ccrev < 6: fixed dividers (64 for PCI source, 32 for xtal) */
2298 if (slowclk == SCC_SS_PCI)
2299 return (max? (PCIMAXFREQ/64) : (PCIMINFREQ/64));
2301 return (max? (XTALMAXFREQ/32) : (XTALMINFREQ/32));
2302 } else if (si->sb.ccrev < 10) {
/* ccrev 6..9: programmable divider from slow_clk_ctl */
2303 div = 4 * (((R_REG(si->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
2304 if (slowclk == SCC_SS_LPO)
2305 return (max? LPOMAXFREQ : LPOMINFREQ);
2306 else if (slowclk == SCC_SS_XTAL)
2307 return (max? (XTALMAXFREQ/div) : (XTALMINFREQ/div));
2308 else if (slowclk == SCC_SS_PCI)
2309 return (max? (PCIMAXFREQ/div) : (PCIMINFREQ/div));
2313 /* Chipc rev 10 is InstaClock */
2314 div = R_REG(si->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
2315 div = 4 * (div + 1);
2316 return (max ? XTALMAXFREQ : (XTALMINFREQ/div));
/*
 * Program the chipcommon PLL-on and frequency-reference-select delay
 * registers based on the worst-case (max) slow-clock frequency.
 */
2322 BCMINITFN(sb_clkctl_setdelay)(sb_info_t *si, void *chipcregs)
2325 uint slowmaxfreq, pll_delay, slowclk;
2326 uint pll_on_delay, fref_sel_delay;
2328 pll_delay = PLL_DELAY;
2330 /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
2331 * since the xtal will also be powered down by dynamic clk control logic.
2334 slowclk = sb_slowclk_src(si);
2335 if (slowclk != SCC_SS_XTAL)
2336 pll_delay += XTAL_ON_DELAY;
2338 /* Starting with 4318 it is ILP that is used for the delays */
2339 slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);
/* convert microsecond delays to slow-clock ticks, rounding up */
2341 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
2342 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
2344 cc = (chipcregs_t *)chipcregs;
2345 W_REG(si->osh, &cc->pll_on_delay, pll_on_delay);
2346 W_REG(si->osh, &cc->fref_sel_delay, fref_sel_delay);
2349 /* initialize power control delay registers */
2351 BCMINITFN(sb_clkctl_init)(sb_t *sbh)
2359 origidx = si->curidx;
2361 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
/* 4321 A0/A1 need non-default chipcontrol values */
2364 if ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev < 2))
2365 W_REG(si->osh, &cc->chipcontrol,
2366 (si->sb.chiprev == 0) ? CHIPCTRL_4321A0_DEFAULT : CHIPCTRL_4321A1_DEFAULT);
/* nothing to do without power control capability
 * NOTE(review): the skip-target lines between here and the else-if
 * are elided in this extract
 */
2368 if (!(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL))
2371 /* set all Instaclk chip ILP to 1 MHz */
2372 else if (si->sb.ccrev >= 10)
2373 SET_REG(si->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
2374 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
2376 sb_clkctl_setdelay(si, (void *)cc);
/* restore the originally selected core */
2379 sb_setcoreidx(sbh, origidx);
2382 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
2384 sb_clkctl_fast_pwrup_delay(sb_t *sbh)
2395 origidx = si->curidx;
2397 INTR_OFF(si, intr_val);
2399 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
2402 if (!(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL))
/* worst-case pll-on time in microseconds at the minimum slow-clock rate,
 * rounded up
 */
2405 slowminfreq = sb_slowclk_freq(si, FALSE);
2406 fpdelay = (((R_REG(si->osh, &cc->pll_on_delay) + 2) * 1000000) +
2407 (slowminfreq - 1)) / slowminfreq;
/* restore the original core and interrupt state */
2410 sb_setcoreidx(sbh, origidx);
2411 INTR_RESTORE(si, intr_val);
2415 /* turn primary xtal and/or pll off/on */
2417 sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
2420 uint32 in, out, outen;
/* only the PCI bus path is visible in this extract; other bus cases
 * are elided
 */
2424 switch (BUSTYPE(si->sb.bustype)) {
2433 /* pcie core doesn't have any mapping to control the xtal pu */
/* xtal/pll are driven through PCI-config-space gpio registers */
2437 in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof(uint32));
2438 out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32));
2439 outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32));
2442 * Avoid glitching the clock if GPRS is already using it.
2443 * We can't actually read the state of the PLLPD so we infer it
2444 * by the value of XTAL_PU which *is* readable via gpioin.
2446 if (on && (in & PCI_CFG_GPIO_XTAL))
2450 outen |= PCI_CFG_GPIO_XTAL;
2452 outen |= PCI_CFG_GPIO_PLL;
2455 /* turn primary xtal on */
2457 out |= PCI_CFG_GPIO_XTAL;
2459 out |= PCI_CFG_GPIO_PLL;
2460 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
2461 sizeof(uint32), out);
2462 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
2463 sizeof(uint32), outen);
/* wait for the crystal to stabilize */
2464 OSL_DELAY(XTAL_ON_DELAY);
/* pll-on path (enclosing condition elided in this extract) */
2469 out &= ~PCI_CFG_GPIO_PLL;
2470 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
2471 sizeof(uint32), out);
/* off path: drop xtal, power down pll */
2476 out &= ~PCI_CFG_GPIO_XTAL;
2478 out |= PCI_CFG_GPIO_PLL;
2479 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32), out);
2480 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32),
2491 /* set dynamic clk control mode (forceslow, forcefast, dynamic) */
2492 /* returns true if we are forcing fast clock */
2494 sb_clkctl_clk(sb_t *sbh, uint mode)
2504 /* chipcommon cores prior to rev6 don't support dynamic clock control */
2505 if (si->sb.ccrev < 6)
2509 /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
2510 ASSERT(si->sb.ccrev != 10);
2512 INTR_OFF(si, intr_val);
2514 origidx = si->curidx;
/* old mips cores on the SB bus can't do dynamic clocking
 * (early-out lines elided in this extract)
 */
2516 if (sb_setcore(sbh, SB_MIPS33, 0) && (sb_corerev(&si->sb) <= 7) &&
2517 (BUSTYPE(si->sb.bustype) == SB_BUS) && (si->sb.ccrev >= 10))
2520 /* PR32414WAR "Force HT clock on" all the time, no dynamic clk ctl */
2521 if ((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev <= 1))
2524 cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
2527 if (!(R_REG(si->osh, &cc->capabilities) & CAP_PWR_CTL))
2531 case CLK_FAST: /* force fast (pll) clock */
2532 if (si->sb.ccrev < 10) {
2533 /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
2534 sb_clkctl_xtal(&si->sb, XTAL, ON);
2536 SET_REG(si->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
/* ccrev > 10: request HT via system_clk_ctl instead */
2538 OR_REG(si->osh, &cc->system_clk_ctl, SYCC_HR);
2539 /* wait for the PLL */
2540 OSL_DELAY(PLL_DELAY);
2543 case CLK_DYNAMIC: /* enable dynamic clock control */
2545 if (si->sb.ccrev < 10) {
2546 scc = R_REG(si->osh, &cc->slow_clk_ctl);
2547 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
2548 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
2550 W_REG(si->osh, &cc->slow_clk_ctl, scc);
2552 /* for dynamic control, we have to release our xtal_pu "force on" */
2554 sb_clkctl_xtal(&si->sb, XTAL, OFF);
/* ccrev > 10: clear the HT request */
2557 AND_REG(si->osh, &cc->system_clk_ctl, ~SYCC_HR);
/* restore core and interrupt state; report whether fast clock is forced */
2566 sb_setcoreidx(sbh, origidx);
2567 INTR_RESTORE(si, intr_val);
2568 return (mode == CLK_FAST);
2571 /* register driver interrupt disabling and restoring callback functions */
2573 sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn,
2574 void *intrsenabled_fn, void *intr_arg)
/* stash the callbacks; INTR_OFF/INTR_RESTORE macros invoke them later */
2579 si->intr_arg = intr_arg;
2580 si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
2581 si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
2582 si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
2583 /* save current core id. when this function called, the current core
2584 * must be the core which provides driver functions(il, et, wl, etc.)
2586 si->dev_coreid = si->coreid[si->curidx];
/*
 * Synthesize PCI IDs (vendor/device/class/subclass/progif/header) for the
 * current core, for exposing backplane cores as PCI functions.
 * NOTE(review): many case labels, break statements and the error paths are
 * elided in this extract; comments describe only the visible statements.
 */
2591 sb_corepciid(sb_t *sbh, uint func, uint16 *pcivendor, uint16 *pcidevice,
2592 uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
2595 uint16 vendor = 0xffff, device = 0xffff;
2599 char varname[SB_DEVPATH_BUFSZ + 8];
2600 uint8 class, subclass, progif;
2601 char devpath[SB_DEVPATH_BUFSZ];
2604 core = sb_coreid(sbh);
2605 unit = sb_coreunit(sbh);
2607 chip = sb_chip(sbh);
2608 chippkg = sb_chippkg(sbh);
2611 header = PCI_HEADER_NORMAL;
2613 /* Verify whether the function exists for the core */
2614 nfunc = (core == SB_USB20H) ? 2 : 1;
2618 /* Known vendor translations */
2619 switch (sb_corevendor(sbh)) {
2621 vendor = VENDOR_BROADCOM;
2627 /* Determine class based on known core codes */
2630 class = PCI_CLASS_NET;
2631 subclass = PCI_NET_ETHER;
2632 device = BCM47XX_ILINE_ID;
2635 class = PCI_CLASS_NET;
2636 subclass = PCI_NET_ETHER;
2637 device = BCM47XX_ENET_ID;
2640 class = PCI_CLASS_NET;
2641 subclass = PCI_NET_ETHER;
2642 device = BCM47XX_GIGETH_ID;
2646 class = PCI_CLASS_MEMORY;
2647 subclass = PCI_MEMORY_RAM;
2648 device = (uint16)core;
2652 class = PCI_CLASS_BRIDGE;
2653 subclass = PCI_BRIDGE_PCI;
2654 device = (uint16)core;
2655 header = PCI_HEADER_BRIDGE;
2659 class = PCI_CLASS_CPU;
2660 subclass = PCI_CPU_MIPS;
2661 device = (uint16)core;
2664 class = PCI_CLASS_COMM;
2665 subclass = PCI_COMM_MODEM;
2666 device = BCM47XX_V90_ID;
2669 class = PCI_CLASS_SERIAL;
2670 subclass = PCI_SERIAL_USB;
2671 progif = 0x10; /* OHCI */
2672 device = BCM47XX_USB_ID;
2675 class = PCI_CLASS_SERIAL;
2676 subclass = PCI_SERIAL_USB;
2677 progif = 0x10; /* OHCI */
2678 device = BCM47XX_USBH_ID;
/* USB 2.0 host: two functions, OHCI (func 0) and EHCI (func 1) */
2681 class = PCI_CLASS_SERIAL;
2682 subclass = PCI_SERIAL_USB;
2683 progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI */
2684 device = BCM47XX_USB20H_ID;
2685 header = 0x80; /* multifunction */
2688 class = PCI_CLASS_SERIAL;
2689 subclass = PCI_SERIAL_USB;
2690 device = BCM47XX_USBD_ID;
2693 class = PCI_CLASS_SERIAL;
2694 subclass = PCI_SERIAL_USB;
2695 device = BCM47XX_USB20D_ID;
2698 class = PCI_CLASS_CRYPT;
2699 subclass = PCI_CRYPT_NETWORK;
2700 device = BCM47XX_IPSEC_ID;
2703 class = PCI_CLASS_NET;
2704 subclass = PCI_NET_OTHER;
2705 device = BCM47XX_ROBO_ID;
2709 class = PCI_CLASS_MEMORY;
2710 subclass = PCI_MEMORY_FLASH;
2711 device = (uint16)core;
/* d11 (wireless) core: device id comes from nvram when present */
2714 class = PCI_CLASS_NET;
2715 subclass = PCI_NET_OTHER;
2716 /* Let nvram variable override core ID */
2717 sb_devpath(sbh, devpath, sizeof(devpath));
2718 sprintf(varname, "%sdevid", devpath);
2719 if ((device = (uint16)getintvar(NULL, varname)))
2722 * no longer support wl%did, but keep the code
2723 * here for backward compatibility.
2725 sprintf(varname, "wl%did", unit);
2726 if ((device = (uint16)getintvar(NULL, varname)))
2728 /* Chip specific conversion */
2729 if (chip == BCM4712_CHIP_ID) {
2730 if (chippkg == BCM4712SMALL_PKG_ID)
2731 device = BCM4306_D11G_ID;
2733 device = BCM4306_D11DUAL_ID;
2740 class = PCI_CLASS_XOR;
2741 subclass = PCI_XOR_QDMA;
2742 device = BCM47XX_SATAXOR_ID;
2745 class = PCI_CLASS_DASDI;
2746 subclass = PCI_DASDI_IDE;
2747 device = BCM47XX_ATA100_ID;
/* unknown core: report everything as 0xff / raw core id */
2751 class = subclass = progif = 0xff;
2752 device = (uint16)core;
/* write results through the caller's out-parameters */
2756 *pcivendor = vendor;
2757 *pcidevice = device;
2759 *pcisubclass = subclass;
2760 *pciprogif = progif;
2761 *pciheader = header;
2768 /* use the mdio interface to write to mdio slaves */
2770 sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint regaddr, uint val)
2774 sbpcieregs_t *pcieregs;
2776 pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
2779 /* enable mdio access to SERDES */
2780 W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
/* build the full MDIO write frame: start + op + dev addr + reg addr +
 * turnaround + data
 */
2782 mdiodata = MDIODATA_START | MDIODATA_WRITE |
2783 (physmedia << MDIODATA_DEVADDR_SHF) |
2784 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
2786 W_REG(si->osh, (&pcieregs->mdiodata), mdiodata);
2790 /* retry till the transaction is complete */
2792 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
2793 /* Disable mdio access to SERDES */
2794 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
/* timeout path: report failure and disable mdio access
 * NOTE(review): the loop construct and return values are elided
 * in this extract
 */
2801 SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
2802 /* Disable mdio access to SERDES */
2803 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
2809 /* indirect way to read pcie config regs */
2811 sb_pcie_readreg(void *sb, void* arg1, uint offset)
2815 uint retval = 0xFFFFFFFF;
2816 sbpcieregs_t *pcieregs;
2823 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
/* arg1 carries the address-space selector as an integer */
2826 addrtype = (uint)((uintptr)arg1);
2828 case PCIE_CONFIGREGS:
/* indirect access: write the offset, then read the data register */
2829 W_REG(si->osh, (&pcieregs->configaddr), offset);
2830 retval = R_REG(si->osh, &(pcieregs->configdata));
/* PCIE_PCIEREGS case (label elided in this extract) */
2833 W_REG(si->osh, &(pcieregs->pcieaddr), offset);
2834 retval = R_REG(si->osh, &(pcieregs->pciedata));
2843 /* indirect way to write pcie config/mdio/pciecore regs */
2845 sb_pcie_writereg(sb_t *sbh, void *arg1, uint offset, uint val)
2848 sbpcieregs_t *pcieregs;
2854 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
/* arg1 carries the address-space selector as an integer */
2857 addrtype = (uint)((uintptr)arg1);
2860 case PCIE_CONFIGREGS:
/* indirect access: write the offset, then the data */
2861 W_REG(si->osh, (&pcieregs->configaddr), offset);
2862 W_REG(si->osh, (&pcieregs->configdata), val);
/* PCIE_PCIEREGS case (label elided in this extract) */
2865 W_REG(si->osh, &(pcieregs->pcieaddr), offset);
2866 W_REG(si->osh, &(pcieregs->pciedata), val);
2875 /* Build device path. Support SB, PCI, and JTAG for now. */
2877 sb_devpath(sb_t *sbh, char *path, int size)
2880 ASSERT(size >= SB_DEVPATH_BUFSZ);
/* the path prefix format depends on the host bus type */
2882 switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
2885 sprintf(path, "sb/%u/", sb_coreidx(sbh));
2888 ASSERT((SB_INFO(sbh))->osh);
2889 sprintf(path, "pci/%u/%u/", OSL_PCI_BUS((SB_INFO(sbh))->osh),
2890 OSL_PCI_SLOT((SB_INFO(sbh))->osh));
2893 SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
2894 SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
2895 sprintf(path, "pc/%u/%u/", 1, 1);
2898 SB_ERROR(("sb_devpath: device 0 assumed\n"));
2899 sprintf(path, "sd/%u/", sb_coreidx(sbh));
2910 * Fixup SROMless PCI device's configuration.
2911 * The current core may be changed upon return.
2914 sb_pci_fixcfg(sb_info_t *si)
2916 uint origidx, pciidx;
2917 sbpciregs_t *pciregs;
2918 sbpcieregs_t *pcieregs;
2919 uint16 val16, *reg16;
2920 char name[SB_DEVPATH_BUFSZ+16], *value;
2921 char devpath[SB_DEVPATH_BUFSZ];
2923 ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
2925 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
2926 /* save the current index */
2927 origidx = sb_coreidx(&si->sb);
2929 /* check 'pi' is correct and fix it if not */
2930 if (si->sb.buscoretype == SB_PCIE) {
2931 pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
2933 reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
2934 } else if (si->sb.buscoretype == SB_PCI) {
2935 pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
2937 reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
/* rewrite the PI field only if it doesn't match the actual core index */
2942 pciidx = sb_coreidx(&si->sb);
2943 val16 = R_REG(si->osh, reg16);
2944 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
2945 val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
2946 W_REG(si->osh, reg16, val16);
2949 /* restore the original index */
2950 sb_setcoreidx(&si->sb, origidx);
2953 * Fixup bar0window in PCI config space to make the core indicated
2954 * by the nvram variable the current core.
2955 * !Do it last, it may change the current core!
2957 if (sb_devpath(&si->sb, devpath, sizeof(devpath)))
2959 sprintf(name, "%sb0w", devpath);
2960 if ((value = getvar(NULL, name))) {
2961 OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32),
2962 bcm_strtoul(value, NULL, 16));
2963 /* update curidx since the current core is changed */
2964 si->curidx = _sb_coreidx(si);
2965 if (si->curidx == BADIDX) {
2966 SB_ERROR(("sb_pci_fixcfg: bad core index\n"));
/* Return the chipcommon capabilities register value, or the elided
 * fallback when no chipcommon core is present.
 */
2975 sb_chipc_capability(sb_t *sbh)
2981 /* Make sure that there is ChipCommon core present */
2982 if (si->coreid[SB_CC_IDX] == SB_CC)
/* mask==0, val==0 makes sb_corereg a pure read */
2983 return (sb_corereg(si, SB_CC_IDX, OFFSETOF(chipcregs_t, capabilities),
2988 /* Return ADDR64 capability of the backplane */
2990 sb_backplane64(sb_t *sbh)
2992 return ((sb_chipc_capability(sbh) & CAP_BKPLN64) != 0);
/*
 * Bluetooth-coexistence gpio workaround: force the uart0 modem-control
 * register bit 2 on when the chipcommon UART/GPIO strap is set.
 */
2996 sb_btcgpiowar(sb_t *sbh)
3004 /* Make sure that there is ChipCommon core present &&
3005 * UART_TX is strapped to 1
3007 if (!(sb_chipc_capability(sbh) & CAP_UARTGPIO))
3010 /* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
3011 INTR_OFF(si, intr_val);
3013 origidx = sb_coreidx(sbh);
3015 cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
3019 W_REG(si->osh, &cc->uart0mcr, R_REG(si->osh, &cc->uart0mcr) | 0x04);
3022 /* restore the original index */
3023 sb_setcoreidx(sbh, origidx);
3025 INTR_RESTORE(si, intr_val);
3028 /* check if the device is removed */
3030 sb_deviceremoved(sb_t *sbh)
3037 switch (BUSTYPE(si->sb.bustype)) {
/* PCI: a removed/hung device reads back all-ones config space, so the
 * vendor id no longer matches Broadcom
 */
3040 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_VID, sizeof(uint32));
3041 if ((w & 0xFFFF) != VENDOR_BROADCOM)
3051 /* Return the RAM size of the SOCRAM core */
3053 sb_socram_size(sb_t *sbh)
3059 sbsocramregs_t *regs;
3068 /* Block ints and save current core */
3069 INTR_OFF(si, intr_val);
3070 origidx = sb_coreidx(sbh);
3072 /* Switch to SOCRAM core */
3073 if (!(regs = sb_setcore(sbh, SB_SOCRAM, 0)))
3076 /* Get info for determining size */
3077 if (!(wasup = sb_iscoreup(sbh)))
3078 sb_core_reset(sbh, 0, 0);
3079 corerev = sb_corerev(sbh);
3080 coreinfo = R_REG(si->osh, ®s->coreinfo);
3082 /* Calculate size from coreinfo based on rev */
3085 memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
3087 default: /* rev >= 1 */
3088 memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
3089 memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3093 /* Return to previous state and core */
3095 sb_core_disable(sbh, 0);
3096 sb_setcoreidx(sbh, origidx);
3099 INTR_RESTORE(si, intr_val);