DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/altq/if_altq.h --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/altq/if_altq.h Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/altq/if_altq.h Thu Nov 8 15:41:36 2012 @@ -125,27 +125,42 @@ struct oldtb_regulator { #define ALTQ_IS_READY(ifq) ((ifq)->altq_flags & ALTQF_READY) #define ALTQ_IS_ENABLED(ifq) ((ifq)->altq_flags & ALTQF_ENABLED) #define ALTQ_NEEDS_CLASSIFY(ifq) ((ifq)->altq_flags & ALTQF_CLASSIFY) #define ALTQ_IS_CNDTNING(ifq) ((ifq)->altq_flags & ALTQF_CNDTNING) #define ALTQ_SET_CNDTNING(ifq) ((ifq)->altq_flags |= ALTQF_CNDTNING) #define ALTQ_CLEAR_CNDTNING(ifq) ((ifq)->altq_flags &= ~ALTQF_CNDTNING) #define ALTQ_IS_ATTACHED(ifq) ((ifq)->altq_disc != NULL) #define ALTQ_ENQUEUE(ifq, m, pa, err) \ - (err) = (*(ifq)->altq_enqueue)((ifq),(m),(pa)) +do { \ + mtx_enter(&net_mtx); \ + (err) = (*(ifq)->altq_enqueue)((ifq),(m),(pa)); \ + mtx_leave(&net_mtx); \ +} while (0) + #define ALTQ_DEQUEUE(ifq, m) \ - (m) = (*(ifq)->altq_dequeue)((ifq), ALTDQ_REMOVE) +do { \ + mtx_enter(&net_mtx); \ + (m) = (*(ifq)->altq_dequeue)((ifq), ALTDQ_REMOVE); \ + mtx_leave(&net_mtx); \ +} while (0) + #define ALTQ_POLL(ifq, m) \ (m) = (*(ifq)->altq_dequeue)((ifq), ALTDQ_POLL) + #define ALTQ_PURGE(ifq) \ - (void)(*(ifq)->altq_request)((ifq), ALTRQ_PURGE, (void *)0) +do { \ + mtx_enter(&net_mtx); \ + (void)(*(ifq)->altq_request)((ifq), ALTRQ_PURGE, (void *)0); \ + mtx_leave(&net_mtx); \ +} while (0) #define ALTQ_IS_EMPTY(ifq) ((ifq)->ifq_len == 0) #define OLDTBR_IS_ENABLED(ifq) ((ifq)->altq_tbr != NULL) #define TBR_IS_ENABLED(ifq) OLDTBR_IS_ENABLED(ifq) extern int altq_attach(struct ifaltq *, int, void *, int (*)(struct ifaltq *, struct mbuf *, struct altq_pktattr *), struct mbuf *(*)(struct ifaltq *, int), int (*)(struct ifaltq *, int, void *), void *, DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/conf/GENUOS --- 
/mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/arch/i386/conf/GENUOS Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/conf/GENUOS Thu Nov 8 11:28:00 2012 @@ -3,12 +3,13 @@ machine i386 # ramdisk support: option RAMDISK_HOOKS option MINIROOTSIZE=36864 option NKPTP=8 pseudo-device rd 1 config bsd root on rd0a swap on wd0b option DEVICE_POLLING # network polling support option HZ=1000 # needed for polling #XXX bad for qemu images +option MP_LOCKDEBUG include "arch/i386/conf/GENUOS_COMMON" DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/i386/lock_machdep.c DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/include/cpu.h --- /mount/blink/aegis/project/genuos/branch.10/branch.50/baseline/os/src/sys/arch/i386/include/cpu.h Thu Aug 23 14:06:46 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/include/cpu.h Mon Oct 29 16:51:47 2012 @@ -96,20 +96,21 @@ struct cpu_info { struct proc *ci_fpcurproc; /* current owner of the FPU */ struct proc *ci_fpsaveproc; int ci_fpsaving; /* save in progress */ struct pcb *ci_curpcb; /* VA of current HW PCB */ struct pcb *ci_idle_pcb; /* VA of current PCB */ int ci_idle_tss_sel; /* TSS selector of idle PCB */ struct pmap *ci_curpmap; struct intrsource *ci_isources[MAX_INTR_SOURCES]; + int ci_intrs[NIPL]; u_int32_t ci_ipending; int ci_ilevel; int ci_idepth; u_int32_t ci_imask[NIPL]; u_int32_t ci_iunmask[NIPL]; #ifdef DIAGNOSTIC int ci_mutex_level; #endif paddr_t ci_idle_pcb_paddr; /* PA of idle PCB */ DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/pci/pci_machdep.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/arch/i386/pci/pci_machdep.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/arch/i386/pci/pci_machdep.c Tue Oct 30 17:30:14 2012 @@ -107,20 +107,23 @@ extern bios_pciinfo_t *bios_pciinfo; #include #if NIOAPIC > 0 #include #endif #include 
"pcibios.h" #if NPCIBIOS > 0 #include #endif + +struct cpu_info *pci_choosecpu_intr(struct intrhand *); + int pci_mode = -1; /* * Memory Mapped Configuration space access. * * Since mapping the whole configuration space will cost us up to * 256MB of kernel virtual memory, we use seperate mappings per bus. * The mappings are created on-demand, such that we only use kernel * virtual memory for busses that are actually present. */ @@ -772,22 +775,24 @@ void * pci_intr_establish(pci_chipset_tag_t pc, pci_intr_handle_t ih, int level, int (*func)(void *), void *arg, const char *what) { void *ret; int bus, dev; int l = ih.line & APIC_INT_LINE_MASK; pcitag_t tag = ih.tag; int irq = ih.line; if (ih.line & APIC_INT_VIA_MSG) { + struct cpu_info *ci; struct intrhand *ih; pcireg_t reg; + u_int32_t msiaddr; int off, vec; if (pci_get_capability(pc, tag, PCI_CAP_MSI, &off, ®) == 0) panic("%s: no msi capability", __func__); vec = idt_vec_alloc(level, level + 15); if (vec == 0) return (NULL); ih = malloc(sizeof(*ih), M_DEVBUF, cold ? 
M_NOWAIT : M_WAITOK); @@ -800,26 +805,30 @@ pci_intr_establish(pci_chipset_tag_t pc, pci_intr_hand ih->ih_level = level; ih->ih_irq = irq; ih->ih_pin = tag.mode1; ih->ih_vec = vec; evcount_attach(&ih->ih_count, what, &ih->ih_vec); apic_maxlevel[vec] = level; apic_intrhand[vec] = ih; idt_vec_set(vec, apichandler[vec & 0xf]); + ci = pci_choosecpu_intr(ih); + printf(" (cpu = %d)", ci->ci_cpuid); + msiaddr = 0xfee00000 | ((ci->ci_apicid & 0xff) << 12); + if (reg & PCI_MSI_MC_C64) { - pci_conf_write(pc, tag, off + PCI_MSI_MA, 0xfee00000); + pci_conf_write(pc, tag, off + PCI_MSI_MA, msiaddr); pci_conf_write(pc, tag, off + PCI_MSI_MAU32, 0); pci_conf_write(pc, tag, off + PCI_MSI_MD64, vec); } else { - pci_conf_write(pc, tag, off + PCI_MSI_MA, 0xfee00000); + pci_conf_write(pc, tag, off + PCI_MSI_MA, msiaddr); pci_conf_write(pc, tag, off + PCI_MSI_MD32, vec); } pci_conf_write(pc, tag, off, reg | PCI_MSI_MC_MSIE); return (ih); } pci_decompose_tag(pc, ih.tag, &bus, &dev, NULL); #if NACPIPRT > 0 acpiprt_route_interrupt(bus, dev, ih.pin); #endif @@ -927,11 +936,50 @@ pci_init_extents(void) #if NACPI > 0 void acpi_pci_match(struct device *, struct pci_attach_args *); #endif void pci_dev_postattach(struct device *dev, struct pci_attach_args *pa) { #if NACPI > 0 acpi_pci_match(dev, pa); #endif +} + +struct cpu_info * +pci_choosecpu_intr(struct intrhand *ih) +{ + struct cpu_info *ci, *best; + CPU_INFO_ITERATOR cii; + int level; + + level = ih->ih_level; + ci = best = NULL; + + /* + * Only play with IPL_NET for now, IPL_BIO for instance may require + * interrupts before secondary cpus actually start, see the SCSI + * START_STOP command. 
+ */ + if (level != IPL_NET) { + best = &cpu_info_primary; + goto done; + } + + CPU_INFO_FOREACH(cii, ci) { + if (CPU_IS_PRIMARY(ci)) + continue; + if ((ci->ci_flags & CPUF_PRESENT) == 0) + continue; + if (ci->ci_flags & CPUF_OWNED) + continue; + if (best == NULL || + ci->ci_intrs[level] < best->ci_intrs[level]) + best = ci; + } +done: + KASSERT(best); + + best->ci_intrs[level]++; + + return (best); } DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/dev/pci/if_ix.c --- /mount/blink/aegis/project/genuos/branch.10/branch.50/baseline/os/src/sys/dev/pci/if_ix.c Thu Aug 23 22:16:29 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/dev/pci/if_ix.c Fri Nov 9 13:15:07 2012 @@ -29,21 +29,22 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.5 2008/05/16 18:46:30 jfv Exp $*/ #include #include - +#define ixgbe_enter(sc) (mtx_enter(&(sc)->core_mtx)) +#define ixgbe_leave(sc) (mtx_leave(&(sc)->core_mtx)) /********************************************************************* * Driver version *********************************************************************/ #define IXGBE_DRIVER_VERSION "1.4.4" /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on @@ -141,20 +142,21 @@ void ixgbe_configure_ivars(struct ix_softc *); uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *); void ixgbe_setup_vlan_hw_support(struct ix_softc *); /* Support for pluggable optic modules */ int ixgbe_sfp_probe(struct ix_softc *); void ixgbe_setup_optics(struct ix_softc *); /* Legacy (single vector interrupt handler */ int ixgbe_legacy_irq(void *); +int 
ixgbe_legacy_irq_locked(void *); void ixgbe_enable_queue(struct ix_softc *, uint32_t); void ixgbe_disable_queue(struct ix_softc *, uint32_t); void ixgbe_rearm_queue(struct ix_softc *, uint32_t); void ixgbe_handle_que(void *, int); /********************************************************************* * OpenBSD Device Interface Entry Points *********************************************************************/ struct cfdriver ix_cd = { @@ -387,20 +389,21 @@ ixgbe_detach(struct device *self, int flags) * the packet is requeued. **********************************************************************/ void ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp) { struct mbuf *m_head; struct ix_softc *sc = txr->sc; int post = 0; + MUTEX_ASSERT_LOCKED(&sc->core_mtx); if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) return; if (!sc->link_active) return; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); @@ -442,37 +445,40 @@ ixgbe_start_locked(struct tx_ring *txr, struct ifnet * } void ixgbe_start(struct ifnet *ifp) { struct ix_softc *sc = ifp->if_softc; struct tx_ring *txr = sc->tx_rings; uint32_t queue = 0; + #if 0 /* * This is really just here for testing * TX multiqueue, ultimately what is * needed is the flow support in the stack * and appropriate logic here to deal with * it. -jfv */ if (sc->num_queues > 1) queue = (curcpu % sc->num_queues); #endif txr = &sc->tx_rings[queue]; + ixgbe_enter(sc); if (ifp->if_flags & IFF_RUNNING) ixgbe_start_locked(txr, ifp); - + ixgbe_leave(sc); + return; } /********************************************************************* * Ioctl entry point * * ixgbe_ioctl is called when the user wants to configure the * interface. 
* * return 0 on success, positive on failure @@ -889,20 +895,21 @@ ixgbe_rearm_queue(struct ix_softc *sc, uint32_t vector } void ixgbe_handle_que(void *context, int pending) { struct ix_queue *que = context; struct ix_softc *sc = que->sc; struct tx_ring *txr = que->txr; struct ifnet *ifp = &que->sc->arpcom.ac_if; + MUTEX_ASSERT_LOCKED(&sc->core_mtx); if (ifp->if_flags & IFF_RUNNING) { ixgbe_rxeof(que, -1 /* XXX sc->rx_process_limit */); ixgbe_txeof(txr); if (ixgbe_rxfill(que->rxr)) { /* Advance the Rx Queue "Tail Pointer" */ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me), que->rxr->last_desc_filled); } @@ -914,30 +921,32 @@ ixgbe_handle_que(void *context, int pending) ixgbe_enable_queue(que->sc, que->msix); } /********************************************************************* * * Legacy Interrupt Service routine * **********************************************************************/ int -ixgbe_legacy_irq(void *arg) +ixgbe_legacy_irq_locked(void *arg) { struct ix_softc *sc = (struct ix_softc *)arg; struct ix_queue *que = sc->queues; struct ifnet *ifp = &sc->arpcom.ac_if; struct tx_ring *txr = sc->tx_rings; struct ixgbe_hw *hw = &sc->hw; uint32_t reg_eicr; int i, refill = 0; + MUTEX_ASSERT_LOCKED(&sc->core_mtx); + reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR); if (reg_eicr == 0) { ixgbe_enable_intr(sc); return (0); } ++que->irqs; if (ifp->if_flags & IFF_RUNNING) { ixgbe_rxeof(que, -1); ixgbe_txeof(txr); @@ -971,20 +980,35 @@ ixgbe_legacy_irq(void *arg) if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) ixgbe_start_locked(txr, ifp); for (i = 0; i < sc->num_queues; i++, que++) ixgbe_enable_queue(sc, que->msix); return (1); } +int +ixgbe_legacy_irq(void *arg) +{ + struct ix_softc *sc = (struct ix_softc *)arg; + int r; + + KERNEL_UNLOCK(); + ixgbe_enter(sc); + r = ixgbe_legacy_irq_locked(sc); + ixgbe_leave(sc); + KERNEL_LOCK(); + + return (r); +} + /********************************************************************* * * Media Ioctl callback * * 
This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ void ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) @@ -1236,21 +1260,22 @@ ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **upda **********************************************************************/ void ixgbe_local_timer(void *arg) { struct ix_softc *sc = arg; #ifdef IX_DEBUG struct ifnet *ifp = &sc->arpcom.ac_if; #endif int s; - + + MUTEX_ASSERT_LOCKED(&sc->core_mtx); s = splnet(); /* Check for pluggable optics */ if (sc->sfp_probe) if (!ixgbe_sfp_probe(sc)) goto out; /* Nothing to do */ ixgbe_update_link_status(sc); ixgbe_update_stats_counters(sc); @@ -2715,26 +2740,28 @@ ixgbe_rxfill(struct rx_ring *rxr) } void ixgbe_rxrefill(void *xsc) { struct ix_softc *sc = xsc; struct ix_queue *que = sc->queues; int s; s = splnet(); + ixgbe_enter(sc); if (ixgbe_rxfill(que->rxr)) { /* Advance the Rx Queue "Tail Pointer" */ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me), que->rxr->last_desc_filled); } else timeout_add(&sc->rx_refill, 1); + ixgbe_leave(sc); splx(s); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ int ixgbe_setup_receive_structures(struct ix_softc *sc) @@ -3139,21 +3166,39 @@ ixgbe_rxeof(struct ix_queue *que, int count) rxr->rx_bytes += sendmp->m_pkthdr.len; ixgbe_rx_checksum(staterr, sendmp, ptype); #if NBPFILTER > 0 if (ifp->if_bpf) bpf_mtap_ether(ifp->if_bpf, sendmp, BPF_DIRECTION_IN); #endif + /* + * XXX this is wrong, we lose atomicity in the driver + * by releasing if_mtx. We could fix it by collecting + * all mbufs in a queue, and only entering the stack + * after we're done doing driver crap. Or by deferring + * ether_input() to a workqueue.
On the other side, this + * should always be safe since the rx ring is only + * touched by one cpu. I'm about to say that rx rings + * never need locking. + */ + /* + * 2009 tests entered ether_input_mbuf with the mutex + * held, lets start with those. Problem is, we're + * entering a big chunk of code possibly without the big + * lock. + */ + /* KERNEL_LOCK(); */ ether_input_mbuf(ifp, sendmp); + /* KERNEL_UNLOCK(); */ } next_desc: bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, dsize * i, dsize, BUS_DMASYNC_PREREAD); /* Advance our pointers to the next descriptor. */ if (++i == sc->num_rx_desc) i = 0; } DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/dev/pci/if_ix.h DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/kern/subr_pool.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/kern/subr_pool.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/kern/subr_pool.c Fri Nov 9 13:15:42 2012 @@ -500,57 +500,72 @@ pool_get(struct pool *pp, int flags) mtx_leave(&pp->pr_mtx); v = NULL; } } else { if (flags & PR_ZERO) memset(v, 0, pp->pr_size); } return (v); } +/* + * Changes: + * - Protect cache. + * - Account for pr_nget on cached case. + * - Place the same constraints on pp->pr_ctor case as pool_get(). 
+ */ void * pool_get_cached(struct pool *pp, int flags) { struct pool_item *v; + mtx_enter(&pp->pr_mtx); if ((v = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) { TAILQ_REMOVE(&pp->pr_cachelist, v, pi_list); pp->pr_ncached--; + pp->pr_nget++; } else { - mtx_enter(&pp->pr_mtx); v = pool_do_get(pp, flags); - mtx_leave(&pp->pr_mtx); pp->pr_nget++; } - if (v && pp->pr_ctor && pp->pr_ctor(pp->pr_arg, v, flags)) { - mtx_enter(&pp->pr_mtx); - pool_do_put(pp, v); - mtx_leave(&pp->pr_mtx); - v = NULL; - pp->pr_nput++; - } - if (v && flags & PR_ZERO) + mtx_leave(&pp->pr_mtx); + + if (v == NULL) + return (NULL); + + if (pp->pr_ctor) { + if (flags & PR_ZERO) + panic("pool_get_cached: PR_ZERO when ctor set"); + if (pp->pr_ctor(pp->pr_arg, v, flags)) { + mtx_enter(&pp->pr_mtx); + pp->pr_nget--; + pool_do_put(pp, v); + v = NULL; + } + } else if (flags & PR_ZERO) memset(v, 0, pp->pr_size); - return v; + + return (v); } void * pool_do_get(struct pool *pp, int flags) { struct pool_item *pi; struct pool_item_header *ph; void *v; int slowdown = 0; #if defined(DIAGNOSTIC) && defined(POOL_DEBUG) int i, *ip; #endif + MUTEX_ASSERT_LOCKED(&pp->pr_mtx); #ifdef MALLOC_DEBUG if (pp->pr_roflags & PR_DEBUG) { void *addr; addr = NULL; debug_malloc(pp->pr_size, M_DEBUG, (flags & PR_WAITOK) ? M_WAITOK : M_NOWAIT, &addr); return (addr); } #endif @@ -744,49 +759,56 @@ pool_put(struct pool *pp, void *v) #ifdef POOL_DEBUG if (pp->pr_roflags & PR_DEBUGCHK) { if (pool_chk(pp)) panic("after pool_put"); } #endif pp->pr_nput++; mtx_leave(&pp->pr_mtx); } +/* + * Changes: + * - Protect cache. + * - Protect pr_nput. 
+ */ void pool_put_cached(struct pool *pp, void *v) { if (pp->pr_dtor) pp->pr_dtor(pp->pr_arg, v); + mtx_enter(&pp->pr_mtx); if (pp->pr_ncached < pp->pr_nreserve) { struct pool_item *pi = v; TAILQ_INSERT_HEAD(&pp->pr_cachelist, pi, pi_list); pp->pr_ncached++; + mtx_leave(&pp->pr_mtx); return; } - mtx_enter(&pp->pr_mtx); pool_do_put(pp, v); - mtx_leave(&pp->pr_mtx); pp->pr_nput++; + mtx_leave(&pp->pr_mtx); } /* * Internal version of pool_put(). */ void pool_do_put(struct pool *pp, void *v) { struct pool_item *pi = v; struct pool_item_header *ph; #if defined(DIAGNOSTIC) && defined(POOL_DEBUG) int i, *ip; #endif + MUTEX_ASSERT_LOCKED(&pp->pr_mtx); if (v == NULL) panic("pool_put of NULL"); #ifdef MALLOC_DEBUG if (pp->pr_roflags & PR_DEBUG) { debug_free(v, M_DEBUG); return; } #endif DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/kern/uipc_domain.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/kern/uipc_domain.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/kern/uipc_domain.c Thu Nov 8 15:30:34 2012 @@ -30,33 +30,35 @@ * SUCH DAMAGE. * * @(#)uipc_domain.c 8.2 (Berkeley) 10/18/93 */ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include "bluetooth.h" #include "bpfilter.h" #include "pflow.h" struct domain *domains; +struct mutex net_mtx = MUTEX_INITIALIZER(IPL_NET); void pffasttimo(void *); void pfslowtimo(void *); struct domain * pffinddomain(int); #if defined (KEY) || defined (IPSEC) || defined (TCP_SIGNATURE) int pfkey_init(void); #endif /* KEY || IPSEC || TCP_SIGNATURE */ #define ADDDOMAIN(x) { \ DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/kern/uipc_mbuf2.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/kern/uipc_mbuf2.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/kern/uipc_mbuf2.c Thu Nov 8 17:42:06 2012 @@ -60,23 +60,25 @@ * SUCH DAMAGE. 
* * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95 */ #include #include #include #include #include +#include #include #include +struct mutex m_tdb_mtx = MUTEX_INITIALIZER(IPL_NET); /* can't call it m_dup(), as freebsd[34] uses m_dup() with different arg */ static struct mbuf *m_dup1(struct mbuf *, int, int, int); /* * ensure that [off, off + len] is contiguous on the mbuf chain "m". * packet chain before "off" is kept untouched. * if offp == NULL, the target will start at on resulting chain. * if offp != NULL, the target will start at on resulting chain. * * on error return (NULL return value), original "m" will be freed. @@ -384,51 +386,51 @@ struct m_tag * m_tag_next(struct mbuf *m, struct m_tag *t) { return (SLIST_NEXT(t, m_tag_link)); } /* Detach mbuf 'm' from TDBs */ void m_ipsec_delete(struct mbuf *m) { struct tdb *tdb; - int i, s; + int i; - s = splnet(); /* XXX was MBUFLOCK */ + mtx_enter(&m_tdb_mtx); if ((tdb = m->m_pkthdr.ipsec.tdb_in)) { TAILQ_REMOVE(&tdb->tdb_mbuf_in, m, m_pkthdr.ipsec.tdb_in_link); m->m_pkthdr.ipsec.tdb_in = NULL; } for (i = 0; i < MTDB_MAX; i++) { if ((tdb = m->m_pkthdr.ipsec.tdb_out[i])) { TAILQ_REMOVE(&tdb->tdb_mbuf_out[i], m, m_pkthdr.ipsec.tdb_out_link[i]); m->m_pkthdr.ipsec.tdb_out[i] = NULL; } } - splx(s); + mtx_leave(&m_tdb_mtx); } /* Attach mbuf 'to' to same TDBs as 'from' */ void m_ipsec_copy(struct mbuf *to, struct mbuf *from) { struct tdb *tdb; - int i, s; + int i; - s = splnet(); /* XXX was MBUFLOCK */ + mtx_enter(&m_tdb_mtx); if ((tdb = from->m_pkthdr.ipsec.tdb_in)) { TAILQ_INSERT_HEAD(&tdb->tdb_mbuf_in, to, m_pkthdr.ipsec.tdb_in_link); } else to->m_pkthdr.ipsec.tdb_in = NULL; for (i = 0; i < MTDB_MAX; i++) { if ((tdb = from->m_pkthdr.ipsec.tdb_out[i])) { TAILQ_INSERT_HEAD(&tdb->tdb_mbuf_out[i], to, m_pkthdr.ipsec.tdb_out_link[i]); to->m_pkthdr.ipsec.tdb_out[i] = tdb; } else to->m_pkthdr.ipsec.tdb_out[i] = NULL; } - splx(s); + mtx_leave(&m_tdb_mtx); } DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/net/if.h --- 
/mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/net/if.h Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/net/if.h Thu Nov 8 15:28:49 2012 @@ -362,62 +362,72 @@ struct ifnet { /* and the entries */ #define IFCAP_CSUM_TCPv6 0x00000080 /* can do IPv6/TCP checksums */ #define IFCAP_CSUM_UDPv6 0x00000100 /* can do IPv6/UDP checksums */ #define IFCAP_WOL 0x00008000 /* can do wake on lan */ /* * Output queues (ifp->if_snd) and internetwork datagram level (pup level 1) * input routines have queues of messages stored on ifqueue structures * (defined above). Entries are added to and deleted from these structures * by these macros, which should be called with ipl raised to splnet(). */ +extern struct mutex net_mtx; + #define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) #define IF_DROP(ifq) ((ifq)->ifq_drops++) #define IF_ENQUEUE(ifq, m) \ do { \ + mtx_enter(&net_mtx); \ (m)->m_nextpkt = NULL; \ if ((ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail == NULL) \ (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].head = m; \ else \ (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail->m_nextpkt = m; \ (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail = m; \ (ifq)->ifq_len++; \ + mtx_leave(&net_mtx); \ } while (/* CONSTCOND */0) #define IF_PREPEND(ifq, m) \ do { \ + mtx_enter(&net_mtx); \ (m)->m_nextpkt = (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].head; \ if ((ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail == NULL) \ (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail = (m); \ (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].head = (m); \ (ifq)->ifq_len++; \ + mtx_leave(&net_mtx); \ } while (/* CONSTCOND */0) #define IF_POLL(ifq, m) \ do { \ int if_dequeue_prio = IFQ_MAXPRIO; \ + mtx_enter(&net_mtx); \ do { \ (m) = (ifq)->ifq_q[if_dequeue_prio].head; \ } while (!(m) && --if_dequeue_prio >= 0); \ + mtx_leave(&net_mtx); \ } while (/* CONSTCOND */0) #define IF_DEQUEUE(ifq, m) \ do { \ int if_dequeue_prio = IFQ_MAXPRIO; \ + mtx_enter(&net_mtx); \ do { \ (m) = (ifq)->ifq_q[if_dequeue_prio].head; \ if (m) { \ if 
(((ifq)->ifq_q[if_dequeue_prio].head = \ (m)->m_nextpkt) == NULL) \ (ifq)->ifq_q[if_dequeue_prio].tail = NULL; \ (m)->m_nextpkt = NULL; \ (ifq)->ifq_len--; \ } \ } while (!(m) && --if_dequeue_prio >= 0); \ + mtx_leave(&net_mtx); \ } while (/* CONSTCOND */0) #define IF_INPUT_ENQUEUE(ifq, m) \ do { \ if (IF_QFULL(ifq)) { \ IF_DROP(ifq); \ m_freem(m); \ } else \ IF_ENQUEUE(ifq, m); \ } while (/* CONSTCOND */0) @@ -714,31 +724,31 @@ do { \ (err) = 0; \ } \ } \ if ((err)) \ (ifq)->ifq_drops++; \ } while (/* CONSTCOND */0) #define IFQ_DEQUEUE(ifq, m) \ do { \ if (OLDTBR_IS_ENABLED((ifq))) \ - (m) = oldtbr_dequeue((ifq), ALTDQ_REMOVE); \ + (m) = oldtbr_dequeue((ifq), ALTDQ_REMOVE); \ else if (ALTQ_IS_ENABLED((ifq))) \ ALTQ_DEQUEUE((ifq), (m)); \ else \ IF_DEQUEUE((ifq), (m)); \ } while (/* CONSTCOND */0) #define IFQ_POLL(ifq, m) \ do { \ if (TBR_IS_ENABLED((ifq))) \ - (m) = oldtbr_dequeue((ifq), ALTDQ_POLL); \ + (m) = oldtbr_dequeue((ifq), ALTDQ_POLL); \ else if (ALTQ_IS_ENABLED((ifq))) \ ALTQ_POLL((ifq), (m)); \ else \ IF_POLL((ifq), (m)); \ } while (/* CONSTCOND */0) #define IFQ_PURGE(ifq) \ do { \ if (ALTQ_IS_ENABLED((ifq))) \ ALTQ_PURGE((ifq)); \ DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/net/netisr.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/net/netisr.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/net/netisr.c Thu Nov 8 13:50:40 2012 @@ -36,20 +36,22 @@ void polldone(void); void netintr(void *); int netisr; void *netisr_intr; void netintr(void *unused) /* ARGSUSED */ { int n; + + KERNEL_ASSERT_LOCKED(); while ((n = netisr) != 0) { atomic_clearbits_int(&netisr, n); #ifdef DEVICE_POLLING if (n & (1 << NETISR_POLL)) pollintr(); #endif #ifdef INET #if NETHER > 0 if (n & (1 << NETISR_ARP)) DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/netinet/ip_ipsp.c --- /mount/blink/aegis/project/genuos/branch.10/branch.50/baseline/os/src/sys/netinet/ip_ipsp.c Fri Sep 28 08:52:36 2012 +++ 
/data/aegis/development/genuos.10.50.C248/os/src/sys/netinet/ip_ipsp.c Thu Nov 8 18:01:16 2012 @@ -869,33 +869,33 @@ tdb_alloc(u_int rdomain) ipsec_gen = 1; return tdbp; } void tdb_free(struct tdb *tdbp) { struct ipsec_policy *ipo; struct inpcb *inp; struct mbuf *m; - int s, i; + int i; if (tdbp->tdb_xform) { (*(tdbp->tdb_xform->xf_zeroize))(tdbp); tdbp->tdb_xform = NULL; } /* Remove matching flow */ if (tdbp->tdb_flags & TDBF_FLOW) tdb_del_flow(tdbp); /* Cleanup mbuf references. */ - s = splnet(); + mtx_enter(&m_tdb_mtx); while ((m = TAILQ_FIRST(&tdbp->tdb_mbuf_in))) { TAILQ_REMOVE(&tdbp->tdb_mbuf_in, m, m_pkthdr.ipsec.tdb_in_link); /* * The mbuf continues to exist, but it no longer knows * that it has been IPsec-input processed. Losing this * knowlegde does no harm: a policy violation (e.g. * processing required) would cause the mbuf to be * dropped, so we don't need to mark the mbuf. */ @@ -906,21 +906,21 @@ tdb_free(struct tdb *tdbp) TAILQ_REMOVE(&tdbp->tdb_mbuf_out[i], m, m_pkthdr.ipsec.tdb_out_link[i]); /* * Removing the SA information from m could * lead to double IPsec processing in the worst * case, so we don't care... */ m->m_pkthdr.ipsec.tdb_out[i] = NULL; } } - splx(s); + mtx_leave(&m_tdb_mtx); #if NPFSYNC > 0 /* Cleanup pfsync references */ pfsync_delete_tdb(tdbp); #endif /* Cleanup inp references. */ for (inp = TAILQ_FIRST(&tdbp->tdb_inp_in); inp; inp = TAILQ_FIRST(&tdbp->tdb_inp_in)) { TAILQ_REMOVE(&tdbp->tdb_inp_in, inp, inp_tdb_in_next); DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/netinet/ipsec_input.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/netinet/ipsec_input.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/netinet/ipsec_input.c Thu Nov 8 18:17:33 2012 @@ -833,30 +833,30 @@ ipsec_common_input_cb(struct mbuf *m, struct tdb *tdbp } pfkeyv2_get_upcall(tdbp); /* notify userland */ splx(s); } } /* * Record what we've done to the packet (under what SA it was * processed). 
*/ + mtx_enter(&m_tdb_mtx); if (tdbp->tdb_sproto != IPPROTO_IPCOMP) { - s = splnet(); if (m->m_pkthdr.ipsec.tdb_in) TAILQ_REMOVE(&m->m_pkthdr.ipsec.tdb_in->tdb_mbuf_in, m, m_pkthdr.ipsec.tdb_in_link); TAILQ_INSERT_HEAD(&tdbp->tdb_mbuf_in, m, m_pkthdr.ipsec.tdb_in_link); - splx(s); m->m_pkthdr.ipsec.tdb_in = tdbp; } + mtx_leave(&m_tdb_mtx); if (sproto == IPPROTO_ESP) { /* Packet is confidential ? */ if (tdbp->tdb_encalgxform) m->m_flags |= M_CONF; /* Check if we had authenticated ESP. */ if (tdbp->tdb_authalgxform) m->m_flags |= M_AUTH; } else if (sproto == IPPROTO_AH) { DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/netinet/ipsec_output.c --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/netinet/ipsec_output.c Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/netinet/ipsec_output.c Fri Nov 9 13:16:55 2012 @@ -396,21 +396,21 @@ ipsp_process_packet(struct mbuf *m, struct tdb *tdb, i int ipsp_process_done(struct mbuf *m, struct tdb *tdb) { #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_hdr *ip6; #endif /* INET6 */ - int s, roff, i; + int roff, i; tdb->tdb_last_used = time_second; if ((tdb->tdb_flags & TDBF_UDPENCAP) != 0) { struct mbuf *mi; struct udphdr *uh; if (!udpencap_enable || !udpencap_port) { m_freem(m); return ENXIO; @@ -466,30 +466,30 @@ ipsp_process_done(struct mbuf *m, struct tdb *tdb) tdb->tdb_dst.sa.sa_family)); return ENXIO; } /* * Add a record of what we've done or what needs to be done to the * packet. * If there is no free slot we risk double encryption with the same * SA and cannot perform proper PMTU discovery. */ + mtx_enter(&m_tdb_mtx); for (i = 0; i < MTDB_MAX; i++) { if (m->m_pkthdr.ipsec.tdb_out[i] == NULL) { - s = splnet(); TAILQ_INSERT_HEAD(&tdb->tdb_mbuf_out[i], m, m_pkthdr.ipsec.tdb_out_link[i]); - splx(s); m->m_pkthdr.ipsec.tdb_out[i] = tdb; break; } } + mtx_leave(&m_tdb_mtx); /* If there's another (bundled) TDB to apply, do so. 
*/ if (tdb->tdb_onext) return ipsp_process_packet(m, tdb->tdb_onext, tdb->tdb_dst.sa.sa_family, 0); #if NPF > 0 /* Add pf tag if requested. */ pf_tag_packet(m, tdb->tdb_tag, -1); pf_pkt_addr_changed(m); DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/sys/mbuf.h --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/sys/mbuf.h Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/sys/mbuf.h Thu Nov 8 17:43:38 2012 @@ -407,20 +407,21 @@ struct mbstat { #ifdef _KERNEL extern struct mbstat mbstat; extern int nmbclust; /* limit on the # of clusters */ extern int mblowat; /* mbuf low water mark */ extern int mcllowat; /* mbuf cluster low water mark */ extern int max_linkhdr; /* largest link-level header */ extern int max_protohdr; /* largest protocol header */ extern int max_hdr; /* largest link+protocol header */ extern int max_datalen; /* MHLEN - max_hdr */ +extern struct mutex m_tdb_mtx; void mbinit(void); struct mbuf *m_copym2(struct mbuf *, int, int, int); struct mbuf *m_copym(struct mbuf *, int, int, int); struct mbuf *m_free(struct mbuf *); struct mbuf *m_free_unlocked(struct mbuf *); struct mbuf *m_get(int, int); struct mbuf *m_getclr(int, int); struct mbuf *m_gethdr(int, int); struct mbuf *m_inithdr(struct mbuf *); DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/sys/sched.h DIFF: /data/aegis/development/genuos.10.50.C248/os/src/sys/sys/systm.h --- /mount/blink/aegis/project/genuos/branch.10/baseline/os/src/sys/sys/systm.h Thu Aug 23 10:27:33 2012 +++ /data/aegis/development/genuos.10.50.C248/os/src/sys/sys/systm.h Mon Nov 5 17:35:05 2012 @@ -336,20 +336,27 @@ void user_config_cmds(char *); #endif #if defined(MULTIPROCESSOR) void _kernel_lock_init(void); void _kernel_lock(void); void _kernel_unlock(void); #define KERNEL_LOCK_INIT() _kernel_lock_init() #define KERNEL_LOCK() _kernel_lock() #define KERNEL_UNLOCK() _kernel_unlock() +#define KERNEL_UNLOCK_ALL() __mp_release_all(&kernel_lock) +#define 
KERNEL_RELOCK_ALL(x) __mp_acquire_count(&kernel_lock, x) +#define KERNEL_ASSERT_LOCKED() KASSERT(__mp_lock_held(&kernel_lock)) +#define KERNEL_ASSERT_UNLOCKED() KASSERT(__mp_lock_held(&kernel_lock) == 0) + #else /* ! MULTIPROCESSOR */ #define KERNEL_LOCK_INIT() /* nothing */ #define KERNEL_LOCK() /* nothing */ #define KERNEL_UNLOCK() /* nothing */ +#define KERNEL_ASSERT_LOCKED() /* nothing */ +#define KERNEL_ASSERT_UNLOCKED() /* nothing */ #endif /* MULTIPROCESSOR */ #endif /* __SYSTM_H__ */