diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 2296f3d46ca8..558dd0692092 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -910,18 +910,16 @@ core99_gmac_phy_reset(struct device_node *node, long param, long value)
 	    macio->type != macio_intrepid)
 		return -ENODEV;
 
-	printk(KERN_DEBUG "Hard reset of PHY chip ...\n");
-
 	LOCK(flags);
 	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
 	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
 	UNLOCK(flags);
-	msleep(10);
+	mdelay(10);
 	LOCK(flags);
 	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
 		   KEYLARGO_GPIO_OUTOUT_DATA);
 	UNLOCK(flags);
-	msleep(10);
+	mdelay(10);
 
 	return 0;
 }
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 55f3b856236e..28ce47a02408 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1653,40 +1653,36 @@ static void gem_init_rings(struct gem *gp)
 /* Init PHY interface and start link poll state machine */
 static void gem_init_phy(struct gem *gp)
 {
-	u32 mif_cfg;
+	u32 mifcfg;
 
 	/* Revert MIF CFG setting done on stop_phy */
-	mif_cfg = readl(gp->regs + MIF_CFG);
-	mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
-	mif_cfg |= MIF_CFG_MDI0;
-	writel(mif_cfg, gp->regs + MIF_CFG);
-	writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
-	writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
+	mifcfg = readl(gp->regs + MIF_CFG);
+	mifcfg &= ~MIF_CFG_BBMODE;
+	writel(mifcfg, gp->regs + MIF_CFG);
 
 	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
 		int i;
-		u16 ctrl;
 
-#ifdef CONFIG_PPC_PMAC
-		pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
-#endif
-
-		/* Some PHYs used by apple have problem getting back
-		 * to us, we do an additional reset here
+		/* These delays suck, but the HW seems to love them; I'll
+		 * seriously consider breaking some locks here to be able
+		 * to schedule instead
 		 */
-		phy_write(gp, MII_BMCR, BMCR_RESET);
-		for (i = 0; i < 50; i++) {
-			if ((phy_read(gp, MII_BMCR) & BMCR_RESET) == 0)
+		for (i = 0; i < 3; i++) {
+#ifdef CONFIG_PPC_PMAC
+			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
+			msleep(20);
+#endif
+			/* Some PHYs used by Apple have problems getting back to us,
+			 * so we do an additional reset here
+			 */
+			phy_write(gp, MII_BMCR, BMCR_RESET);
+			msleep(20);
+			if (phy_read(gp, MII_BMCR) != 0xffff)
 				break;
-			msleep(10);
+			if (i == 2)
+				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
+				       gp->dev->name);
 		}
-		if (i == 50)
-			printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
-			       gp->dev->name);
-		/* Make sure isolate is off */
-		ctrl = phy_read(gp, MII_BMCR);
-		if (ctrl & BMCR_ISOLATE)
-			phy_write(gp, MII_BMCR, ctrl & ~BMCR_ISOLATE);
 	}
 
 	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
@@ -2123,7 +2119,7 @@ static void gem_reinit_chip(struct gem *gp)
 /* Must be invoked with no lock held. */
 static void gem_stop_phy(struct gem *gp, int wol)
 {
-	u32 mif_cfg;
+	u32 mifcfg;
 	unsigned long flags;
 
 	/* Let the chip settle down a bit, it seems that helps
@@ -2134,9 +2130,9 @@ static void gem_stop_phy(struct gem *gp, int wol)
 	/* Make sure we aren't polling PHY status change. We
 	 * don't currently use that feature though
 	 */
-	mif_cfg = readl(gp->regs + MIF_CFG);
-	mif_cfg &= ~MIF_CFG_POLL;
-	writel(mif_cfg, gp->regs + MIF_CFG);
+	mifcfg = readl(gp->regs + MIF_CFG);
+	mifcfg &= ~MIF_CFG_POLL;
+	writel(mifcfg, gp->regs + MIF_CFG);
 
 	if (wol && gp->has_wol) {
 		unsigned char *e = &gp->dev->dev_addr[0];
@@ -2186,8 +2182,7 @@ static void gem_stop_phy(struct gem *gp, int wol)
 	/* According to Apple, we must set the MDIO pins to this begnign
 	 * state or we may 1) eat more current, 2) damage some PHYs
 	 */
-	mif_cfg = 0;
-	writel(mif_cfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+	writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
 	writel(0, gp->regs + MIF_BBCLK);
 	writel(0, gp->regs + MIF_BBDATA);
 	writel(0, gp->regs + MIF_BBOENAB);
diff --git a/include/net/route.h b/include/net/route.h
index e3e5436f8017..9c04f15090d2 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -170,8 +170,8 @@ static inline int ip_route_connect(struct rtable **rp, u32 dst,
 	return ip_route_output_flow(rp, &fl, sk, 0);
 }
 
-static inline int ip_route_newports(struct rtable **rp, u16 sport, u16 dport,
-				    struct sock *sk)
+static inline int ip_route_newports(struct rtable **rp, u8 protocol,
+				    u16 sport, u16 dport, struct sock *sk)
 {
 	if (sport != (*rp)->fl.fl_ip_sport ||
 	    dport != (*rp)->fl.fl_ip_dport) {
@@ -180,6 +180,7 @@ static inline int ip_route_newports(struct rtable **rp, u16 sport, u16 dport,
 		memcpy(&fl, &(*rp)->fl, sizeof(fl));
 		fl.fl_ip_sport = sport;
 		fl.fl_ip_dport = dport;
+		fl.proto = protocol;
 		ip_rt_put(*rp);
 		*rp = NULL;
 		return ip_route_output_flow(rp, &fl, sk, 0);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ba442883e877..da687c8dc6ff 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -104,6 +104,7 @@ static void destroy_nbp(struct net_bridge_port *p)
 {
 	struct net_device *dev = p->dev;
 
+	dev->br_port = NULL;
 	p->br = NULL;
 	p->dev = NULL;
 	dev_put(dev);
@@ -118,13 +119,24 @@ static void destroy_nbp_rcu(struct rcu_head *head)
 	destroy_nbp(p);
 }
 
-/* called with RTNL */
+/* Deleting a port (interface) from the bridge is done in two steps
+ * via RCU.  The first step marks the device as down; that deletes
+ * all the timers and stops new packets from flowing through.
+ *
+ * Final cleanup doesn't occur until after all CPUs have finished
+ * processing packets.
+ *
+ * Protected from multiple admin operations by RTNL mutex
+ */
 static void del_nbp(struct net_bridge_port *p)
 {
 	struct net_bridge *br = p->br;
 	struct net_device *dev = p->dev;
 
-	dev->br_port = NULL;
+	/* Race between RTNL notify and RCU callback */
+	if (p->deleted)
+		return;
+
 	dev_set_promiscuity(dev, -1);
 
 	cancel_delayed_work(&p->carrier_check);
 
@@ -132,16 +144,13 @@ static void del_nbp(struct net_bridge_port *p)
 	spin_lock_bh(&br->lock);
 	br_stp_disable_port(p);
+	p->deleted = 1;
 	spin_unlock_bh(&br->lock);
 
 	br_fdb_delete_by_port(br, p);
 
 	list_del_rcu(&p->list);
 
-	del_timer_sync(&p->message_age_timer);
-	del_timer_sync(&p->forward_delay_timer);
-	del_timer_sync(&p->hold_timer);
-
 	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c5bd631ffcd5..e330b17b6d81 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -68,6 +68,7 @@ struct net_bridge_port
 	/* STP */
 	u8				priority;
 	u8				state;
+	u8				deleted;
 	u16				port_no;
 	unsigned char			topology_change_ack;
 	unsigned char			config_pending;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00f983226672..dc0487b5bace 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -119,7 +119,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err != 0)
 		goto failure;
 
-	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
+				sk);
 	if (err != 0)
 		goto failure;
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index df074259f9c3..80c4d048869e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -468,6 +468,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 done:
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
+	dst_release(dst);
 	return err;
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6ea353907af5..233bdf259965 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -236,7 +236,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err)
 		goto failure;
 
-	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
 	if (err)
 		goto failure;
 
@@ -1845,7 +1845,6 @@ void __init tcp_v4_init(struct net_proto_family *ops)
 }
 
 EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_prot);
 EXPORT_SYMBOL(tcp_unhash);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 66d04004afda..ca9cf6853755 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -515,6 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 done:
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
+	dst_release(dst);
 	return err;
 }
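
The include/net/route.h hunk changes the ip_route_newports() calling convention: callers now pass their transport protocol, so that when the route is looked up again (once the local port is finally known) the rebuilt flow key carries the protocol as well as the new ports. The dccp_v4_connect() and tcp_v4_connect() hunks above are the in-tree callers updated accordingly. The fragment below is a hypothetical, illustrative caller only and is not part of the patch: the function name example_v4_route_setup() is invented for this note, header locations may vary slightly between trees of this era, and everything else uses the helpers shown in the diff.

#include <net/route.h>
#include <net/sock.h>

/* Illustrative sketch only -- shows the two-stage route setup with the
 * updated ip_route_newports() signature.
 */
static int example_v4_route_setup(struct sock *sk, u32 daddr, u16 dport)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt;
	int err;

	/* First lookup: the local source port may still be 0 here. */
	err = ip_route_connect(&rt, daddr, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_DCCP, inet->sport, dport, sk);
	if (err)
		return err;

	/* ... the local port gets chosen and the socket hashed here ... */

	/* Re-lookup with the final ports *and* the protocol; before this
	 * patch the protocol was not part of the new flow.
	 */
	err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, sk);
	if (err)
		return err;

	/* Commit the route to the socket, as the real callers do. */
	sk_setup_caps(sk, &rt->u.dst);
	return 0;
}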