ipv4: Use per-cpu route cache in IP tunnels

A per-cpu route cache eliminates sharing of the cached dst's refcount between CPUs, avoiding atomic refcount contention on the tunnel transmit path.
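
The per-cpu slot this relies on is defined in include/net/ip_tunnel.h, which
is not part of this excerpt; inferred from the accesses visible below
(idst->dst, idst->lock, per_cpu_ptr(t->dst_cache, i)), it is roughly:

	/* Sketch only; see include/net/ip_tunnel.h for the real definition */
	struct ip_tunnel_dst {
		struct dst_entry __rcu	*dst;	/* cached route */
		spinlock_t		lock;	/* serializes writers */
	};

	/* in struct ip_tunnel */
	struct ip_tunnel_dst __percpu	*dst_cache;

Readers fetch the local CPU's slot under rcu_read_lock() and take a
reference with dst_hold(); writers swap the pointer under the per-slot
spinlock and dst_release() the old entry, so only flush paths such as
tunnel_dst_reset_all() ever touch another CPU's slot.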

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 27d756f..e2c9cff 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -68,23 +68,24 @@
 			 IP_TNL_HASH_BITS);
 }
 
-static inline void __tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+static inline void __tunnel_dst_set(struct ip_tunnel_dst *idst,
+				    struct dst_entry *dst)
 {
 	struct dst_entry *old_dst;
 
 	if (dst && (dst->flags & DST_NOCACHE))
 		dst = NULL;
 
-	spin_lock_bh(&t->dst_lock);
-	old_dst = rcu_dereference_raw(t->dst_cache);
-	rcu_assign_pointer(t->dst_cache, dst);
+	spin_lock_bh(&idst->lock);
+	old_dst = rcu_dereference(idst->dst);
+	rcu_assign_pointer(idst->dst, dst);
 	dst_release(old_dst);
-	spin_unlock_bh(&t->dst_lock);
+	spin_unlock_bh(&idst->lock);
 }
 
 static inline void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
 {
-	__tunnel_dst_set(t, dst);
+	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
 }
 
 static inline void tunnel_dst_reset(struct ip_tunnel *t)
@@ -92,12 +93,20 @@
 	tunnel_dst_set(t, NULL);
 }
 
+static void tunnel_dst_reset_all(struct ip_tunnel *t)
+{
+	int i;
+
+	for_each_possible_cpu(i)
+		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+}
+
 static inline struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
 {
 	struct dst_entry *dst;
 
 	rcu_read_lock();
-	dst = rcu_dereference(t->dst_cache);
+	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
 	if (dst)
 		dst_hold(dst);
 	rcu_read_unlock();
@@ -755,7 +764,7 @@
 		if (set_mtu)
 			dev->mtu = mtu;
 	}
-	tunnel_dst_reset(t);
+	tunnel_dst_reset_all(t);
 	netdev_state_change(dev);
 }
 
@@ -871,6 +880,7 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 	gro_cells_destroy(&tunnel->gro_cells);
+	free_percpu(tunnel->dst_cache);
 	free_percpu(dev->tstats);
 	free_netdev(dev);
 }
@@ -1049,8 +1059,21 @@
 		u64_stats_init(&ipt_stats->syncp);
 	}
 
+	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+	if (!tunnel->dst_cache) {
+		free_percpu(dev->tstats);
+		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i) {
+		struct ip_tunnel_dst *idst = per_cpu_ptr(tunnel->dst_cache, i);
+		idst->dst = NULL;
+		spin_lock_init(&idst->lock);
+	}
+
 	err = gro_cells_init(&tunnel->gro_cells, dev);
 	if (err) {
+		free_percpu(tunnel->dst_cache);
 		free_percpu(dev->tstats);
 		return err;
 	}
@@ -1061,9 +1084,6 @@
 	iph->version		= 4;
 	iph->ihl		= 5;
 
-	tunnel->dst_cache = NULL;
-	spin_lock_init(&tunnel->dst_lock);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1079,7 +1099,7 @@
 	if (itn->fb_tunnel_dev != dev)
 		ip_tunnel_del(netdev_priv(dev));
 
-	tunnel_dst_reset(tunnel);
+	tunnel_dst_reset_all(tunnel);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_uninit);