openvswitch: Remove 5-tuple optimization.

The 5-tuple optimization allocated a single, spinlock-protected stats
entry for flows matching on an exact 5-tuple, and per-CPU stats only
for wildcarded flows.  It becomes unnecessary with a later per-NUMA
node stats patch.  Remove it first, so that every flow uses per-CPU
stats and the later changes are easier to grasp.
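
With a single per-CPU scheme, the fast-path update reduces to taking
the local CPU's entry.  A minimal sketch of the resulting update path
(the real change lives in ovs_flow_stats_update() in
net/openvswitch/flow.c, which this hunk does not show; the helper name
and all fields other than 'lock' are assumptions for illustration):

    /* Sketch only: hypothetical helper, assumed stats fields. */
    static void flow_stats_touch(struct sw_flow *flow, struct sk_buff *skb)
    {
            /* No is_percpu branch any more: use this CPU's entry directly. */
            struct flow_stats *stats = this_cpu_ptr(flow->stats);

            spin_lock(&stats->lock);
            stats->used = jiffies;          /* assumed field: last-used time */
            stats->packet_count++;          /* assumed field */
            stats->byte_count += skb->len;  /* assumed field */
            spin_unlock(&stats->lock);
    }

The per-CPU spinlocks initialized below remain necessary because stats
readers aggregate across all CPUs rather than reading only their own
entry.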

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 1ba1e0b..aa92da2 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -72,7 +72,7 @@
 		*d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
 {
 	struct sw_flow *flow;
 	int cpu;
@@ -84,25 +84,15 @@
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
 
-	flow->stats.is_percpu = percpu_stats;
+	flow->stats = alloc_percpu(struct flow_stats);
+	if (!flow->stats)
+		goto err;
 
-	if (!percpu_stats) {
-		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
-		if (!flow->stats.stat)
-			goto err;
+	for_each_possible_cpu(cpu) {
+		struct flow_stats *cpu_stats;
 
-		spin_lock_init(&flow->stats.stat->lock);
-	} else {
-		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
-		if (!flow->stats.cpu_stats)
-			goto err;
-
-		for_each_possible_cpu(cpu) {
-			struct flow_stats *cpu_stats;
-
-			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-			spin_lock_init(&cpu_stats->lock);
-		}
+		cpu_stats = per_cpu_ptr(flow->stats, cpu);
+		spin_lock_init(&cpu_stats->lock);
 	}
 	return flow;
 err:
@@ -141,10 +131,7 @@
 
 static void flow_free(struct sw_flow *flow)
 {
 	kfree((struct sw_flow_actions __force *)flow->sf_acts);
-	if (flow->stats.is_percpu)
-		free_percpu(flow->stats.cpu_stats);
-	else
-		kfree(flow->stats.stat);
+	free_percpu(flow->stats);
 	kmem_cache_free(flow_cache, flow);
 }
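
For reference, this hunk implies a companion change in
net/openvswitch/flow.h that is not shown here: the stats wrapper with
its is_percpu flag and stat/cpu_stats alternatives collapses into a
bare per-CPU pointer.  A minimal sketch, with unrelated members elided:

    struct sw_flow {
            /* ... sf_acts, mask, and other members unchanged ... */
            struct flow_stats __percpu *stats;  /* was: wrapper with is_percpu/stat/cpu_stats */
    };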