scsi: lpfc: Change default queue allocation for reduced memory consumption

By default, the driver attempts to allocate a hdwq per logical cpu to provide
good cpu affinity. Some systems have extremely high cpu counts, which can
significantly raise memory consumption.

Testing on x86 platforms (non-AMD) found that a physical cpu and its HT
sibling can share a hdwq with little performance degradation. By sharing,
the hdwq count can be halved, significantly reducing the memory overhead.

Change the driver's default behavior on non-AMD x86 platforms so that a cpu
and its HT sibling share a hdwq.
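
For illustration only (none of the following appears in the hunks below):
the reduced count can be thought of as "one hdwq per physical core", i.e.
counting only the first cpu in each HT sibling group. A minimal sketch of
that idea using the kernel's topology helpers is shown here; the function
name is hypothetical, and the actual mode selection (NHT_MODE vs.
NORMAL_MODE, presumably keyed off the x86 vendor) lives in driver setup
code not included in this patch excerpt:

  #include <linux/cpumask.h>
  #include <linux/topology.h>

  /* Hypothetical helper: count one cpu per physical core by keeping
   * only the lowest-numbered cpu in each HT sibling mask.  With HT
   * enabled this is roughly half the logical cpu count, which is what
   * allows the hdwq memory footprint to be halved.
   */
  static unsigned int example_nht_cpu_count(void)
  {
  	unsigned int cpu, cnt = 0;

  	for_each_present_cpu(cpu)
  		if (cpu == cpumask_first(topology_sibling_cpumask(cpu)))
  			cnt++;

  	return cnt;
  }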

Link: https://lore.kernel.org/r/20200501214310.91713-6-jsmart2021@gmail.com
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4104bdc..8b85303 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6023,29 +6023,6 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
 }
 
 /**
- * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node
- * @phba: Pointer to HBA context object.
- *
- **/
-static void
-lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
-{
-	unsigned int cpu, numa_node;
-	struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
-
-	cpumask_clear(numa_mask);
-
-	/* Check if we're a NUMA architecture */
-	numa_node = dev_to_node(&phba->pcidev->dev);
-	if (numa_node == NUMA_NO_NODE)
-		return;
-
-	for_each_possible_cpu(cpu)
-		if (cpu_to_node(cpu) == numa_node)
-			cpumask_set_cpu(cpu, numa_mask);
-}
-
-/**
  * lpfc_enable_pci_dev - Enable a generic PCI device.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -6483,7 +6460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
 	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
 	phba->sli4_hba.curr_disp_cpu = 0;
-	lpfc_cpumask_of_node_init(phba);
 
 	/* Get all the module params for configuring this host */
 	lpfc_get_cfgparam(phba);
@@ -6691,6 +6667,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 #endif
 				/* Not supported for NVMET */
 				phba->cfg_xri_rebalancing = 0;
+				if (phba->irq_chann_mode == NHT_MODE) {
+					phba->cfg_irq_chann =
+						phba->sli4_hba.num_present_cpu;
+					phba->cfg_hdw_queue =
+						phba->sli4_hba.num_present_cpu;
+					phba->irq_chann_mode = NORMAL_MODE;
+				}
 				break;
 			}
 		}
@@ -7032,7 +7015,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	phba->sli4_hba.num_possible_cpu = 0;
 	phba->sli4_hba.num_present_cpu = 0;
 	phba->sli4_hba.curr_disp_cpu = 0;
-	cpumask_clear(&phba->sli4_hba.numa_mask);
+	cpumask_clear(&phba->sli4_hba.irq_aff_mask);
 
 	/* Free memory allocated for fast-path work queue handles */
 	kfree(phba->sli4_hba.hba_eq_hdl);
@@ -11287,11 +11270,12 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
  * @offline: true, cpu is going offline. false, cpu is coming online.
  *
  * If cpu is going offline, we'll try our best effort to find the next
- * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
  *
- * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu.
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
  *
- * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
  *	 PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
  *
  **/
@@ -11301,14 +11285,14 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 	struct lpfc_vector_map_info *cpup;
 	struct cpumask *aff_mask;
 	unsigned int cpu_select, cpu_next, idx;
-	const struct cpumask *numa_mask;
+	const struct cpumask *orig_mask;
 
-	if (!phba->cfg_irq_numa)
+	if (phba->irq_chann_mode == NORMAL_MODE)
 		return;
 
-	numa_mask = &phba->sli4_hba.numa_mask;
+	orig_mask = &phba->sli4_hba.irq_aff_mask;
 
-	if (!cpumask_test_cpu(cpu, numa_mask))
+	if (!cpumask_test_cpu(cpu, orig_mask))
 		return;
 
 	cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -11317,9 +11301,9 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 		return;
 
 	if (offline) {
-		/* Find next online CPU on NUMA node */
-		cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
-		cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+		/* Find next online CPU on original mask */
+		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
 
 		/* Found a valid CPU */
 		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
@@ -11434,7 +11418,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 {
 	int vectors, rc, index;
 	char *name;
-	const struct cpumask *numa_mask = NULL;
+	const struct cpumask *aff_mask = NULL;
 	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
 	struct lpfc_hba_eq_hdl *eqhdl;
 	const struct cpumask *maskp;
@@ -11444,16 +11428,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 	/* Set up MSI-X multi-message vectors */
 	vectors = phba->cfg_irq_chann;
 
-	if (phba->cfg_irq_numa) {
-		numa_mask = &phba->sli4_hba.numa_mask;
-		cpu_cnt = cpumask_weight(numa_mask);
+	if (phba->irq_chann_mode != NORMAL_MODE)
+		aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+	if (aff_mask) {
+		cpu_cnt = cpumask_weight(aff_mask);
 		vectors = min(phba->cfg_irq_chann, cpu_cnt);
 
-		/* cpu: iterates over numa_mask including offline or online
-		 * cpu_select: iterates over online numa_mask to set affinity
+		/* cpu: iterates over aff_mask including offline or online
+		 * cpu_select: iterates over online aff_mask to set affinity
 		 */
-		cpu = cpumask_first(numa_mask);
-		cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+		cpu = cpumask_first(aff_mask);
+		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
 	} else {
 		flags |= PCI_IRQ_AFFINITY;
 	}
@@ -11487,7 +11473,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 
 		eqhdl->irq = pci_irq_vector(phba->pcidev, index);
 
-		if (phba->cfg_irq_numa) {
+		if (aff_mask) {
 			/* If found a neighboring online cpu, set affinity */
 			if (cpu_select < nr_cpu_ids)
 				lpfc_irq_set_aff(eqhdl, cpu_select);
@@ -11497,11 +11483,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 						LPFC_CPU_FIRST_IRQ,
 						cpu);
 
-			/* Iterate to next offline or online cpu in numa_mask */
-			cpu = cpumask_next(cpu, numa_mask);
+			/* Iterate to next offline or online cpu in aff_mask */
+			cpu = cpumask_next(cpu, aff_mask);
 
-			/* Find next online cpu in numa_mask to set affinity */
-			cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+			/* Find next online cpu in aff_mask to set affinity */
+			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
 		} else if (vectors == 1) {
 			cpu = cpumask_first(cpu_present_mask);
 			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,