035-net-mvneta-Configure-XPS-support.patch 3.9 KB

  1. From: Gregory CLEMENT <gregory.clement@free-electrons.com>
  2. Date: Wed, 9 Dec 2015 18:23:51 +0100
  3. Subject: [PATCH] net: mvneta: Configure XPS support
  4. With this patch each CPU is associated with its own set of TX queues.
  5. It also sets up the XPS with an initial configuration which sets the
  6. affinity matching the hardware configuration.
  7. Suggested-by: Arnd Bergmann <arnd@arndb.de>
  8. Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
  9. Signed-off-by: David S. Miller <davem@davemloft.net>
  10. ---
  11. --- a/drivers/net/ethernet/marvell/mvneta.c
  12. +++ b/drivers/net/ethernet/marvell/mvneta.c
  13. @@ -111,6 +111,7 @@
  14. #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
  15. #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
  16. #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
  17. +#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
  18. #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
  19. /* Exception Interrupt Port/Queue Cause register
  20. @@ -514,6 +515,9 @@ struct mvneta_tx_queue {
  21. /* DMA address of TSO headers */
  22. dma_addr_t tso_hdrs_phys;
  23. +
  24. + /* Affinity mask for CPUs*/
  25. + cpumask_t affinity_mask;
  26. };
  27. struct mvneta_rx_queue {
  28. @@ -1067,20 +1071,30 @@ static void mvneta_defaults_set(struct m
  29. /* Enable MBUS Retry bit16 */
  30. mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
  31. - /* Set CPU queue access map. CPUs are assigned to the RX
  32. - * queues modulo their number and all the TX queues are
  33. - * assigned to the CPU associated to the default RX queue.
  34. + /* Set CPU queue access map. CPUs are assigned to the RX and
  35. + * TX queues modulo their number. If there is only one TX
  36. + * queue then it is assigned to the CPU associated to the
  37. + * default RX queue.
  38. */
  39. for_each_present_cpu(cpu) {
  40. int rxq_map = 0, txq_map = 0;
  41. - int rxq;
  42. + int rxq, txq;
  43. for (rxq = 0; rxq < rxq_number; rxq++)
  44. if ((rxq % max_cpu) == cpu)
  45. rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
  46. - if (cpu == pp->rxq_def)
  47. - txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
  48. + for (txq = 0; txq < txq_number; txq++)
  49. + if ((txq % max_cpu) == cpu)
  50. + txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
  51. +
  52. + /* With only one TX queue we configure a special case
  53. + * which will allow to get all the irq on a single
  54. + * CPU
  55. + */
  56. + if (txq_number == 1)
  57. + txq_map = (cpu == pp->rxq_def) ?
  58. + MVNETA_CPU_TXQ_ACCESS(1) : 0;
  59. mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
  60. }
  61. @@ -2367,6 +2381,8 @@ static void mvneta_rxq_deinit(struct mvn
  62. static int mvneta_txq_init(struct mvneta_port *pp,
  63. struct mvneta_tx_queue *txq)
  64. {
  65. + int cpu;
  66. +
  67. txq->size = pp->tx_ring_size;
  68. /* A queue must always have room for at least one skb.
  69. @@ -2419,6 +2435,14 @@ static int mvneta_txq_init(struct mvneta
  70. }
  71. mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
  72. + /* Setup XPS mapping */
  73. + if (txq_number > 1)
  74. + cpu = txq->id % num_present_cpus();
  75. + else
  76. + cpu = pp->rxq_def % num_present_cpus();
  77. + cpumask_set_cpu(cpu, &txq->affinity_mask);
  78. + netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
  79. +
  80. return 0;
  81. }
  82. @@ -2840,13 +2864,23 @@ static void mvneta_percpu_elect(struct m
  83. if ((rxq % max_cpu) == cpu)
  84. rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
  85. - if (i == online_cpu_idx) {
  86. - /* Map the default receive queue and transmit
  87. - * queue to the elected CPU
  88. + if (i == online_cpu_idx)
  89. + /* Map the default receive queue queue to the
  90. + * elected CPU
  91. */
  92. rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
  93. - txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
  94. - }
  95. +
  96. + /* We update the TX queue map only if we have one
  97. + * queue. In this case we associate the TX queue to
  98. + * the CPU bound to the default RX queue
  99. + */
  100. + if (txq_number == 1)
  101. + txq_map = (i == online_cpu_idx) ?
  102. + MVNETA_CPU_TXQ_ACCESS(1) : 0;
  103. + else
  104. + txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
  105. + MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
  106. +
  107. mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
  108. /* Update the interrupt mask on each CPU according the