046-net-mvneta-Use-the-new-hwbm-framework.patch

From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Mon, 14 Mar 2016 09:39:05 +0100
Subject: [PATCH] net: mvneta: Use the new hwbm framework

Now that the hardware buffer management framework has been introduced,
let's use it.
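
For readers who don't have the companion patch at hand: the driver code
below relies only on the small API exported by <net/hwbm.h>. What follows
is a minimal sketch of that contract, inferred from the calls used in this
patch (the spinlock field is elided; see include/net/hwbm.h for the
authoritative definitions):

	struct hwbm_pool {
		int size;	/* capacity of the pool */
		int frag_size;	/* size of each buffer managed */
		int buf_num;	/* buffers currently in the pool */
		/* driver callback invoked on each freshly allocated buffer */
		int (*construct)(struct hwbm_pool *bm_pool, void *buf);
		void *priv;	/* driver cookie (mvneta_bm_pool here) */
	};

	/* Allocate one buffer, run ->construct on it, account for it. */
	int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
	/* Add up to buf_num buffers; returns the number actually added. */
	int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num,
			  gfp_t gfp);
	/* Release a buffer previously produced for the pool. */
	void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);

mvneta's construct callback, mvneta_bm_construct() below, writes the
buffer's virtual address into its first four bytes (as the BM hardware
expects), DMA-maps the buffer, and hands the physical address to the
hardware pool.
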
Tested-by: Sebastian Careba <nitroshift@yahoo.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,6 +43,7 @@ config MVMDIO
 config MVNETA_BM
 	tristate "Marvell Armada 38x/XP network interface BM support"
 	depends on MVNETA
+	select HWBM
 	---help---
 	  This driver supports auxiliary block of the network
 	  interface units in the Marvell ARMADA XP and ARMADA 38x SoC
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -30,6 +30,7 @@
 #include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -1024,11 +1025,12 @@ static int mvneta_bm_port_init(struct pl
 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 {
 	struct mvneta_bm_pool *bm_pool = pp->pool_long;
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	int num;
 
 	/* Release all buffers from long pool */
 	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
-	if (bm_pool->buf_num) {
+	if (hwbm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n",
 		     bm_pool->id);
 		goto bm_mtu_err;
@@ -1036,14 +1038,14 @@ static void mvneta_bm_update_mtu(struct
 	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
 	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
-	bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-			     SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
+	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+			       SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size);
-	if (num != bm_pool->size) {
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
-		     bm_pool->id, num, bm_pool->size);
+		     bm_pool->id, num, hwbm_pool->size);
 		goto bm_mtu_err;
 	}
 
 	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
@@ -2069,14 +2071,14 @@ err_drop_frame:
 		}
 
 		/* Refill processing */
-		err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool);
+		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
 		if (err) {
 			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->missed++;
 			goto err_drop_frame_ret_pool;
 		}
 
-		frag_size = bm_pool->frag_size;
+		frag_size = bm_pool->hwbm_pool.frag_size;
 
 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
 
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -10,16 +10,17 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/genalloc.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mbus.h>
 #include <linux/module.h>
-#include <linux/io.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 
 #define MVNETA_BM_DRIVER_NAME "mvneta_bm"
@@ -88,17 +89,13 @@ static void mvneta_bm_pool_target_set(st
 	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
 }
 
-/* Allocate skb for BM pool */
-void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       dma_addr_t *buf_phys_addr)
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
 {
-	void *buf;
+	struct mvneta_bm_pool *bm_pool =
+		(struct mvneta_bm_pool *)hwbm_pool->priv;
+	struct mvneta_bm *priv = bm_pool->priv;
 	dma_addr_t phys_addr;
 
-	buf = mvneta_frag_alloc(bm_pool->frag_size);
-	if (!buf)
-		return NULL;
-
 	/* In order to update buf_cookie field of RX descriptor properly,
 	 * BM hardware expects buf virtual address to be placed in the
 	 * first four bytes of mapped buffer.
@@ -106,75 +103,13 @@ void *mvneta_buf_alloc(struct mvneta_bm
 	*(u32 *)buf = (u32)buf;
 	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
 				   DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
-		mvneta_frag_free(bm_pool->frag_size, buf);
-		return NULL;
-	}
-	*buf_phys_addr = phys_addr;
-
-	return buf;
-}
-
-/* Refill processing for HW buffer management */
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-			  struct mvneta_bm_pool *bm_pool)
-{
-	dma_addr_t buf_phys_addr;
-	void *buf;
-
-	buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
-	if (!buf)
+	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
 		return -ENOMEM;
-	mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);
-
+	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);
-
-/* Allocate buffers for the pool */
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num)
-{
-	int err, i;
-
-	if (bm_pool->buf_num == bm_pool->size) {
-		dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
-			bm_pool->id);
-		return bm_pool->buf_num;
-	}
-
-	if (buf_num < 0 ||
-	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
-		dev_err(&priv->pdev->dev,
-			"cannot allocate %d buffers for pool %d\n",
-			buf_num, bm_pool->id);
-		return 0;
-	}
-
-	for (i = 0; i < buf_num; i++) {
-		err = mvneta_bm_pool_refill(priv, bm_pool);
-		if (err < 0)
-			break;
-	}
-
-	/* Update BM driver with number of buffers added to pool */
-	bm_pool->buf_num += i;
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
-		bm_pool->frag_size);
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: %d of %d buffers added\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, i, buf_num);
-
-	return i;
-}
-EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
 
 /* Create pool */
 static int mvneta_bm_pool_create(struct mvneta_bm *priv,
 				 struct mvneta_bm_pool *bm_pool)
@@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct
 	struct platform_device *pdev = priv->pdev;
 	u8 target_id, attr;
 	int size_bytes, err;
-
-	size_bytes = sizeof(u32) * bm_pool->size;
+	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
 	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
 						&bm_pool->phys_addr,
 						GFP_KERNEL);
@@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_us
 
 	/* Allocate buffers in case BM pool hasn't been used yet */
 	if (new_pool->type == MVNETA_BM_FREE) {
+		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+		new_pool->priv = priv;
 		new_pool->type = type;
 		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
-		new_pool->frag_size =
+		hwbm_pool->frag_size =
 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		hwbm_pool->construct = mvneta_bm_construct;
+		hwbm_pool->priv = new_pool;
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_us
 		}
 
 		/* Allocate buffers for this pool */
-		num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
-		if (num != new_pool->size) {
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
-			     new_pool->id, num, new_pool->size);
+			     new_pool->id, num, hwbm_pool->size);
 			return NULL;
 		}
 	}
@@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_b
 
 	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
-	for (i = 0; i < bm_pool->buf_num; i++) {
+	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
 		dma_addr_t buf_phys_addr;
 		u32 *vaddr;
 
@@ -303,13 +242,13 @@ void mvneta_bm_bufs_free(struct mvneta_b
 
 		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
 				 bm_pool->buf_size, DMA_FROM_DEVICE);
-		mvneta_frag_free(bm_pool->frag_size, vaddr);
+		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
 	}
 
 	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
 	/* Update BM driver with number of buffers removed from pool */
-	bm_pool->buf_num -= i;
+	bm_pool->hwbm_pool.buf_num -= i;
 }
 EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
@@ -317,6 +256,7 @@ EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map)
 {
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	bm_pool->port_map &= ~port_map;
 	if (bm_pool->port_map)
 		return;
@@ -324,11 +264,12 @@ void mvneta_bm_pool_destroy(struct mvnet
 	bm_pool->type = MVNETA_BM_FREE;
 
 	mvneta_bm_bufs_free(priv, bm_pool, port_map);
-	if (bm_pool->buf_num)
+	if (hwbm_pool->buf_num)
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 
 	if (bm_pool->virt_addr) {
-		dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
+		dma_free_coherent(&priv->pdev->dev,
+				  sizeof(u32) * hwbm_pool->size,
 				  bm_pool->virt_addr, bm_pool->phys_addr);
 		bm_pool->virt_addr = NULL;
 	}
@@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct
 					     MVNETA_BM_POOL_CAP_ALIGN));
 			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
 		}
-		bm_pool->size = size;
+		bm_pool->hwbm_pool.size = size;
 
 		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
-				bm_pool->size);
+				bm_pool->hwbm_pool.size);
 
 		/* Obtain custom pkt_size from DT */
 		sprintf(prop, "pool%d,pkt-size", i);
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -108,20 +108,15 @@ struct mvneta_bm {
 };
 
 struct mvneta_bm_pool {
+	struct hwbm_pool hwbm_pool;
 	/* Pool number in the range 0-3 */
 	u8 id;
 	enum mvneta_bm_type type;
 
-	/* Buffer Pointers Pool External (BPPE) size in number of bytes */
-	int size;
-	/* Number of buffers used by this pool */
-	int buf_num;
-	/* Pool buffer size */
-	int buf_size;
 	/* Packet size */
 	int pkt_size;
-	/* Single frag size */
-	u32 frag_size;
+	/* Size of the buffer accessed through DMA */
+	u32 buf_size;
 
 	/* BPPE virtual base address */
 	u32 *virt_addr;
@@ -143,8 +138,7 @@ void mvneta_bm_pool_destroy(struct mvnet
 			    struct mvneta_bm_pool *bm_pool, u8 port_map);
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 			 u8 port_map);
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num);
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf);
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
 			  struct mvneta_bm_pool *bm_pool);
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
@@ -170,8 +164,7 @@ void mvneta_bm_pool_destroy(struct mvnet
 			    struct mvneta_bm_pool *bm_pool, u8 port_map) {}
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 			 u8 port_map) {}
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num) { return 0; }
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
 			  struct mvneta_bm_pool *bm_pool) {return 0; }
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,