- From: Marcin Wojtas <mw@semihalf.com>
- Date: Mon, 14 Mar 2016 09:39:03 +0100
- Subject: [PATCH] net: mvneta: bm: add support for hardware buffer management
- Buffer manager (BM) is a dedicated hardware unit that can be used by all
- ethernet ports of the Armada XP and 38x SoCs. It offloads the CPU on the RX
- path by sparing DRAM accesses when refilling the buffer pools, by filling
- descriptor ring data in hardware, and by improving memory utilization thanks
- to HW arbitration, which selects 'short' pools for small packets.
- Tests performed with an A388 SoC working as a network bridge between two
- packet generators showed an increase of ~20k in the maximum number of
- processed 64B packets (~555k packets with BM enabled vs ~535k packets
- without BM). Also, when pushing 1500B packets at line rate, CPU load
- decreased from around 25% without BM to 20% with BM.
- BM comprises up to 4 buffer pointer (BP) rings kept in DRAM, which
- are called external BP pools - BPPE. Allocating and releasing buffer
- pointers (BP) to/from a BPPE is performed indirectly, by write/read access
- to a dedicated internal SRAM, where the internal BP pools (BPPI) are placed.
- The BM hardware controls the status of the BPPE automatically, as well as
- assigning proper buffers to RX descriptors. For more details please refer to
- the Functional Specification of the Armada XP or 38x SoC.
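- 
- As a minimal illustration of this indirect access scheme, releasing or
- allocating a single buffer pointer boils down to one write or read of the
- pool's slot in the BM internal SRAM. The sketch below mirrors the
- mvneta_bm_pool_put_bp()/mvneta_bm_pool_get_bp() helpers added by this patch
- (MVNETA_BM_POOL_ACCESS_OFFS, defined in mvneta_bm.h below, is the per-pool
- slot stride):
- 
-   /* Sketch: release a buffer pointer to pool 'id' by writing its DMA
-    * address to the pool's BPPI slot; the BM hardware moves pointers
-    * between the BPPI and the BPPE ring in DRAM automatically.
-    */
-   static inline void bppi_put_bp(void __iomem *bppi, u8 id, dma_addr_t bp)
-   {
-           writel_relaxed(bp, bppi + (id << MVNETA_BM_POOL_ACCESS_OFFS));
-   }
- 
-   /* Sketch: allocate a buffer pointer from pool 'id' by reading its slot */
-   static inline u32 bppi_get_bp(void __iomem *bppi, u8 id)
-   {
-           return readl_relaxed(bppi + (id << MVNETA_BM_POOL_ACCESS_OFFS));
-   }
- 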
- In order to enable support for this separate hardware block, common for all
- ports, a new driver has to be implemented ('mvneta_bm'). It provides the
- initialization sequence for the address space, clocks, registers, SRAM and
- empty pool structures, and also obtains optional configuration
- from DT (please refer to the device tree binding documentation). mvneta_bm
- also exposes the necessary API to the mvneta driver, as well as a dedicated
- structure with BM information (bm_priv), whose presence is used as a
- flag notifying of BM usage by a port. It has to be ensured that the
- mvneta_bm probe is executed prior to the ones in the ports' driver. In case
- BM is not used or its probe fails, mvneta falls back to software buffer
- management.
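- 
- This ordering is handled with a simple handshake, condensed below from the
- mvneta_probe() hunk later in this patch: mvneta_bm_probe() stores its
- private structure in its DT node's data pointer, and a port only enables
- HW BM if that pointer is already populated:
- 
-   /* Sketch: use HW BM only if the BM driver has already probed
-    * (bm_node->data was set by mvneta_bm_probe()); otherwise keep
-    * bm_priv NULL and fall back to software buffer management.
-    */
-   bm_node = of_parse_phandle(dn, "buffer-manager", 0);
-   if (bm_node && bm_node->data) {
-           pp->bm_priv = bm_node->data;
-           if (mvneta_bm_port_init(pdev, pp) < 0)
-                   pp->bm_priv = NULL;     /* SW BM fallback */
-   }
- 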
- The sequence executed in the mvneta_probe function is modified in order to
- have access to the needed resources before the port's possible BM
- initialization is done. According to the port-pools mapping provided by DT,
- the appropriate registers are configured and the buffer pools are filled.
- The RX path is modified accordingly. Because the hardware allows a wide
- variety of configuration options, the following assumptions are made (a
- condensed sketch of the resulting per-port pool setup follows the list):
- * using BM mechanisms can be selectively enabled/disabled per port,
- based on the DT configuration
- * the 'long' pool's single buffer size is tied to the port's MTU
- * using a 'long' pool is obligatory for each port and it cannot be shared
- * using a 'short' pool for smaller packets is optional
- * one 'short' pool can be shared among all ports
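- 
- A condensed sketch of the per-port pool setup that results from these
- assumptions (taken from the mvneta_bm_port_init() hunk later in this patch;
- error handling trimmed for brevity):
- 
-   /* The 'long' pool is obligatory and sized from the port's MTU; the
-    * 'short' pool is optional and falls back to the 'long' pool id when
-    * the 'bm,pool-short' property is absent (single-pool mode).
-    */
-   if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id))
-           return -EINVAL;
-   pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
-                                      MVNETA_BM_LONG, pp->id,
-                                      MVNETA_RX_PKT_SIZE(pp->dev->mtu));
-   if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
-           short_pool_id = long_pool_id;
-   pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
-                                       MVNETA_BM_SHORT, pp->id,
-                                       MVNETA_BM_SHORT_PKT_SIZE);
- 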
- This commit enables hardware buffer management operation in cooperation
- with the existing mvneta driver. New device tree binding documentation is
- added and that of mvneta is updated accordingly.
- [gregory.clement@free-electrons.com: removed the suspend/resume part]
- Signed-off-by: Marcin Wojtas <mw@semihalf.com>
- Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
- Signed-off-by: David S. Miller <davem@davemloft.net>
- ---
- create mode 100644 Documentation/devicetree/bindings/net/marvell-neta-bm.txt
- create mode 100644 drivers/net/ethernet/marvell/mvneta_bm.c
- create mode 100644 drivers/net/ethernet/marvell/mvneta_bm.h
- --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
- +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
- @@ -13,15 +13,30 @@ Optional properties:
- Value is presented in bytes. If not used, by default 1600B is set for
- "marvell,armada-370-neta" and 9800B for others.
-
- +Optional properties (valid only for Armada XP/38x):
- +
- +- buffer-manager: a phandle to a buffer manager node. Please refer to
- + Documentation/devicetree/bindings/net/marvell-neta-bm.txt
- +- bm,pool-long: ID of a pool that will accept all packets of a size
- + higher than the 'short' pool's threshold (if set) and up to the MTU
- + value. Obligatory when the port is supposed to use hardware
- + buffer management.
- +- bm,pool-short: ID of a pool that will be used for accepting
- + packets of a size lower than a given threshold. If not set, the port
- + will use a single 'long' pool for all packets, as defined above.
- +
- Example:
-
- -ethernet@d0070000 {
- +ethernet@70000 {
- compatible = "marvell,armada-370-neta";
- - reg = <0xd0070000 0x2500>;
- + reg = <0x70000 0x2500>;
- interrupts = <8>;
- clocks = <&gate_clk 4>;
- tx-csum-limit = <9800>
- status = "okay";
- phy = <&phy0>;
- phy-mode = "rgmii-id";
- + buffer-manager = <&bm>;
- + bm,pool-long = <0>;
- + bm,pool-short = <1>;
- };
- --- /dev/null
- +++ b/Documentation/devicetree/bindings/net/marvell-neta-bm.txt
- @@ -0,0 +1,49 @@
- +* Marvell Armada 380/XP Buffer Manager driver (BM)
- +
- +Required properties:
- +
- +- compatible: should be "marvell,armada-380-neta-bm".
- +- reg: address and length of the register set for the device.
- +- clocks: a pointer to the reference clock for this device.
- +- internal-mem: a phandle to BM internal SRAM definition.
- +
- +Optional properties (port):
- +
- +- pool<0 : 3>,capacity: size of the external buffer pointers' ring
- + maintained in DRAM. Can be set for each pool (id 0 : 3) separately. The
- + value has to be chosen between 128 and 16352 and it also has to be aligned
- + to 32. Otherwise the driver will adjust the given number, or choose a
- + default if not set.
- +- pool<0 : 3>,pkt-size: maximum size of a packet accepted by a given buffer
- + pointers' pool (id 0 : 3). It will be taken into consideration only when
- + the pool type is 'short'. For 'long' pools it is overridden by the port's
- + MTU. If not set, the driver will choose a default value.
- +
- +In order to see how to hook the BM to a given ethernet port, please
- +refer to Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt.
- +
- +Example:
- +
- +- main node:
- +
- +bm: bm@c8000 {
- + compatible = "marvell,armada-380-neta-bm";
- + reg = <0xc8000 0xac>;
- + clocks = <&gateclk 13>;
- + internal-mem = <&bm_bppi>;
- + status = "okay";
- + pool2,capacity = <4096>;
- + pool1,pkt-size = <512>;
- +};
- +
- +- internal SRAM node:
- +
- +bm_bppi: bm-bppi {
- + compatible = "mmio-sram";
- + reg = <MBUS_ID(0x0c, 0x04) 0 0x100000>;
- + ranges = <0 MBUS_ID(0x0c, 0x04) 0 0x100000>;
- + #address-cells = <1>;
- + #size-cells = <1>;
- + clocks = <&gateclk 13>;
- + status = "okay";
- +};
- --- a/drivers/net/ethernet/marvell/Kconfig
- +++ b/drivers/net/ethernet/marvell/Kconfig
- @@ -40,6 +40,19 @@ config MVMDIO
-
- This driver is used by the MV643XX_ETH and MVNETA drivers.
-
- +config MVNETA_BM
- + tristate "Marvell Armada 38x/XP network interface BM support"
- + depends on MVNETA
- + ---help---
- + This driver supports the buffer manager, an auxiliary block of
- + the network interface units in the Marvell ARMADA XP and
- + ARMADA 38x SoC families.
- +
- + This driver, when enabled, strictly cooperates with the mvneta
- + driver and is common for all network ports of the devices, even
- + for the Armada 370 SoC, which doesn't support hardware
- + buffer management.
- +
- config MVNETA
- tristate "Marvell Armada 370/38x/XP network interface support"
- depends on PLAT_ORION
- --- a/drivers/net/ethernet/marvell/Makefile
- +++ b/drivers/net/ethernet/marvell/Makefile
- @@ -4,6 +4,7 @@
-
- obj-$(CONFIG_MVMDIO) += mvmdio.o
- obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
- +obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o
- obj-$(CONFIG_MVNETA) += mvneta.o
- obj-$(CONFIG_MVPP2) += mvpp2.o
- obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
- --- a/drivers/net/ethernet/marvell/mvneta.c
- +++ b/drivers/net/ethernet/marvell/mvneta.c
- @@ -30,6 +30,7 @@
- #include <linux/phy.h>
- #include <linux/platform_device.h>
- #include <linux/skbuff.h>
- +#include "mvneta_bm.h"
- #include <net/ip.h>
- #include <net/ipv6.h>
- #include <net/tso.h>
- @@ -37,6 +38,10 @@
- /* Registers */
- #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
- #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
- +#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
- +#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
- +#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
- +#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
- #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
- #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
- #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
- @@ -50,6 +55,9 @@
- #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
- #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
- #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
- +#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
- +#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
- +#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
- #define MVNETA_PORT_RX_RESET 0x1cc0
- #define MVNETA_PORT_RX_DMA_RESET BIT(0)
- #define MVNETA_PHY_ADDR 0x2000
- @@ -107,6 +115,7 @@
- #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
- #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
- #define MVNETA_ACC_MODE 0x2500
- +#define MVNETA_BM_ADDRESS 0x2504
- #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
- #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
- #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
- @@ -253,7 +262,10 @@
- #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
- #define MVNETA_TX_CSUM_DEF_SIZE 1600
- #define MVNETA_TX_CSUM_MAX_SIZE 9800
- -#define MVNETA_ACC_MODE_EXT 1
- +#define MVNETA_ACC_MODE_EXT1 1
- +#define MVNETA_ACC_MODE_EXT2 2
- +
- +#define MVNETA_MAX_DECODE_WIN 6
-
- /* Timeout constants */
- #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
- @@ -293,7 +305,8 @@
- ((addr >= txq->tso_hdrs_phys) && \
- (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
-
- -#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
- +#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
- + (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
-
- struct mvneta_statistic {
- unsigned short offset;
- @@ -359,6 +372,7 @@ struct mvneta_pcpu_port {
- };
-
- struct mvneta_port {
- + u8 id;
- struct mvneta_pcpu_port __percpu *ports;
- struct mvneta_pcpu_stats __percpu *stats;
-
- @@ -392,6 +406,11 @@ struct mvneta_port {
- unsigned int tx_csum_limit;
- unsigned int use_inband_status:1;
-
- + struct mvneta_bm *bm_priv;
- + struct mvneta_bm_pool *pool_long;
- + struct mvneta_bm_pool *pool_short;
- + int bm_win_id;
- +
- u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
-
- u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
- @@ -417,6 +436,8 @@ struct mvneta_port {
- #define MVNETA_TX_L4_CSUM_NOT BIT(31)
-
- #define MVNETA_RXD_ERR_CRC 0x0
- +#define MVNETA_RXD_BM_POOL_SHIFT 13
- +#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
- #define MVNETA_RXD_ERR_SUMMARY BIT(16)
- #define MVNETA_RXD_ERR_OVERRUN BIT(17)
- #define MVNETA_RXD_ERR_LEN BIT(18)
- @@ -561,6 +582,9 @@ static int rxq_def;
-
- static int rx_copybreak __read_mostly = 256;
-
- +/* HW BM needs each port to be identified by a unique ID */
- +static int global_port_id;
- +
- #define MVNETA_DRIVER_NAME "mvneta"
- #define MVNETA_DRIVER_VERSION "1.0"
-
- @@ -827,6 +851,214 @@ static void mvneta_rxq_bm_disable(struct
- mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
- }
-
- +/* Enable buffer management (BM) */
- +static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
- + struct mvneta_rx_queue *rxq)
- +{
- + u32 val;
- +
- + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
- + val |= MVNETA_RXQ_HW_BUF_ALLOC;
- + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
- +}
- +
- +/* Notify HW about port's assignment of pool for bigger packets */
- +static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
- + struct mvneta_rx_queue *rxq)
- +{
- + u32 val;
- +
- + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
- + val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
- + val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
- +
- + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
- +}
- +
- +/* Notify HW about port's assignment of pool for smaller packets */
- +static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
- + struct mvneta_rx_queue *rxq)
- +{
- + u32 val;
- +
- + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
- + val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
- + val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
- +
- + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
- +}
- +
- +/* Set port's receive buffer size for assigned BM pool */
- +static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
- + int buf_size,
- + u8 pool_id)
- +{
- + u32 val;
- +
- + if (!IS_ALIGNED(buf_size, 8)) {
- + dev_warn(pp->dev->dev.parent,
- + "illegal buf_size value %d, round to %d\n",
- + buf_size, ALIGN(buf_size, 8));
- + buf_size = ALIGN(buf_size, 8);
- + }
- +
- + val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
- + val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
- + mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
- +}
- +
- +/* Configure MBUS window in order to enable access to BM internal SRAM */
- +static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
- + u8 target, u8 attr)
- +{
- + u32 win_enable, win_protect;
- + int i;
- +
- + win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
- +
- + if (pp->bm_win_id < 0) {
- + /* Find first not occupied window */
- + for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
- + if (win_enable & (1 << i)) {
- + pp->bm_win_id = i;
- + break;
- + }
- + }
- + if (i == MVNETA_MAX_DECODE_WIN)
- + return -ENOMEM;
- + } else {
- + i = pp->bm_win_id;
- + }
- +
- + mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
- + mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
- +
- + if (i < 4)
- + mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
- +
- + mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
- + (attr << 8) | target);
- +
- + mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
- +
- + win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
- + win_protect |= 3 << (2 * i);
- + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
- +
- + win_enable &= ~(1 << i);
- + mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
- +
- + return 0;
- +}
- +
- +/* Assign and initialize pools for the port. In case of failure,
- + * the buffer manager will remain disabled for the current port.
- + */
- +static int mvneta_bm_port_init(struct platform_device *pdev,
- + struct mvneta_port *pp)
- +{
- + struct device_node *dn = pdev->dev.of_node;
- + u32 long_pool_id, short_pool_id, wsize;
- + u8 target, attr;
- + int err;
- +
- + /* Get BM window information */
- + err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
- + &target, &attr);
- + if (err < 0)
- + return err;
- +
- + pp->bm_win_id = -1;
- +
- + /* Open NETA -> BM window */
- + err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
- + target, attr);
- + if (err < 0) {
- + netdev_info(pp->dev, "fail to configure mbus window to BM\n");
- + return err;
- + }
- +
- + if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
- + netdev_info(pp->dev, "missing long pool id\n");
- + return -EINVAL;
- + }
- +
- + /* Create port's long pool depending on mtu */
- + pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
- + MVNETA_BM_LONG, pp->id,
- + MVNETA_RX_PKT_SIZE(pp->dev->mtu));
- + if (!pp->pool_long) {
- + netdev_info(pp->dev, "fail to obtain long pool for port\n");
- + return -ENOMEM;
- + }
- +
- + pp->pool_long->port_map |= 1 << pp->id;
- +
- + mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
- + pp->pool_long->id);
- +
- + /* If short pool id is not defined, assume using single pool */
- + if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
- + short_pool_id = long_pool_id;
- +
- + /* Create port's short pool */
- + pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
- + MVNETA_BM_SHORT, pp->id,
- + MVNETA_BM_SHORT_PKT_SIZE);
- + if (!pp->pool_short) {
- + netdev_info(pp->dev, "fail to obtain short pool for port\n");
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
- + return -ENOMEM;
- + }
- +
- + if (short_pool_id != long_pool_id) {
- + pp->pool_short->port_map |= 1 << pp->id;
- + mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
- + pp->pool_short->id);
- + }
- +
- + return 0;
- +}
- +
- +/* Update settings of a pool for bigger packets */
- +static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
- +{
- + struct mvneta_bm_pool *bm_pool = pp->pool_long;
- + int num;
- +
- + /* Release all buffers from long pool */
- + mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
- + if (bm_pool->buf_num) {
- + WARN(1, "cannot free all buffers in pool %d\n",
- + bm_pool->id);
- + goto bm_mtu_err;
- + }
- +
- + bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
- + bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
- + bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
- +
- + /* Fill entire long pool */
- + num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size);
- + if (num != bm_pool->size) {
- + WARN(1, "pool %d: %d of %d allocated\n",
- + bm_pool->id, num, bm_pool->size);
- + goto bm_mtu_err;
- + }
- + mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
- +
- + return;
- +
- +bm_mtu_err:
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
- +
- + pp->bm_priv = NULL;
- + mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
- + netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
- +}
- +
- /* Start the Ethernet port RX and TX activity */
- static void mvneta_port_up(struct mvneta_port *pp)
- {
- @@ -1152,9 +1384,17 @@ static void mvneta_defaults_set(struct m
- mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
-
- /* Set Port Acceleration Mode */
- - val = MVNETA_ACC_MODE_EXT;
- + if (pp->bm_priv)
- + /* HW buffer management + legacy parser */
- + val = MVNETA_ACC_MODE_EXT2;
- + else
- + /* SW buffer management + legacy parser */
- + val = MVNETA_ACC_MODE_EXT1;
- mvreg_write(pp, MVNETA_ACC_MODE, val);
-
- + if (pp->bm_priv)
- + mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
- +
- /* Update val of portCfg register accordingly with all RxQueue types */
- val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
- mvreg_write(pp, MVNETA_PORT_CONFIG, val);
- @@ -1521,23 +1761,25 @@ static void mvneta_txq_done(struct mvnet
- }
- }
-
- -static void *mvneta_frag_alloc(const struct mvneta_port *pp)
- +void *mvneta_frag_alloc(unsigned int frag_size)
- {
- - if (likely(pp->frag_size <= PAGE_SIZE))
- - return netdev_alloc_frag(pp->frag_size);
- + if (likely(frag_size <= PAGE_SIZE))
- + return netdev_alloc_frag(frag_size);
- else
- - return kmalloc(pp->frag_size, GFP_ATOMIC);
- + return kmalloc(frag_size, GFP_ATOMIC);
- }
- +EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
- -static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
- +void mvneta_frag_free(unsigned int frag_size, void *data)
- {
- - if (likely(pp->frag_size <= PAGE_SIZE))
- + if (likely(frag_size <= PAGE_SIZE))
- skb_free_frag(data);
- else
- kfree(data);
- }
- +EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
- -/* Refill processing */
- +/* Refill processing for SW buffer management */
- static int mvneta_rx_refill(struct mvneta_port *pp,
- struct mvneta_rx_desc *rx_desc)
-
- @@ -1545,7 +1787,7 @@ static int mvneta_rx_refill(struct mvnet
- dma_addr_t phys_addr;
- void *data;
-
- - data = mvneta_frag_alloc(pp);
- + data = mvneta_frag_alloc(pp->frag_size);
- if (!data)
- return -ENOMEM;
-
- @@ -1553,7 +1795,7 @@ static int mvneta_rx_refill(struct mvnet
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- - mvneta_frag_free(pp, data);
- + mvneta_frag_free(pp->frag_size, data);
- return -ENOMEM;
- }
-
- @@ -1599,22 +1841,156 @@ static void mvneta_rxq_drop_pkts(struct
- int rx_done, i;
-
- rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
- + if (rx_done)
- + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
- +
- + if (pp->bm_priv) {
- + for (i = 0; i < rx_done; i++) {
- + struct mvneta_rx_desc *rx_desc =
- + mvneta_rxq_next_desc_get(rxq);
- + u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
- + struct mvneta_bm_pool *bm_pool;
- +
- + bm_pool = &pp->bm_priv->bm_pools[pool_id];
- + /* Return dropped buffer to the pool */
- + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
- + rx_desc->buf_phys_addr);
- + }
- + return;
- + }
- +
- for (i = 0; i < rxq->size; i++) {
- struct mvneta_rx_desc *rx_desc = rxq->descs + i;
- void *data = (void *)rx_desc->buf_cookie;
-
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- - mvneta_frag_free(pp, data);
- + mvneta_frag_free(pp->frag_size, data);
- }
- +}
-
- - if (rx_done)
- - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
- +/* Main rx processing when using software buffer management */
- +static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
- + struct mvneta_rx_queue *rxq)
- +{
- + struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
- + struct net_device *dev = pp->dev;
- + int rx_done;
- + u32 rcvd_pkts = 0;
- + u32 rcvd_bytes = 0;
- +
- + /* Get number of received packets */
- + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
- +
- + if (rx_todo > rx_done)
- + rx_todo = rx_done;
- +
- + rx_done = 0;
- +
- + /* Fairness NAPI loop */
- + while (rx_done < rx_todo) {
- + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- + struct sk_buff *skb;
- + unsigned char *data;
- + dma_addr_t phys_addr;
- + u32 rx_status, frag_size;
- + int rx_bytes, err;
- +
- + rx_done++;
- + rx_status = rx_desc->status;
- + rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
- + data = (unsigned char *)rx_desc->buf_cookie;
- + phys_addr = rx_desc->buf_phys_addr;
- +
- + if (!mvneta_rxq_desc_is_first_last(rx_status) ||
- + (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- +err_drop_frame:
- + dev->stats.rx_errors++;
- + mvneta_rx_error(pp, rx_desc);
- + /* leave the descriptor untouched */
- + continue;
- + }
- +
- + if (rx_bytes <= rx_copybreak) {
- + /* better copy a small frame and not unmap the DMA region */
- + skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
- + if (unlikely(!skb))
- + goto err_drop_frame;
- +
- + dma_sync_single_range_for_cpu(dev->dev.parent,
- + rx_desc->buf_phys_addr,
- + MVNETA_MH_SIZE + NET_SKB_PAD,
- + rx_bytes,
- + DMA_FROM_DEVICE);
- + memcpy(skb_put(skb, rx_bytes),
- + data + MVNETA_MH_SIZE + NET_SKB_PAD,
- + rx_bytes);
- +
- + skb->protocol = eth_type_trans(skb, dev);
- + mvneta_rx_csum(pp, rx_status, skb);
- + napi_gro_receive(&port->napi, skb);
- +
- + rcvd_pkts++;
- + rcvd_bytes += rx_bytes;
- +
- + /* leave the descriptor and buffer untouched */
- + continue;
- + }
- +
- + /* Refill processing */
- + err = mvneta_rx_refill(pp, rx_desc);
- + if (err) {
- + netdev_err(dev, "Linux processing - Can't refill\n");
- + rxq->missed++;
- + goto err_drop_frame;
- + }
- +
- + frag_size = pp->frag_size;
- +
- + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
- +
- + /* After refill the old buffer has to be unmapped regardless of
- + * whether the skb was successfully built or not.
- + */
- + dma_unmap_single(dev->dev.parent, phys_addr,
- + MVNETA_RX_BUF_SIZE(pp->pkt_size),
- + DMA_FROM_DEVICE);
- +
- + if (!skb)
- + goto err_drop_frame;
- +
- + rcvd_pkts++;
- + rcvd_bytes += rx_bytes;
- +
- + /* Linux processing */
- + skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
- + skb_put(skb, rx_bytes);
- +
- + skb->protocol = eth_type_trans(skb, dev);
- +
- + mvneta_rx_csum(pp, rx_status, skb);
- +
- + napi_gro_receive(&port->napi, skb);
- + }
- +
- + if (rcvd_pkts) {
- + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- +
- + u64_stats_update_begin(&stats->syncp);
- + stats->rx_packets += rcvd_pkts;
- + stats->rx_bytes += rcvd_bytes;
- + u64_stats_update_end(&stats->syncp);
- + }
- +
- + /* Update rxq management counters */
- + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
- +
- + return rx_done;
- }
-
- -/* Main rx processing */
- -static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
- - struct mvneta_rx_queue *rxq)
- +/* Main rx processing when using hardware buffer management */
- +static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
- + struct mvneta_rx_queue *rxq)
- {
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
- struct net_device *dev = pp->dev;
- @@ -1633,21 +2009,29 @@ static int mvneta_rx(struct mvneta_port
- /* Fairness NAPI loop */
- while (rx_done < rx_todo) {
- struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- + struct mvneta_bm_pool *bm_pool = NULL;
- struct sk_buff *skb;
- unsigned char *data;
- dma_addr_t phys_addr;
- - u32 rx_status;
- + u32 rx_status, frag_size;
- int rx_bytes, err;
- + u8 pool_id;
-
- rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
- data = (unsigned char *)rx_desc->buf_cookie;
- phys_addr = rx_desc->buf_phys_addr;
- + pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
- + bm_pool = &pp->bm_priv->bm_pools[pool_id];
-
- if (!mvneta_rxq_desc_is_first_last(rx_status) ||
- (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- - err_drop_frame:
- +err_drop_frame_ret_pool:
- + /* Return the buffer to the pool */
- + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
- + rx_desc->buf_phys_addr);
- +err_drop_frame:
- dev->stats.rx_errors++;
- mvneta_rx_error(pp, rx_desc);
- /* leave the descriptor untouched */
- @@ -1658,7 +2042,7 @@ static int mvneta_rx(struct mvneta_port
- /* better copy a small frame and not unmap the DMA region */
- skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
- if (unlikely(!skb))
- - goto err_drop_frame;
- + goto err_drop_frame_ret_pool;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- rx_desc->buf_phys_addr,
- @@ -1676,26 +2060,31 @@ static int mvneta_rx(struct mvneta_port
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
-
- + /* Return the buffer to the pool */
- + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
- + rx_desc->buf_phys_addr);
- +
- /* leave the descriptor and buffer untouched */
- continue;
- }
-
- /* Refill processing */
- - err = mvneta_rx_refill(pp, rx_desc);
- + err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- - goto err_drop_frame;
- + goto err_drop_frame_ret_pool;
- }
-
- - skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
- + frag_size = bm_pool->frag_size;
- +
- + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
-
- /* After refill old buffer has to be unmapped regardless
- * the skb is successfully built or not.
- */
- - dma_unmap_single(dev->dev.parent, phys_addr,
- - MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- -
- + dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
- + bm_pool->buf_size, DMA_FROM_DEVICE);
- if (!skb)
- goto err_drop_frame;
-
- @@ -2300,7 +2689,10 @@ static int mvneta_poll(struct napi_struc
-
- if (rx_queue) {
- rx_queue = rx_queue - 1;
- - rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
- + if (pp->bm_priv)
- + rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
- + else
- + rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
- }
-
- budget -= rx_done;
- @@ -2389,9 +2781,17 @@ static int mvneta_rxq_init(struct mvneta
- mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
- mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
-
- - /* Fill RXQ with buffers from RX pool */
- - mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
- - mvneta_rxq_bm_disable(pp, rxq);
- + if (!pp->bm_priv) {
- + /* Fill RXQ with buffers from RX pool */
- + mvneta_rxq_buf_size_set(pp, rxq,
- + MVNETA_RX_BUF_SIZE(pp->pkt_size));
- + mvneta_rxq_bm_disable(pp, rxq);
- + } else {
- + mvneta_rxq_bm_enable(pp, rxq);
- + mvneta_rxq_long_pool_set(pp, rxq);
- + mvneta_rxq_short_pool_set(pp, rxq);
- + }
- +
- mvneta_rxq_fill(pp, rxq, rxq->size);
-
- return 0;
- @@ -2664,6 +3064,9 @@ static int mvneta_change_mtu(struct net_
- dev->mtu = mtu;
-
- if (!netif_running(dev)) {
- + if (pp->bm_priv)
- + mvneta_bm_update_mtu(pp, mtu);
- +
- netdev_update_features(dev);
- return 0;
- }
- @@ -2676,6 +3079,9 @@ static int mvneta_change_mtu(struct net_
- mvneta_cleanup_txqs(pp);
- mvneta_cleanup_rxqs(pp);
-
- + if (pp->bm_priv)
- + mvneta_bm_update_mtu(pp, mtu);
- +
- pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- @@ -3567,6 +3973,7 @@ static int mvneta_probe(struct platform_
- struct resource *res;
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *phy_node;
- + struct device_node *bm_node;
- struct mvneta_port *pp;
- struct net_device *dev;
- const char *dt_mac_addr;
- @@ -3694,26 +4101,39 @@ static int mvneta_probe(struct platform_
-
- pp->tx_csum_limit = tx_csum_limit;
-
- + dram_target_info = mv_mbus_dram_info();
- + if (dram_target_info)
- + mvneta_conf_mbus_windows(pp, dram_target_info);
- +
- pp->tx_ring_size = MVNETA_MAX_TXD;
- pp->rx_ring_size = MVNETA_MAX_RXD;
-
- pp->dev = dev;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- + pp->id = global_port_id++;
- +
- + /* Obtain access to BM resources if enabled and already initialized */
- + bm_node = of_parse_phandle(dn, "buffer-manager", 0);
- + if (bm_node && bm_node->data) {
- + pp->bm_priv = bm_node->data;
- + err = mvneta_bm_port_init(pdev, pp);
- + if (err < 0) {
- + dev_info(&pdev->dev, "use SW buffer management\n");
- + pp->bm_priv = NULL;
- + }
- + }
- +
- err = mvneta_init(&pdev->dev, pp);
- if (err < 0)
- - goto err_free_stats;
- + goto err_netdev;
-
- err = mvneta_port_power_up(pp, phy_mode);
- if (err < 0) {
- dev_err(&pdev->dev, "can't power up port\n");
- - goto err_free_stats;
- + goto err_netdev;
- }
-
- - dram_target_info = mv_mbus_dram_info();
- - if (dram_target_info)
- - mvneta_conf_mbus_windows(pp, dram_target_info);
- -
- for_each_present_cpu(cpu) {
- struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
-
- @@ -3748,6 +4168,13 @@ static int mvneta_probe(struct platform_
-
- return 0;
-
- +err_netdev:
- + unregister_netdev(dev);
- + if (pp->bm_priv) {
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
- + 1 << pp->id);
- + }
- err_free_stats:
- free_percpu(pp->stats);
- err_free_ports:
- @@ -3777,6 +4204,12 @@ static int mvneta_remove(struct platform
- of_node_put(pp->phy_node);
- free_netdev(dev);
-
- + if (pp->bm_priv) {
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
- + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
- + 1 << pp->id);
- + }
- +
- return 0;
- }
-
- --- /dev/null
- +++ b/drivers/net/ethernet/marvell/mvneta_bm.c
- @@ -0,0 +1,546 @@
- +/*
- + * Driver for Marvell NETA network controller Buffer Manager.
- + *
- + * Copyright (C) 2015 Marvell
- + *
- + * Marcin Wojtas <mw@semihalf.com>
- + *
- + * This file is licensed under the terms of the GNU General Public
- + * License version 2. This program is licensed "as is" without any
- + * warranty of any kind, whether express or implied.
- + */
- +
- +#include <linux/kernel.h>
- +#include <linux/genalloc.h>
- +#include <linux/platform_device.h>
- +#include <linux/netdevice.h>
- +#include <linux/skbuff.h>
- +#include <linux/mbus.h>
- +#include <linux/module.h>
- +#include <linux/io.h>
- +#include <linux/of.h>
- +#include <linux/clk.h>
- +#include "mvneta_bm.h"
- +
- +#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
- +#define MVNETA_BM_DRIVER_VERSION "1.0"
- +
- +static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
- +{
- + writel(data, priv->reg_base + offset);
- +}
- +
- +static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
- +{
- + return readl(priv->reg_base + offset);
- +}
- +
- +static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
- +{
- + u32 val;
- +
- + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
- + val |= MVNETA_BM_POOL_ENABLE_MASK;
- + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
- +
- + /* Clear BM cause register */
- + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
- +}
- +
- +static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
- +{
- + u32 val;
- +
- + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
- + val &= ~MVNETA_BM_POOL_ENABLE_MASK;
- + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
- +}
- +
- +static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
- +{
- + u32 val;
- +
- + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
- + val |= mask;
- + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
- +}
- +
- +static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
- +{
- + u32 val;
- +
- + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
- + val &= ~mask;
- + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
- +}
- +
- +static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
- + u8 target_id, u8 attr)
- +{
- + u32 val;
- +
- + val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
- + val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
- + val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
- + val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
- + val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);
- +
- + mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
- +}
- +
- +/* Allocate a buffer for BM pool */
- +void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- + dma_addr_t *buf_phys_addr)
- +{
- + void *buf;
- + dma_addr_t phys_addr;
- +
- + buf = mvneta_frag_alloc(bm_pool->frag_size);
- + if (!buf)
- + return NULL;
- +
- + /* In order to update buf_cookie field of RX descriptor properly,
- + * BM hardware expects buf virtual address to be placed in the
- + * first four bytes of mapped buffer.
- + */
- + *(u32 *)buf = (u32)buf;
- + phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
- + DMA_FROM_DEVICE);
- + if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
- + mvneta_frag_free(bm_pool->frag_size, buf);
- + return NULL;
- + }
- + *buf_phys_addr = phys_addr;
- +
- + return buf;
- +}
- +
- +/* Refill processing for HW buffer management */
- +int mvneta_bm_pool_refill(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool)
- +{
- + dma_addr_t buf_phys_addr;
- + void *buf;
- +
- + buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
- + if (!buf)
- + return -ENOMEM;
- +
- + mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);
- +
- + return 0;
- +}
- +EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);
- +
- +/* Allocate buffers for the pool */
- +int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- + int buf_num)
- +{
- + int err, i;
- +
- + if (bm_pool->buf_num == bm_pool->size) {
- + dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
- + bm_pool->id);
- + return bm_pool->buf_num;
- + }
- +
- + if (buf_num < 0 ||
- + (buf_num + bm_pool->buf_num > bm_pool->size)) {
- + dev_err(&priv->pdev->dev,
- + "cannot allocate %d buffers for pool %d\n",
- + buf_num, bm_pool->id);
- + return 0;
- + }
- +
- + for (i = 0; i < buf_num; i++) {
- + err = mvneta_bm_pool_refill(priv, bm_pool);
- + if (err < 0)
- + break;
- + }
- +
- + /* Update BM driver with number of buffers added to pool */
- + bm_pool->buf_num += i;
- +
- + dev_dbg(&priv->pdev->dev,
- + "%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
- + bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
- + bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
- + bm_pool->frag_size);
- +
- + dev_dbg(&priv->pdev->dev,
- + "%s pool %d: %d of %d buffers added\n",
- + bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
- + bm_pool->id, i, buf_num);
- +
- + return i;
- +}
- +EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);
- +
- +/* Create pool */
- +static int mvneta_bm_pool_create(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool)
- +{
- + struct platform_device *pdev = priv->pdev;
- + u8 target_id, attr;
- + int size_bytes, err;
- +
- + size_bytes = sizeof(u32) * bm_pool->size;
- + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
- + &bm_pool->phys_addr,
- + GFP_KERNEL);
- + if (!bm_pool->virt_addr)
- + return -ENOMEM;
- +
- + if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
- + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
- + bm_pool->phys_addr);
- + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
- + bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
- + return -ENOMEM;
- + }
- +
- + err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
- + &attr);
- + if (err < 0) {
- + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
- + bm_pool->phys_addr);
- + return err;
- + }
- +
- + /* Set pool address */
- + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
- + bm_pool->phys_addr);
- +
- + mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
- + mvneta_bm_pool_enable(priv, bm_pool->id);
- +
- + return 0;
- +}
- +
- +/* Notify the driver that a BM pool is being used as a specific type and
- + * return the pool pointer on success
- + */
- +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
- + enum mvneta_bm_type type, u8 port_id,
- + int pkt_size)
- +{
- + struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
- + int num, err;
- +
- + if (new_pool->type == MVNETA_BM_LONG &&
- + new_pool->port_map != 1 << port_id) {
- + dev_err(&priv->pdev->dev,
- + "long pool cannot be shared by the ports\n");
- + return NULL;
- + }
- +
- + if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
- + dev_err(&priv->pdev->dev,
- + "mixing pools' types between the ports is forbidden\n");
- + return NULL;
- + }
- +
- + if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
- + new_pool->pkt_size = pkt_size;
- +
- + /* Allocate buffers in case BM pool hasn't been used yet */
- + if (new_pool->type == MVNETA_BM_FREE) {
- + new_pool->type = type;
- + new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
- + new_pool->frag_size =
- + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
- + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- +
- + /* Create new pool */
- + err = mvneta_bm_pool_create(priv, new_pool);
- + if (err) {
- + dev_err(&priv->pdev->dev, "fail to create pool %d\n",
- + new_pool->id);
- + return NULL;
- + }
- +
- + /* Allocate buffers for this pool */
- + num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
- + if (num != new_pool->size) {
- + WARN(1, "pool %d: %d of %d allocated\n",
- + new_pool->id, num, new_pool->size);
- + return NULL;
- + }
- + }
- +
- + return new_pool;
- +}
- +EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
- +
- +/* Free all buffers from the pool */
- +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- + u8 port_map)
- +{
- + int i;
- +
- + bm_pool->port_map &= ~port_map;
- + if (bm_pool->port_map)
- + return;
- +
- + mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
- +
- + for (i = 0; i < bm_pool->buf_num; i++) {
- + dma_addr_t buf_phys_addr;
- + u32 *vaddr;
- +
- + /* Get buffer physical address (indirect access) */
- + buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);
- +
- + /* Work around a problem observed when destroying the pool:
- + * a read access to BPPI can spuriously return 0.
- + */
- + if (buf_phys_addr == 0)
- + continue;
- +
- + vaddr = phys_to_virt(buf_phys_addr);
- + if (!vaddr)
- + break;
- +
- + dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
- + bm_pool->buf_size, DMA_FROM_DEVICE);
- + mvneta_frag_free(bm_pool->frag_size, vaddr);
- + }
- +
- + mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
- +
- + /* Update BM driver with number of buffers removed from pool */
- + bm_pool->buf_num -= i;
- +}
- +EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
- +
- +/* Cleanup pool */
- +void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool, u8 port_map)
- +{
- + bm_pool->port_map &= ~port_map;
- + if (bm_pool->port_map)
- + return;
- +
- + bm_pool->type = MVNETA_BM_FREE;
- +
- + mvneta_bm_bufs_free(priv, bm_pool, port_map);
- + if (bm_pool->buf_num)
- + WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
- +
- + if (bm_pool->virt_addr) {
- + dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
- + bm_pool->virt_addr, bm_pool->phys_addr);
- + bm_pool->virt_addr = NULL;
- + }
- +
- + mvneta_bm_pool_disable(priv, bm_pool->id);
- +}
- +EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);
- +
- +static void mvneta_bm_pools_init(struct mvneta_bm *priv)
- +{
- + struct device_node *dn = priv->pdev->dev.of_node;
- + struct mvneta_bm_pool *bm_pool;
- + char prop[15];
- + u32 size;
- + int i;
- +
- + /* Activate BM unit */
- + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);
- +
- + /* Create all pools with maximum size */
- + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
- + bm_pool = &priv->bm_pools[i];
- + bm_pool->id = i;
- + bm_pool->type = MVNETA_BM_FREE;
- +
- + /* Reset read pointer */
- + mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);
- +
- + /* Reset write pointer */
- + mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);
- +
- + /* Configure pool size according to DT or use default value */
- + sprintf(prop, "pool%d,capacity", i);
- + if (of_property_read_u32(dn, prop, &size)) {
- + size = MVNETA_BM_POOL_CAP_DEF;
- + } else if (size > MVNETA_BM_POOL_CAP_MAX) {
- + dev_warn(&priv->pdev->dev,
- + "Illegal pool %d capacity %d, set to %d\n",
- + i, size, MVNETA_BM_POOL_CAP_MAX);
- + size = MVNETA_BM_POOL_CAP_MAX;
- + } else if (size < MVNETA_BM_POOL_CAP_MIN) {
- + dev_warn(&priv->pdev->dev,
- + "Illegal pool %d capacity %d, set to %d\n",
- + i, size, MVNETA_BM_POOL_CAP_MIN);
- + size = MVNETA_BM_POOL_CAP_MIN;
- + } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
- + dev_warn(&priv->pdev->dev,
- + "Illegal pool %d capacity %d, round to %d\n",
- + i, size, ALIGN(size,
- + MVNETA_BM_POOL_CAP_ALIGN));
- + size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
- + }
- + bm_pool->size = size;
- +
- + mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
- + bm_pool->size);
- +
- + /* Obtain custom pkt_size from DT */
- + sprintf(prop, "pool%d,pkt-size", i);
- + if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
- + bm_pool->pkt_size = 0;
- + }
- +}
- +
- +static void mvneta_bm_default_set(struct mvneta_bm *priv)
- +{
- + u32 val;
- +
- + /* Mask BM all interrupts */
- + mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);
- +
- + /* Clear BM cause register */
- + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
- +
- + /* Set BM configuration register */
- + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
- +
- + /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
- + val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
- + val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
- + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
- +}
- +
- +static int mvneta_bm_init(struct mvneta_bm *priv)
- +{
- + mvneta_bm_default_set(priv);
- +
- + /* Allocate and initialize BM pools structures */
- + priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
- + sizeof(struct mvneta_bm_pool),
- + GFP_KERNEL);
- + if (!priv->bm_pools)
- + return -ENOMEM;
- +
- + mvneta_bm_pools_init(priv);
- +
- + return 0;
- +}
- +
- +static int mvneta_bm_get_sram(struct device_node *dn,
- + struct mvneta_bm *priv)
- +{
- + priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
- + if (!priv->bppi_pool)
- + return -ENOMEM;
- +
- + priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
- + MVNETA_BM_BPPI_SIZE,
- + &priv->bppi_phys_addr);
- + if (!priv->bppi_virt_addr)
- + return -ENOMEM;
- +
- + return 0;
- +}
- +
- +static void mvneta_bm_put_sram(struct mvneta_bm *priv)
- +{
- + gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
- + MVNETA_BM_BPPI_SIZE);
- +}
- +
- +static int mvneta_bm_probe(struct platform_device *pdev)
- +{
- + struct device_node *dn = pdev->dev.of_node;
- + struct mvneta_bm *priv;
- + struct resource *res;
- + int err;
- +
- + priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
- + if (!priv)
- + return -ENOMEM;
- +
- + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- + priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- + if (IS_ERR(priv->reg_base))
- + return PTR_ERR(priv->reg_base);
- +
- + priv->clk = devm_clk_get(&pdev->dev, NULL);
- + if (IS_ERR(priv->clk))
- + return PTR_ERR(priv->clk);
- + err = clk_prepare_enable(priv->clk);
- + if (err < 0)
- + return err;
- +
- + err = mvneta_bm_get_sram(dn, priv);
- + if (err < 0) {
- + dev_err(&pdev->dev, "failed to allocate internal memory\n");
- + goto err_clk;
- + }
- +
- + priv->pdev = pdev;
- +
- + /* Initialize buffer manager internals */
- + err = mvneta_bm_init(priv);
- + if (err < 0) {
- + dev_err(&pdev->dev, "failed to initialize controller\n");
- + goto err_sram;
- + }
- +
- + dn->data = priv;
- + platform_set_drvdata(pdev, priv);
- +
- + dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");
- +
- + return 0;
- +
- +err_sram:
- + mvneta_bm_put_sram(priv);
- +err_clk:
- + clk_disable_unprepare(priv->clk);
- + return err;
- +}
- +
- +static int mvneta_bm_remove(struct platform_device *pdev)
- +{
- + struct mvneta_bm *priv = platform_get_drvdata(pdev);
- + u8 all_ports_map = 0xff;
- + int i = 0;
- +
- + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
- + struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];
- +
- + mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
- + }
- +
- + mvneta_bm_put_sram(priv);
- +
- + /* Deactivate BM unit */
- + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);
- +
- + clk_disable_unprepare(priv->clk);
- +
- + return 0;
- +}
- +
- +static const struct of_device_id mvneta_bm_match[] = {
- + { .compatible = "marvell,armada-380-neta-bm" },
- + { }
- +};
- +MODULE_DEVICE_TABLE(of, mvneta_bm_match);
- +
- +static struct platform_driver mvneta_bm_driver = {
- + .probe = mvneta_bm_probe,
- + .remove = mvneta_bm_remove,
- + .driver = {
- + .name = MVNETA_BM_DRIVER_NAME,
- + .of_match_table = mvneta_bm_match,
- + },
- +};
- +
- +module_platform_driver(mvneta_bm_driver);
- +
- +MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
- +MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
- +MODULE_LICENSE("GPL v2");
- --- /dev/null
- +++ b/drivers/net/ethernet/marvell/mvneta_bm.h
- @@ -0,0 +1,189 @@
- +/*
- + * Driver for Marvell NETA network controller Buffer Manager.
- + *
- + * Copyright (C) 2015 Marvell
- + *
- + * Marcin Wojtas <mw@semihalf.com>
- + *
- + * This file is licensed under the terms of the GNU General Public
- + * License version 2. This program is licensed "as is" without any
- + * warranty of any kind, whether express or implied.
- + */
- +
- +#ifndef _MVNETA_BM_H_
- +#define _MVNETA_BM_H_
- +
- +/* BM Configuration Register */
- +#define MVNETA_BM_CONFIG_REG 0x0
- +#define MVNETA_BM_STATUS_MASK 0x30
- +#define MVNETA_BM_ACTIVE_MASK BIT(4)
- +#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000
- +#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18)
- +#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19)
- +
- +/* BM Activation Register */
- +#define MVNETA_BM_COMMAND_REG 0x4
- +#define MVNETA_BM_START_MASK BIT(0)
- +#define MVNETA_BM_STOP_MASK BIT(1)
- +#define MVNETA_BM_PAUSE_MASK BIT(2)
- +
- +/* BM Xbar interface Register */
- +#define MVNETA_BM_XBAR_01_REG 0x8
- +#define MVNETA_BM_XBAR_23_REG 0xc
- +#define MVNETA_BM_XBAR_POOL_REG(pool) \
- + (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG)
- +#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0)
- +#define MVNETA_BM_TARGET_ID_MASK(pool) \
- + (0xf << MVNETA_BM_TARGET_ID_OFFS(pool))
- +#define MVNETA_BM_TARGET_ID_VAL(pool, id) \
- + ((id) << MVNETA_BM_TARGET_ID_OFFS(pool))
- +#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 20 : 4)
- +#define MVNETA_BM_XBAR_ATTR_MASK(pool) \
- + (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool))
- +#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \
- + ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool))
- +
- +/* Address of External Buffer Pointers Pool Register */
- +#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4))
- +#define MVNETA_BM_POOL_ENABLE_MASK BIT(0)
- +
- +/* External Buffer Pointers Pool RD pointer Register */
- +#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4))
- +#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc
- +#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16
- +#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000
- +
- +/* External Buffer Pointers Pool WR pointer */
- +#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4))
- +#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0
- +#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc
- +#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16
- +#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000
- +
- +/* External Buffer Pointers Pool Size Register */
- +#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4))
- +#define MVNETA_BM_POOL_SIZE_MASK 0x3fff
- +
- +/* BM Interrupt Cause Register */
- +#define MVNETA_BM_INTR_CAUSE_REG (0x50)
- +
- +/* BM interrupt Mask Register */
- +#define MVNETA_BM_INTR_MASK_REG (0x54)
- +
- +/* Other definitions */
- +#define MVNETA_BM_SHORT_PKT_SIZE 256
- +#define MVNETA_BM_POOLS_NUM 4
- +#define MVNETA_BM_POOL_CAP_MIN 128
- +#define MVNETA_BM_POOL_CAP_DEF 2048
- +#define MVNETA_BM_POOL_CAP_MAX \
- + (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN)
- +#define MVNETA_BM_POOL_CAP_ALIGN 32
- +#define MVNETA_BM_POOL_PTR_ALIGN 32
- +
- +#define MVNETA_BM_POOL_ACCESS_OFFS 8
- +
- +#define MVNETA_BM_BPPI_SIZE 0x100000
- +
- +#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
- +
- +enum mvneta_bm_type {
- + MVNETA_BM_FREE,
- + MVNETA_BM_LONG,
- + MVNETA_BM_SHORT
- +};
- +
- +struct mvneta_bm {
- + void __iomem *reg_base;
- + struct clk *clk;
- + struct platform_device *pdev;
- +
- + struct gen_pool *bppi_pool;
- + /* BPPI virtual base address */
- + void __iomem *bppi_virt_addr;
- + /* BPPI physical base address */
- + dma_addr_t bppi_phys_addr;
- +
- + /* BM pools */
- + struct mvneta_bm_pool *bm_pools;
- +};
- +
- +struct mvneta_bm_pool {
- + /* Pool number in the range 0-3 */
- + u8 id;
- + enum mvneta_bm_type type;
- +
- + /* Buffer Pointers Pool External (BPPE) size in number of buffer pointers */
- + int size;
- + /* Number of buffers used by this pool */
- + int buf_num;
- + /* Pool buffer size */
- + int buf_size;
- + /* Packet size */
- + int pkt_size;
- + /* Single frag size */
- + u32 frag_size;
- +
- + /* BPPE virtual base address */
- + u32 *virt_addr;
- + /* BPPE physical base address */
- + dma_addr_t phys_addr;
- +
- + /* Ports using BM pool */
- + u8 port_map;
- +
- + struct mvneta_bm *priv;
- +};
- +
- +/* Declarations and definitions */
- +void *mvneta_frag_alloc(unsigned int frag_size);
- +void mvneta_frag_free(unsigned int frag_size, void *data);
- +
- +#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE)
- +void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool, u8 port_map);
- +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- + u8 port_map);
- +int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- + int buf_num);
- +int mvneta_bm_pool_refill(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool);
- +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
- + enum mvneta_bm_type type, u8 port_id,
- + int pkt_size);
- +
- +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool,
- + dma_addr_t buf_phys_addr)
- +{
- + writel_relaxed(buf_phys_addr, priv->bppi_virt_addr +
- + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
- +}
- +
- +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool)
- +{
- + return readl_relaxed(priv->bppi_virt_addr +
- + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
- +}
- +#else
- +static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool, u8 port_map) {}
- +static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool, u8 port_map) {}
- +static inline int mvneta_bm_bufs_add(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool, int buf_num) { return 0; }
- +static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool) { return 0; }
- +static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
- + u8 pool_id, enum mvneta_bm_type type, u8 port_id,
- + int pkt_size) { return NULL; }
- +
- +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool,
- + dma_addr_t buf_phys_addr) {}
- +
- +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
- + struct mvneta_bm_pool *bm_pool)
- +{ return 0; }
- +#endif /* CONFIG_MVNETA_BM */
- +#endif