7016-dpa-add-dpaa_eth-driver.patch 548 KB

[patch contents: 14,308 lines, not captured in this extract]
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
8575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211862218623186241862518626186271862818629186301863118632186331863418635186361863718638186391864018641186421864318644186451864618647186481864918650186511865218653186541865518656186571865818659186601866118662186631866418665186661866718668186691867018671186721867318674186751867618677186781867918680186811868218683186841868518686186871868818689186901869118692186931869418695186961869718698186991870018701187021870318704187051870618707187081870918710187111871218713187141871518716187171871818719187201872118722187231872418725187261872718728187291873018731187321873318734187351873618737187381873918740187411874218743187441874518746187471874818749187501875118752187531875418755187561875718758187591876018761187621876318764187651876618767187681876918770187711877218773187741877518776187771877818779187801878118782187831878418785187861878718788187891879018791187921879318794187951879618797187981879918800188011880218803188041880518806188071880818809188101881118812188131881418815188161881718818188191882018821188221882318824188251882618827188281882918830188311883218833188341883518836188371883818839188401884118842188431884418845188461884718848188491885018851188521885318854188551885618857188581885918860188611886218863188641886518866188671886818869188701887118872188731887418875188761887718878188791888018881188821888318884188851888618887188881888918890188911889218893188941889518896188971889818899189001890118902189031890418905189061890718908189091891018911189121891318914189151891618917189181891918920189211892218923189241892518926189271892818929189301893118932189331893418935189361893718938189391894018941189421894318944189451894618947189481894918950189511895218953189541895518956189571895818959189601896118962189631896418965189661896718968189691897018971189721897318974189751897618977189781897918980189811898218983189841898518986189871898818989189901899118992189931899418995189961899718998189991900019001190021900319004190051900619007190081900919010190111901219013190141901519016190171901819019190201902119022190231902419025190261902719028190291903019031190321903319034190351903619037190381903919040190411904219043190441904519046190471904819049190501905119052190531905419055190561905719058190591906019061190621906319064190651906619067190681906919070190711907219073190741907519076190771907819079190801908119082190831908419085190861908719088190891909019091190921909319094190951909619097190981909919100191011910219103191041910519106191071910819109191101911119112191131911419115191161911719118191191912019121191221912319124191251912619127191281912919130191311913219133191341913519136191371913819139191401914119142191431914419145191461914719148191491915019151191521915319154191551915619157191581915919160
  1. From 2af9b49c7e6bad2dee75960ddf61fd52a4d3748f Mon Sep 17 00:00:00 2001
  2. From: Zhao Qiang <qiang.zhao@nxp.com>
  3. Date: Wed, 16 Dec 2015 22:00:36 +0200
  4. Subject: [PATCH 16/70] dpa: add dpaa_eth driver
  5. DPAA is the Datapath Acceleration Architecture; it provides
  6. the infrastructure to support simplified sharing of networking
  7. interfaces and accelerators by multiple CPUs.
  8. Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
  9. Signed-off-by: Camelia Groza <camelia.groza@freescale.com>
  10. Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
  11. Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
  12. Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
  13. Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
  14. ---
  15. drivers/net/ethernet/freescale/Kconfig | 2 +
  16. drivers/net/ethernet/freescale/Makefile | 1 +
  17. drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 187 ++
  18. drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 59 +
  19. .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
  20. .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
  21. .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
  22. .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
  23. drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1183 +++++++++++
  24. drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 695 +++++++
  25. .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
  26. .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
  27. .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1719 ++++++++++++++++
  28. .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 230 +++
  29. .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1787 ++++++++++++++++
  30. .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 227 +++
  31. .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c | 1735 ++++++++++++++++
  32. .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h | 90 +
  33. .../freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c | 201 ++
  34. .../ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c | 499 +++++
  35. .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c | 2156 ++++++++++++++++++++
  36. .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h | 294 +++
  37. .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
  38. .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1128 ++++++++++
  39. .../ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c | 914 +++++++++
  40. .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
  41. .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
  42. .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 +++++
  43. .../freescale/sdk_dpaa/dpaa_generic_ethtool.c | 286 +++
  44. .../freescale/sdk_dpaa/dpaa_macsec_ethtool.c | 250 +++
  45. drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 287 +++
  46. drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 915 +++++++++
  47. drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 470 +++++
  48. drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 ++
  49. .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 ++++++++
  50. .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
  51. 36 files changed, 18957 insertions(+)
  52. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
  53. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
  54. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
  55. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
  56. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
  57. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
  58. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
  59. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
  60. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
  61. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
  62. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
  63. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
  64. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
  65. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
  66. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
  67. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
  68. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
  69. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
  70. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
  71. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
  72. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
  73. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
  74. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
  75. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
  76. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
  77. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
  78. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
  79. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
  80. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
  81. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
  82. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
  83. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
  84. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
  85. create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
  86. --- a/drivers/net/ethernet/freescale/Kconfig
  87. +++ b/drivers/net/ethernet/freescale/Kconfig
  88. @@ -93,4 +93,6 @@ config GIANFAR
  89. on the 8540.
  90. source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
  91. +source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
  92. +
  93. endif # NET_VENDOR_FREESCALE
  94. --- a/drivers/net/ethernet/freescale/Makefile
  95. +++ b/drivers/net/ethernet/freescale/Makefile
  96. @@ -18,3 +18,4 @@ gianfar_driver-objs := gianfar.o \
  97. obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
  98. ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
  99. obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
  100. +obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
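The obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) idiom above makes kbuild descend into the subdirectory for both built-in and modular configurations: $(if) expands to "y" whenever the symbol is set at all, so the line reduces to obj-y += sdk_dpaa/. An illustrative sketch of the expansion (not part of the patch):

    # CONFIG_FSL_SDK_DPAA_ETH=y     ->  obj-y += sdk_dpaa/
    # CONFIG_FSL_SDK_DPAA_ETH=m     ->  obj-y += sdk_dpaa/   ($(if m,y) is still "y")
    # CONFIG_FSL_SDK_DPAA_ETH unset ->  obj-  += sdk_dpaa/   (ignored by kbuild)
    obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/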
  101. --- /dev/null
  102. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
  103. @@ -0,0 +1,187 @@
  104. +menuconfig FSL_SDK_DPAA_ETH
  105. + tristate "DPAA Ethernet"
  106. + depends on (FSL_SOC || ARM64 || ARM) && FSL_BMAN && FSL_QMAN && FSL_SDK_FMAN
  107. + select PHYLIB
  108. + ---help---
  109. + Data Path Acceleration Architecture Ethernet driver,
  110. + supporting the Freescale QorIQ chips.
  111. + Depends on the Freescale Buffer Manager, Queue Manager
  112. + and Frame Manager drivers.
  113. +
  114. +if FSL_SDK_DPAA_ETH
  115. +
  116. +config FSL_DPAA_HOOKS
  117. + bool "DPAA Ethernet driver hooks"
  118. +
  119. +config FSL_DPAA_MACSEC
  120. + tristate "DPAA MACSEC"
  121. + select FSL_DPAA_HOOKS
  122. + ---help---
  123. + Enable MACSEC support in DPAA.
  124. +
  125. +config FSL_DPAA_CEETM
  126. + bool "DPAA CEETM QoS"
  127. + select NET_SCHED
  128. + default n
  129. + ---help---
  130. + Enable QoS offloading support through the CEETM hardware block.
  131. +
  132. +config FSL_DPAA_OFFLINE_PORTS
  133. + bool "Offline Ports support"
  134. + depends on FSL_SDK_DPAA_ETH
  135. + default y
  136. + ---help---
  137. + The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
  138. + most of the functionality of the regular, online ports, except they receive their
  139. + frames from a core or an accelerator on the SoC, via QMan frame queues,
  140. + rather than directly from the network.
  141. + Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
  142. + any online FMan port. They deliver the processed frames to frame queues, according
  143. + to the applied PCD configurations.
  144. +
  145. + Enabling this feature will not impact the functionality or performance of the system,
  146. + so it is safe to enable.
  147. +
  148. +config FSL_DPAA_ADVANCED_DRIVERS
  149. + bool "Advanced DPAA Ethernet drivers"
  150. + depends on FSL_SDK_DPAA_ETH
  151. + default y
  152. + ---help---
  153. + Besides the standard DPAA Ethernet driver, several other driver flavours
  154. + are available to support advanced scenarios:
  155. + - DPAA Shared MAC driver
  156. + - DPAA MAC-less driver
  157. + - DPAA Proxy initialization driver (for USDPAA)
  158. + Select this to also build the advanced drivers.
  159. +
  160. +config FSL_DPAA_GENERIC_DRIVER
  161. + bool "Generic DPAA Ethernet driver"
  162. + depends on FSL_SDK_DPAA_ETH
  163. + default y
  164. + ---help---
  165. + This enables the DPAA Generic driver (oNIC).
  166. +
  167. +config FSL_DPAA_ETH_JUMBO_FRAME
  168. + bool "Optimize for jumbo frames"
  169. + depends on !ARM64 && !ARM
  170. + default n
  171. + ---help---
  172. + Optimize the DPAA Ethernet driver throughput for large-frame
  173. + termination traffic (e.g. 4K and above).
  174. + NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
  175. + is set to 9600 bytes.
  176. + Using this option in combination with small frames significantly
  177. + increases the driver's memory footprint and may even deplete
  178. + the system memory.
  179. + This option is not available on LS1043.
  180. +
  181. +config FSL_DPAA_TS
  182. + bool "Linux compliant timestamping"
  183. + depends on FSL_SDK_DPAA_ETH
  184. + default n
  185. + ---help---
  186. + Enable Linux API compliant timestamping support.
  187. +
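FSL_DPAA_TS hooks the driver into the kernel's standard timestamping API. For reference (this is not part of the patch), userspace would typically request hardware timestamps through the generic SIOCSHWTSTAMP ioctl; a minimal sketch, assuming an interface named "eth0":

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    /* Request HW timestamping of all Tx and Rx packets on "eth0". */
    static int enable_hw_tstamp(int sock)
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;

            memset(&cfg, 0, sizeof(cfg));
            cfg.tx_type = HWTSTAMP_TX_ON;
            cfg.rx_filter = HWTSTAMP_FILTER_ALL;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }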
  188. +config FSL_DPAA_1588
  189. + bool "IEEE 1588-compliant timestamping"
  190. + depends on FSL_SDK_DPAA_ETH
  191. + select FSL_DPAA_TS
  192. + default n
  193. + ---help---
  194. + Enable IEEE1588 support code.
  195. +
  196. +config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  197. + bool "Use driver's Tx queue selection mechanism"
  198. + default y
  199. + depends on FSL_SDK_DPAA_ETH
  200. + ---help---
  201. + The DPAA-Ethernet driver defines an ndo_select_queue() callback for optimal selection
  202. + of the egress FQ. That will override the XPS support for this netdevice.
  203. + If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
  204. + or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
  205. + and use the standard XPS support instead.
  206. +
  207. +config FSL_DPAA_ETH_MAX_BUF_COUNT
  208. + int "Maximum number of buffers in private bpool"
  209. + depends on FSL_SDK_DPAA_ETH
  210. + range 64 2048
  211. + default "128"
  212. + ---help---
  213. + The maximum number of buffers allocated by default in the DPAA-Ethernet private port's
  214. + buffer pool. You normally needn't modify this, as it has already been tuned for
  215. + performance. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
  216. +
  217. +config FSL_DPAA_ETH_REFILL_THRESHOLD
  218. + int "Private bpool refill threshold"
  219. + depends on FSL_SDK_DPAA_ETH
  220. + range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
  221. + default "80"
  222. + ---help---
  223. + The DPAA-Ethernet driver will start replenishing buffer pools whose count
  224. + falls below this threshold. This must be correlated with DPAA_ETH_MAX_BUF_COUNT. You
  225. + normally needn't modify this value unless you have very specific performance reasons.
  226. +
  227. +config FSL_DPAA_CS_THRESHOLD_1G
  228. + hex "Egress congestion threshold on 1G ports"
  229. + depends on FSL_SDK_DPAA_ETH
  230. + range 0x1000 0x10000000
  231. + default "0x06000000"
  232. + ---help---
  233. + The size in bytes of the egress Congestion State notification threshold on 1G ports.
  234. + The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
  235. + (e.g. by sending UDP datagrams at "while(1) speed"),
  236. + and the larger the frame size, the more acute the problem.
  237. + So we have to find a balance between these factors:
  238. + - avoiding the device staying congested for a prolonged time (risking
  239. + firing the netdev watchdog - see also the tx_timeout module param);
  240. + - affecting performance of protocols such as TCP, which otherwise
  241. + behave well under the congestion notification mechanism;
  242. + - preventing the Tx cores from tightly-looping (as if the congestion
  243. + threshold was too low to be effective);
  244. + - running out of memory if the CS threshold is set too high.
  245. +
  246. +config FSL_DPAA_CS_THRESHOLD_10G
  247. + hex "Egress congestion threshold on 10G ports"
  248. + depends on FSL_SDK_DPAA_ETH
  249. + range 0x1000 0x20000000
  250. + default "0x10000000"
  251. +
  252. +config FSL_DPAA_INGRESS_CS_THRESHOLD
  253. + hex "Ingress congestion threshold on FMan ports"
  254. + depends on FSL_SDK_DPAA_ETH
  255. + default "0x10000000"
  256. + ---help---
  257. + The size in bytes of the ingress tail-drop threshold on FMan ports.
  258. + Traffic piling up above this value will be rejected by QMan and discarded by FMan.
  259. +
  260. +config FSL_DPAA_ETH_DEBUGFS
  261. + bool "DPAA Ethernet debugfs interface"
  262. + depends on DEBUG_FS && FSL_SDK_DPAA_ETH
  263. + default y
  264. + ---help---
  265. + This option compiles debugfs code for the DPAA Ethernet driver.
  266. +
  267. +config FSL_DPAA_ETH_DEBUG
  268. + bool "DPAA Ethernet Debug Support"
  269. + depends on FSL_SDK_DPAA_ETH
  270. + default n
  271. + ---help---
  272. + This option compiles debug code for the DPAA Ethernet driver.
  273. +
  274. +config FSL_DPAA_DBG_LOOP
  275. + bool "DPAA Ethernet Debug loopback"
  276. + depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  277. + default n
  278. + ---help---
  279. + This option allows diverting all traffic received on a given interface A towards a
  280. + selected interface B. It is used to benchmark the HW + Ethernet driver in
  281. + isolation from the Linux networking stack. The loops are controlled by debugfs entries,
  282. + one for each interface. By default all loops are disabled (target value is -1). E.g. to
  283. + change the loop setting for interface 4 and divert all received traffic to interface 5,
  284. + write the Tx interface number into the receive interface's debugfs file:
  285. + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
  286. + 4->-1
  287. + # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
  288. + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
  289. + 4->5
  290. +endif # FSL_SDK_DPAA_ETH
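For reference, a .config fragment that enables the driver with the defaults defined above (symbols and values are taken directly from this Kconfig; adjust for the target platform):

    CONFIG_FSL_SDK_DPAA_ETH=y
    CONFIG_FSL_DPAA_OFFLINE_PORTS=y
    CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
    CONFIG_FSL_DPAA_GENERIC_DRIVER=y
    CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y
    CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
    CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
    CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000
    CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000
    CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
    CONFIG_FSL_DPAA_ETH_DEBUGFS=y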
  291. --- /dev/null
  292. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
  293. @@ -0,0 +1,59 @@
  294. +#
  295. +# Makefile for the Freescale Ethernet controllers
  296. +#
  297. +ccflags-y += -DVERSION=\"\"
  298. +#
  299. +# Include netcomm SW specific definitions
  300. +include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
  301. +
  302. +ccflags-y += -I$(NET_DPA)
  303. +
  304. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
  305. +obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
  306. +
  307. +fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
  308. +ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
  309. +fsl_dpa-objs += dpaa_debugfs.o
  310. +endif
  311. +ifeq ($(CONFIG_FSL_DPAA_1588),y)
  312. +fsl_dpa-objs += dpaa_1588.o
  313. +endif
  314. +ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
  315. +ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
  316. +fsl_dpa-objs += dpaa_eth_ceetm.o
  317. +endif
  318. +
  319. +fsl_mac-objs += mac.o mac-api.o
  320. +
  321. +# Advanced drivers
  322. +ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
  323. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
  324. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
  325. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_shared.o
  326. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_macless.o
  327. +obj-$(CONFIG_FSL_DPAA_MACSEC) += fsl_dpa_macsec.o
  328. +
  329. +fsl_advanced-objs += dpaa_eth_base.o
  330. +# support for multiple drivers per kernel module comes in kernel 3.14
  331. +# so we are forced to generate several modules for the advanced drivers
  332. +fsl_proxy-objs += dpaa_eth_proxy.o
  333. +fsl_dpa_shared-objs += dpaa_eth_shared.o
  334. +fsl_dpa_macless-objs += dpaa_eth_macless.o
  335. +fsl_dpa_macsec-objs += dpaa_eth_macsec.o dpaa_macsec_ethtool.o
  336. +
  337. +ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
  338. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
  339. +
  340. +fsl_oh-objs += offline_port.o
  341. +endif
  342. +endif
  343. +
  344. +# Generic driver
  345. +ifeq ($(CONFIG_FSL_DPAA_GENERIC_DRIVER),y)
  346. +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_generic.o
  347. +
  348. +fsl_generic-objs += dpaa_eth_generic.o dpaa_eth_generic_sysfs.o dpaa_generic_ethtool.o
  349. +endif
  350. +
  351. +# Needed by the tracing framework
  352. +CFLAGS_dpaa_eth.o := -I$(src)
  353. --- /dev/null
  354. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
  355. @@ -0,0 +1,580 @@
  356. +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
  357. + * Copyright (C) 2009 IXXAT Automation, GmbH
  358. + *
  359. + * DPAA Ethernet Driver -- IEEE 1588 interface functionality
  360. + *
  361. + * This program is free software; you can redistribute it and/or modify
  362. + * it under the terms of the GNU General Public License as published by
  363. + * the Free Software Foundation; either version 2 of the License, or
  364. + * (at your option) any later version.
  365. + *
  366. + * This program is distributed in the hope that it will be useful,
  367. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  368. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  369. + * GNU General Public License for more details.
  370. + *
  371. + * You should have received a copy of the GNU General Public License along
  372. + * with this program; if not, write to the Free Software Foundation, Inc.,
  373. + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  374. + *
  375. + */
  376. +#include <linux/io.h>
  377. +#include <linux/device.h>
  378. +#include <linux/fs.h>
  379. +#include <linux/vmalloc.h>
  380. +#include <linux/spinlock.h>
  381. +#include <linux/ip.h>
  382. +#include <linux/ipv6.h>
  383. +#include <linux/udp.h>
  384. +#include <asm/div64.h>
  385. +#include "dpaa_eth.h"
  386. +#include "dpaa_eth_common.h"
  387. +#include "dpaa_1588.h"
  388. +#include "mac.h"
  389. +
  390. +static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
  391. +{
  392. + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
  393. +
  394. + circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
  395. + if (!circ_buf->buf)
  396. + return 1;
  397. +
  398. + circ_buf->head = 0;
  399. + circ_buf->tail = 0;
  400. + ptp_buf->size = size;
  401. + spin_lock_init(&ptp_buf->ptp_lock);
  402. +
  403. + return 0;
  404. +}
  405. +
  406. +static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
  407. +{
  408. + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
  409. +
  410. + circ_buf->head = 0;
  411. + circ_buf->tail = 0;
  412. + ptp_buf->size = size;
  413. +}
  414. +
  415. +static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
  416. + struct dpa_ptp_data *data)
  417. +{
  418. + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
  419. + int size = ptp_buf->size;
  420. + struct dpa_ptp_data *tmp;
  421. + unsigned long flags;
  422. + int head, tail;
  423. +
  424. + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
  425. +
  426. + head = circ_buf->head;
  427. + tail = circ_buf->tail;
  428. +
  429. + if (CIRC_SPACE(head, tail, size) <= 0)
  430. + circ_buf->tail = (tail + 1) & (size - 1);
  431. +
  432. + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
  433. + memcpy(tmp, data, sizeof(struct dpa_ptp_data));
  434. +
  435. + circ_buf->head = (head + 1) & (size - 1);
  436. +
  437. + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
  438. +
  439. + return 0;
  440. +}
  441. +
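Note that the index arithmetic in dpa_ptp_insert() (and in the lookup below) wraps with "& (size - 1)", which is only a correct modulo when the ring size is a power of two, as the <linux/circ_buf.h> macros require. A defensive sketch of a guard that dpa_ptp_init_circ() could apply (not in the patch; is_power_of_2() is from <linux/log2.h>):

    #include <linux/log2.h>

    /* Reject ring sizes for which (idx + 1) & (size - 1) is not a
     * valid wrap-around, i.e. anything that is not a power of two. */
    static int dpa_ptp_size_valid(u32 size)
    {
            return is_power_of_2(size);
    }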
  442. +static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
  443. + struct dpa_ptp_ident *src)
  444. +{
  445. + int ret;
  446. +
  447. + if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
  448. + return 0;
  449. +
  450. + if ((dst->netw_prot == src->netw_prot)
  451. + || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
  452. + if (dst->seq_id != src->seq_id)
  453. + return 0;
  454. +
  455. + ret = memcmp(dst->snd_port_id, src->snd_port_id,
  456. + DPA_PTP_SOURCE_PORT_LENGTH);
  457. + if (ret)
  458. + return 0;
  459. + else
  460. + return 1;
  461. + }
  462. +
  463. + return 0;
  464. +}
  465. +
  466. +static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
  467. + struct dpa_ptp_ident *ident,
  468. + struct dpa_ptp_time *ts)
  469. +{
  470. + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
  471. + int size = ptp_buf->size;
  472. + int head, tail, idx;
  473. + unsigned long flags;
  474. + struct dpa_ptp_data *tmp, *tmp2;
  475. + struct dpa_ptp_ident *tmp_ident;
  476. +
  477. + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
  478. +
  479. + head = circ_buf->head;
  480. + tail = idx = circ_buf->tail;
  481. +
  482. + if (CIRC_CNT(head, tail, size) == 0) {
  483. + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
  484. + return 1;
  485. + }
  486. +
  487. + while (idx != head) {
  488. + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
  489. + tmp_ident = &tmp->ident;
  490. + if (dpa_ptp_is_ident_match(tmp_ident, ident))
  491. + break;
  492. + idx = (idx + 1) & (size - 1);
  493. + }
  494. +
  495. + if (idx == head) {
  496. + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
  497. + return 1;
  498. + }
  499. +
  500. + ts->sec = tmp->ts.sec;
  501. + ts->nsec = tmp->ts.nsec;
  502. +
  503. + if (idx != tail) {
  504. + if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
  505. + tail = circ_buf->tail =
  506. + (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
  507. + }
  508. +
  509. + while (CIRC_CNT(idx, tail, size) > 0) {
  510. + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
  511. + idx = (idx - 1) & (size - 1);
  512. + tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
  513. + *tmp = *tmp2;
  514. + }
  515. + }
  516. + circ_buf->tail = (tail + 1) & (size - 1);
  517. +
  518. + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
  519. +
  520. + return 0;
  521. +}
  522. +
  523. +/* Parse the PTP packets
  524. + *
  525. + * The PTP header can be found in an IPv4 packet, IPv6 packet or in
  526. + * an IEEE 802.3 Ethernet frame. This function returns the position of
  527. + * the PTP header, or NULL if none is found
  528. + */
  529. +static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
  530. +{
  531. + u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
  532. + u8 *ptp_loc = NULL;
  533. + u8 msg_type;
  534. + u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
  535. + struct iphdr *iph;
  536. + struct udphdr *udph;
  537. + struct ipv6hdr *ipv6h;
  538. +
  539. + /* when we can receive S/G frames we need to check that the data we
  540. + * want to access is in the linear skb buffer
  541. + */
  542. + if (!pskb_may_pull(skb, access_len))
  543. + return NULL;
  544. +
  545. + *eth_type = ntohs(*((u16 *)pos));
  546. +
  547. + /* Check if inner tag is here */
  548. + if (*eth_type == ETH_P_8021Q) {
  549. + access_len += DPA_VLAN_TAG_LEN;
  550. +
  551. + if (!pskb_may_pull(skb, access_len))
  552. + return NULL;
  553. +
  554. + pos += DPA_VLAN_TAG_LEN;
  555. + *eth_type = ntohs(*((u16 *)pos));
  556. + }
  557. +
  558. + pos += DPA_ETYPE_LEN;
  559. +
  560. + switch (*eth_type) {
  561. + /* Transport of PTP over Ethernet */
  562. + case ETH_P_1588:
  563. + ptp_loc = pos;
  564. +
  565. + if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
  566. + return NULL;
  567. +
  568. + msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
  569. + if ((msg_type == PTP_MSGTYPE_SYNC)
  570. + || (msg_type == PTP_MSGTYPE_DELREQ)
  571. + || (msg_type == PTP_MSGTYPE_PDELREQ)
  572. + || (msg_type == PTP_MSGTYPE_PDELRESP))
  573. + return ptp_loc;
  574. + break;
  575. + /* Transport of PTP over IPv4 */
  576. + case ETH_P_IP:
  577. + iph = (struct iphdr *)pos;
  578. + access_len += sizeof(struct iphdr);
  579. +
  580. + if (!pskb_may_pull(skb, access_len))
  581. + return NULL;
  582. +
  583. + if (iph->protocol != IPPROTO_UDP)
  584. + return NULL;
  585. +
  586. + access_len += iph->ihl * 4 - sizeof(struct iphdr) +
  587. + sizeof(struct udphdr);
  588. +
  589. + if (!pskb_may_pull(skb, access_len))
  590. + return NULL;
  591. +
  592. + pos += iph->ihl * 4;
  593. + udph = (struct udphdr *)pos;
  594. + if (ntohs(udph->dest) != 319)
  595. + return NULL;
  596. + ptp_loc = pos + sizeof(struct udphdr);
  597. + break;
  598. + /* Transport of PTP over IPv6 */
  599. + case ETH_P_IPV6:
  600. + ipv6h = (struct ipv6hdr *)pos;
  601. +
  602. + access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
  603. +
  604. + if (ipv6h->nexthdr != IPPROTO_UDP)
  605. + return NULL;
  606. +
  607. + pos += sizeof(struct ipv6hdr);
  608. + udph = (struct udphdr *)pos;
  609. + if (ntohs(udph->dest) != 319)
  610. + return NULL;
  611. + ptp_loc = pos + sizeof(struct udphdr);
  612. + break;
  613. + default:
  614. + break;
  615. + }
  616. +
  617. + return ptp_loc;
  618. +}
  619. +
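A note on byte order in the parser above: multi-byte on-wire fields (the EtherType, the UDP ports) are big-endian and need ntohs(), while single-byte fields such as iphdr.protocol and ipv6hdr.nexthdr must be read directly. A standalone sketch of the rule (illustrative only, not part of the patch; 319 is the PTP event port):

    #include <linux/types.h>
    #include <linux/ip.h>
    #include <linux/udp.h>
    #include <linux/in.h>
    #include <asm/byteorder.h>

    static bool is_ptp_event_udp(const struct iphdr *iph,
                                 const struct udphdr *udph)
    {
            return iph->protocol == IPPROTO_UDP &&  /* u8: no swap */
                   ntohs(udph->dest) == 319;        /* __be16: swap on LE */
    }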
  620. +static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
  621. + struct sk_buff *skb, void *data, enum port_type rx_tx,
  622. + struct dpa_ptp_data *ptp_data)
  623. +{
  624. + u64 nsec;
  625. + u32 mod;
  626. + u8 *ptp_loc;
  627. + u16 eth_type;
  628. +
  629. + ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
  630. + if (!ptp_loc)
  631. + return -EINVAL;
  632. +
  633. + switch (eth_type) {
  634. + case ETH_P_IP:
  635. + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
  636. + break;
  637. + case ETH_P_IPV6:
  638. + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
  639. + break;
  640. + case ETH_P_1588:
  641. + ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
  642. + break;
  643. + default:
  644. + return -EINVAL;
  645. + }
  646. +
  647. + if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
  648. + return -EINVAL;
  649. +
  650. + ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
  651. + ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
  652. + ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
  653. + memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
  654. + DPA_PTP_SOURCE_PORT_LENGTH);
  655. +
  656. + nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
  657. + mod = do_div(nsec, NANOSEC_PER_SECOND);
  658. + ptp_data->ts.sec = nsec;
  659. + ptp_data->ts.nsec = mod;
  660. +
  661. + return 0;
  662. +}
  663. +
  664. +void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
  665. + struct sk_buff *skb, void *data)
  666. +{
  667. + struct dpa_ptp_tsu *tsu = priv->tsu;
  668. + struct dpa_ptp_data ptp_tx_data;
  669. +
  670. + if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
  671. + return;
  672. +
  673. + dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
  674. +}
  675. +
  676. +void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
  677. + struct sk_buff *skb, void *data)
  678. +{
  679. + struct dpa_ptp_tsu *tsu = priv->tsu;
  680. + struct dpa_ptp_data ptp_rx_data;
  681. +
  682. + if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
  683. + return;
  684. +
  685. + dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
  686. +}
  687. +
  688. +static int dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
  689. + struct dpa_ptp_ident *ident,
  690. + struct dpa_ptp_time *ts)
  691. +{
  692. + struct dpa_ptp_tsu *tsu = ptp_tsu;
  693. + struct dpa_ptp_time tmp;
  694. + int flag;
  695. +
  696. + flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
  697. + if (!flag) {
  698. + ts->sec = tmp.sec;
  699. + ts->nsec = tmp.nsec;
  700. + return 0;
  701. + }
  702. +
  703. + return -1;
  704. +}
  705. +
  706. +static int dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
  707. + struct dpa_ptp_ident *ident,
  708. + struct dpa_ptp_time *ts)
  709. +{
  710. + struct dpa_ptp_tsu *tsu = ptp_tsu;
  711. + struct dpa_ptp_time tmp;
  712. + int flag;
  713. +
  714. + flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
  715. + if (!flag) {
  716. + ts->sec = tmp.sec;
  717. + ts->nsec = tmp.nsec;
  718. + return 0;
  719. + }
  720. +
  721. + return -1;
  722. +}
  723. +
  724. +static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
  725. + struct dpa_ptp_time *cnt_time)
  726. +{
  727. + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
  728. + u64 tmp, fiper;
  729. +
  730. + if (mac_dev->fm_rtc_disable)
  731. + mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
  732. +
  733. + /* TMR_FIPER1 will pulse every second after ALARM1 expired */
  734. + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
  735. + fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
  736. + if (mac_dev->fm_rtc_set_alarm)
  737. + mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
  738. + 0, tmp);
  739. + if (mac_dev->fm_rtc_set_fiper)
  740. + mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
  741. + 0, fiper);
  742. +
  743. + if (mac_dev->fm_rtc_enable)
  744. + mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
  745. +}
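Why the fiper value is one nominal period short of a full second: the FIPER pulse period is the programmed value plus one timer tick, so subtracting one tick yields a pulse exactly every second once ALARM1 has fired. A worked sketch, assuming a hypothetical 10 ns nominal period (i.e. a 100 MHz 1588 timer):

    /* assumption for illustration: DPA_PTP_NOMINAL_FREQ_PERIOD_NS == 10 */
    fiper = 1000000000ULL - 10; /* 999999990 ns + one 10 ns tick = 1 s */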
  746. +
  747. +static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
  748. + struct dpa_ptp_time *curr_time)
  749. +{
  750. + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
   751. + u64 tmp = 0;
  752. + u32 mod;
  753. +
  754. + if (mac_dev->fm_rtc_get_cnt)
  755. + mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
  756. + &tmp);
  757. +
  758. + mod = do_div(tmp, NANOSEC_PER_SECOND);
  759. + curr_time->sec = (u32)tmp;
  760. + curr_time->nsec = mod;
  761. +}
  762. +
  763. +static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
  764. + struct dpa_ptp_time *cnt_time)
  765. +{
  766. + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
  767. + u64 tmp;
  768. +
  769. + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
  770. +
  771. + if (mac_dev->fm_rtc_set_cnt)
  772. + mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
  773. + tmp);
  774. +
  775. + /* Restart fiper two seconds later */
  776. + cnt_time->sec += 2;
  777. + cnt_time->nsec = 0;
  778. + dpa_set_fiper_alarm(tsu, cnt_time);
  779. +}
  780. +
  781. +static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
  782. +{
  783. + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
   784. + u32 drift = 0;
  785. +
  786. + if (mac_dev->fm_rtc_get_drift)
  787. + mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
  788. + &drift);
  789. +
  790. + *addend = drift;
  791. +}
  792. +
  793. +static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
  794. +{
  795. + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
  796. +
  797. + if (mac_dev->fm_rtc_set_drift)
  798. + mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
  799. + addend);
  800. +}
  801. +
  802. +static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
  803. +{
  804. + dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
  805. + dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
  806. +}
  807. +
  808. +int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
  809. +{
  810. + struct dpa_priv_s *priv = netdev_priv(dev);
  811. + struct dpa_ptp_tsu *tsu = priv->tsu;
  812. + struct mac_device *mac_dev = priv->mac_dev;
  813. + struct dpa_ptp_data ptp_data;
   814. + struct dpa_ptp_data __user *ptp_data_user;
  815. + struct dpa_ptp_time act_time;
  816. + u32 addend;
  817. + int retval = 0;
  818. +
  819. + if (!tsu || !tsu->valid)
  820. + return -ENODEV;
  821. +
  822. + switch (cmd) {
  823. + case PTP_ENBL_TXTS_IOCTL:
  824. + tsu->hwts_tx_en_ioctl = 1;
  825. + if (mac_dev->fm_rtc_enable)
  826. + mac_dev->fm_rtc_enable(get_fm_handle(dev));
  827. + if (mac_dev->ptp_enable)
  828. + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
  829. + break;
  830. + case PTP_DSBL_TXTS_IOCTL:
  831. + tsu->hwts_tx_en_ioctl = 0;
  832. + if (mac_dev->fm_rtc_disable)
  833. + mac_dev->fm_rtc_disable(get_fm_handle(dev));
  834. + if (mac_dev->ptp_disable)
  835. + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
  836. + break;
  837. + case PTP_ENBL_RXTS_IOCTL:
  838. + tsu->hwts_rx_en_ioctl = 1;
  839. + break;
  840. + case PTP_DSBL_RXTS_IOCTL:
  841. + tsu->hwts_rx_en_ioctl = 0;
  842. + break;
   843. + case PTP_GET_RX_TIMESTAMP:
   844. + ptp_data_user = (struct dpa_ptp_data __user *)ifr->ifr_data;
   845. + if (copy_from_user(&ptp_data.ident,
   846. + &ptp_data_user->ident, sizeof(ptp_data.ident)))
   847. + return -EFAULT;
   848. +
   849. + if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
   850. + return -EAGAIN;
   851. +
   852. + if (copy_to_user(&ptp_data_user->ts,
   853. + &ptp_data.ts, sizeof(ptp_data.ts)))
   854. + return -EFAULT;
   855. + break;
   856. + case PTP_GET_TX_TIMESTAMP:
   857. + ptp_data_user = (struct dpa_ptp_data __user *)ifr->ifr_data;
   858. + if (copy_from_user(&ptp_data.ident,
   859. + &ptp_data_user->ident, sizeof(ptp_data.ident)))
   860. + return -EFAULT;
   861. +
   862. + if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
   863. + return -EAGAIN;
   864. +
   865. + if (copy_to_user(&ptp_data_user->ts,
   866. + &ptp_data.ts, sizeof(ptp_data.ts)))
   867. + return -EFAULT;
   868. + break;
  869. + case PTP_GET_TIME:
  870. + dpa_get_curr_cnt(tsu, &act_time);
  871. + if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
  872. + return -EFAULT;
  873. + break;
  874. + case PTP_SET_TIME:
  875. + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
   876. + return -EFAULT;
  877. + dpa_set_1588cnt(tsu, &act_time);
  878. + break;
  879. + case PTP_GET_ADJ:
  880. + dpa_get_drift(tsu, &addend);
  881. + if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
  882. + return -EFAULT;
  883. + break;
  884. + case PTP_SET_ADJ:
  885. + if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
   886. + return -EFAULT;
  887. + dpa_set_drift(tsu, addend);
  888. + break;
  889. + case PTP_SET_FIPER_ALARM:
  890. + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
   891. + return -EFAULT;
  892. + dpa_set_fiper_alarm(tsu, &act_time);
  893. + break;
  894. + case PTP_CLEANUP_TS:
  895. + dpa_flush_timestamp(tsu);
  896. + break;
  897. + default:
  898. + return -EINVAL;
  899. + }
  900. +
  901. + return retval;
  902. +}
  903. +
  904. +int dpa_ptp_init(struct dpa_priv_s *priv)
  905. +{
  906. + struct dpa_ptp_tsu *tsu;
  907. +
  908. + /* Allocate memory for PTP structure */
  909. + tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
  910. + if (!tsu)
  911. + return -ENOMEM;
  912. +
   913. + tsu->valid = true;
  914. + tsu->dpa_priv = priv;
  915. +
  916. + dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
  917. + dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
  918. +
  919. + priv->tsu = tsu;
  920. +
  921. + return 0;
  922. +}
  923. +EXPORT_SYMBOL(dpa_ptp_init);
  924. +
  925. +void dpa_ptp_cleanup(struct dpa_priv_s *priv)
  926. +{
  927. + struct dpa_ptp_tsu *tsu = priv->tsu;
  928. +
   929. + tsu->valid = false;
  930. + vfree(tsu->rx_timestamps.circ_buf.buf);
  931. + vfree(tsu->tx_timestamps.circ_buf.buf);
  932. +
  933. + kfree(tsu);
  934. +}
  935. +EXPORT_SYMBOL(dpa_ptp_cleanup);
  936. --- /dev/null
  937. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
  938. @@ -0,0 +1,138 @@
  939. +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
  940. + *
  941. + * This program is free software; you can redistribute it and/or modify
  942. + * it under the terms of the GNU General Public License as published by
  943. + * the Free Software Foundation; either version 2 of the License, or
  944. + * (at your option) any later version.
  945. + *
  946. + * This program is distributed in the hope that it will be useful,
  947. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  948. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  949. + * GNU General Public License for more details.
  950. + *
  951. + * You should have received a copy of the GNU General Public License along
  952. + * with this program; if not, write to the Free Software Foundation, Inc.,
  953. + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  954. + *
  955. + */
  956. +#ifndef __DPAA_1588_H__
  957. +#define __DPAA_1588_H__
  958. +
  959. +#include <linux/netdevice.h>
  960. +#include <linux/etherdevice.h>
  961. +#include <linux/circ_buf.h>
  962. +#include <linux/fsl_qman.h>
  963. +
  964. +#define DEFAULT_PTP_RX_BUF_SZ 256
  965. +#define DEFAULT_PTP_TX_BUF_SZ 256
  966. +
  967. +/* 1588 private ioctl calls */
  968. +#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
  969. +#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
  970. +#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
  971. +#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
  972. +#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
  973. +#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
  974. +#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
  975. +#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
  976. +#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
  977. +#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
  978. +#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
  979. +#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
  980. +
  981. +/* PTP V2 message type */
  982. +enum {
  983. + PTP_MSGTYPE_SYNC = 0x0,
  984. + PTP_MSGTYPE_DELREQ = 0x1,
  985. + PTP_MSGTYPE_PDELREQ = 0x2,
  986. + PTP_MSGTYPE_PDELRESP = 0x3,
  987. + PTP_MSGTYPE_FLWUP = 0x8,
  988. + PTP_MSGTYPE_DELRESP = 0x9,
  989. + PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
  990. + PTP_MSGTYPE_ANNOUNCE = 0xB,
  991. + PTP_MSGTYPE_SGNLNG = 0xC,
  992. + PTP_MSGTYPE_MNGMNT = 0xD,
  993. +};
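For orientation (from IEEE 1588-2008, not from this patch): types 0x0-0x3 are event messages, which are timestamped on the wire, while 0x8-0xD are general messages, which are not.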
  994. +
  995. +/* Byte offset of data in the PTP V2 headers */
  996. +#define PTP_OFFS_MSG_TYPE 0
  997. +#define PTP_OFFS_VER_PTP 1
  998. +#define PTP_OFFS_MSG_LEN 2
  999. +#define PTP_OFFS_DOM_NMB 4
  1000. +#define PTP_OFFS_FLAGS 6
  1001. +#define PTP_OFFS_CORFIELD 8
  1002. +#define PTP_OFFS_SRCPRTID 20
  1003. +#define PTP_OFFS_SEQ_ID 30
  1004. +#define PTP_OFFS_CTRL 32
  1005. +#define PTP_OFFS_LOGMEAN 33
  1006. +
  1007. +#define PTP_IP_OFFS 14
  1008. +#define PTP_UDP_OFFS 34
  1009. +#define PTP_HEADER_OFFS 42
  1010. +#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
  1011. +#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
  1012. +#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
  1013. +#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
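The composite offsets above assume an untagged PTP-over-UDP-over-IPv4 frame; the arithmetic (informative only) is:

    /* PTP_IP_OFFS     = 14: Ethernet header (6 dst + 6 src + 2 EtherType)
     * PTP_UDP_OFFS    = 34: 14 + 20-byte IPv4 header without options
     * PTP_HEADER_OFFS = 42: 34 + 8-byte UDP header
     */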
  1014. +
  1015. +/* 1588-2008 network protocol enumeration values */
  1016. +#define DPA_PTP_PROT_IPV4 1
  1017. +#define DPA_PTP_PROT_IPV6 2
  1018. +#define DPA_PTP_PROT_802_3 3
  1019. +#define DPA_PTP_PROT_DONTCARE 0xFFFF
  1020. +
  1021. +#define DPA_PTP_SOURCE_PORT_LENGTH 10
  1022. +#define DPA_PTP_HEADER_SZE 34
  1023. +#define DPA_ETYPE_LEN 2
  1024. +#define DPA_VLAN_TAG_LEN 4
  1025. +#define NANOSEC_PER_SECOND 1000000000
  1026. +
   1027. + /* Max allowed distance (in ring entries) between the entry just found and the oldest one */
  1028. +#define TS_ACCUMULATION_THRESHOLD 50
  1029. +
  1030. +/* Struct needed to identify a timestamp */
  1031. +struct dpa_ptp_ident {
  1032. + u8 version;
  1033. + u8 msg_type;
  1034. + u16 netw_prot;
  1035. + u16 seq_id;
  1036. + u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
  1037. +};
  1038. +
  1039. +/* Timestamp format in 1588-2008 */
  1040. +struct dpa_ptp_time {
   1041. + u64 sec; /* only the low 48 bits are used */
  1042. + u32 nsec;
  1043. +};
  1044. +
  1045. +/* needed for timestamp data over ioctl */
  1046. +struct dpa_ptp_data {
  1047. + struct dpa_ptp_ident ident;
  1048. + struct dpa_ptp_time ts;
  1049. +};
  1050. +
  1051. +struct dpa_ptp_circ_buf {
  1052. + struct circ_buf circ_buf;
  1053. + u32 size;
  1054. + spinlock_t ptp_lock;
  1055. +};
  1056. +
  1057. +/* PTP TSU control structure */
  1058. +struct dpa_ptp_tsu {
  1059. + struct dpa_priv_s *dpa_priv;
  1060. + bool valid;
  1061. + struct dpa_ptp_circ_buf rx_timestamps;
  1062. + struct dpa_ptp_circ_buf tx_timestamps;
  1063. +
  1064. + /* HW timestamping over ioctl enabled flag */
  1065. + int hwts_tx_en_ioctl;
  1066. + int hwts_rx_en_ioctl;
  1067. +};
  1068. +
  1069. +extern int dpa_ptp_init(struct dpa_priv_s *priv);
  1070. +extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
  1071. +extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
  1072. + struct sk_buff *skb, void *data);
  1073. +extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
  1074. + struct sk_buff *skb, void *data);
  1075. +extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
  1076. +#endif
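For reference, a user-space sketch of driving one of the private ioctls above; the interface name, the userspace mirror of struct dpa_ptp_time, and all error handling are illustrative assumptions, not part of the patch:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h> /* SIOCDEVPRIVATE */
    #include <net/if.h>

    /* userspace mirror of struct dpa_ptp_time (u64 sec, u32 nsec) */
    struct dpa_ptp_time_u {
            unsigned long long sec;
            unsigned int nsec;
    };

    static int read_1588_counter(const char *ifname)
    {
            struct ifreq ifr;
            struct dpa_ptp_time_u t;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&t;
            if (ioctl(fd, SIOCDEVPRIVATE + 7, &ifr) < 0) { /* PTP_GET_TIME */
                    close(fd);
                    return -1;
            }
            close(fd);
            printf("1588 counter: %llu.%09u\n", t.sec, t.nsec);
            return 0;
    }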
  1077. --- /dev/null
  1078. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
  1079. @@ -0,0 +1,180 @@
  1080. +/* Copyright 2008-2013 Freescale Semiconductor Inc.
  1081. + *
  1082. + * Redistribution and use in source and binary forms, with or without
  1083. + * modification, are permitted provided that the following conditions are met:
  1084. + * * Redistributions of source code must retain the above copyright
  1085. + * notice, this list of conditions and the following disclaimer.
  1086. + * * Redistributions in binary form must reproduce the above copyright
  1087. + * notice, this list of conditions and the following disclaimer in the
  1088. + * documentation and/or other materials provided with the distribution.
  1089. + * * Neither the name of Freescale Semiconductor nor the
  1090. + * names of its contributors may be used to endorse or promote products
  1091. + * derived from this software without specific prior written permission.
  1092. + *
  1093. + *
  1094. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1095. + * GNU General Public License ("GPL") as published by the Free Software
  1096. + * Foundation, either version 2 of that License or (at your option) any
  1097. + * later version.
  1098. + *
  1099. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  1100. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1101. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1102. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  1103. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1104. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1105. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1106. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1107. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1108. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1109. + */
  1110. +
  1111. +#include <linux/module.h>
  1112. +#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
  1113. +#include <linux/debugfs.h>
  1114. +#include "dpaa_debugfs.h"
  1115. +#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
  1116. +
  1117. +#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
  1118. +#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
  1119. +
  1120. +static struct dentry *dpa_debugfs_root;
  1121. +
  1122. +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
  1123. +static ssize_t dpa_loop_write(struct file *f,
  1124. + const char __user *buf, size_t count, loff_t *off);
  1125. +
  1126. +static const struct file_operations dpa_debugfs_lp_fops = {
  1127. + .open = dpa_debugfs_loop_open,
  1128. + .write = dpa_loop_write,
  1129. + .read = seq_read,
  1130. + .llseek = seq_lseek,
  1131. + .release = single_release,
  1132. +};
  1133. +
  1134. +static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
  1135. +{
  1136. + struct dpa_priv_s *priv;
  1137. +
  1138. + BUG_ON(offset == NULL);
  1139. +
  1140. + priv = netdev_priv((struct net_device *)file->private);
  1141. + seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
  1142. +
  1143. + return 0;
  1144. +}
  1145. +
  1146. +static int user_input_convert(const char __user *user_buf, size_t count,
  1147. + long *val)
  1148. +{
  1149. + char buf[12];
  1150. +
  1151. + if (count > sizeof(buf) - 1)
  1152. + return -EINVAL;
  1153. + if (copy_from_user(buf, user_buf, count))
  1154. + return -EFAULT;
  1155. + buf[count] = '\0';
  1156. + if (kstrtol(buf, 0, val))
  1157. + return -EINVAL;
  1158. + return 0;
  1159. +}
  1160. +
  1161. +static ssize_t dpa_loop_write(struct file *f,
  1162. + const char __user *buf, size_t count, loff_t *off)
  1163. +{
  1164. + struct dpa_priv_s *priv;
  1165. + struct net_device *netdev;
  1166. + struct seq_file *sf;
  1167. + int ret;
  1168. + long val;
  1169. +
  1170. + ret = user_input_convert(buf, count, &val);
  1171. + if (ret)
  1172. + return ret;
  1173. +
  1174. + sf = (struct seq_file *)f->private_data;
  1175. + netdev = (struct net_device *)sf->private;
  1176. + priv = netdev_priv(netdev);
  1177. +
   1178. + priv->loop_to = ((val < 0) || (val >= 20)) ? -1 : val; /* dpa_loop_netdevs[] has 20 entries */
  1179. +
  1180. + return count;
  1181. +}
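A write to this file thus selects the loop target by its loop_id, with out-of-range values storing -1 and disabling looping; e.g. echoing 3 into /sys/kernel/debug/fsl_dpa/eth1_loop points eth1 at loop interface 3.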
  1182. +
  1183. +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
  1184. +{
  1185. + int _errno;
  1186. + const struct net_device *net_dev;
  1187. +
  1188. + _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
  1189. + if (unlikely(_errno < 0)) {
  1190. + net_dev = (struct net_device *)inode->i_private;
  1191. +
  1192. + if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
  1193. + netdev_err(net_dev, "single_open() = %d\n",
  1194. + _errno);
  1195. + }
  1196. +
  1197. + return _errno;
  1198. +}
  1199. +
  1200. +
  1201. +int dpa_netdev_debugfs_create(struct net_device *net_dev)
  1202. +{
  1203. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  1204. + static int cnt;
  1205. + char loop_file_name[100];
  1206. +
  1207. + if (unlikely(dpa_debugfs_root == NULL)) {
  1208. + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
  1209. + KBUILD_BASENAME".c", __LINE__, __func__,
  1210. + "root debugfs missing, possible module ordering issue");
  1211. + return -ENOMEM;
  1212. + }
  1213. +
  1214. + sprintf(loop_file_name, "eth%d_loop", ++cnt);
  1215. + priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
   1216. + S_IRUGO | S_IWUSR,
  1217. + dpa_debugfs_root,
  1218. + net_dev,
  1219. + &dpa_debugfs_lp_fops);
  1220. + if (unlikely(priv->debugfs_loop_file == NULL)) {
  1221. + netdev_err(net_dev, "debugfs_create_file(%s/%s)",
  1222. + dpa_debugfs_root->d_iname,
  1223. + loop_file_name);
  1224. +
  1225. + return -ENOMEM;
  1226. + }
  1227. + return 0;
  1228. +}
  1229. +
  1230. +void dpa_netdev_debugfs_remove(struct net_device *net_dev)
  1231. +{
  1232. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  1233. +
  1234. + debugfs_remove(priv->debugfs_loop_file);
  1235. +}
  1236. +
  1237. +int __init dpa_debugfs_module_init(void)
  1238. +{
  1239. + int _errno = 0;
  1240. +
  1241. + pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
  1242. +
  1243. + dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
  1244. +
  1245. + if (unlikely(dpa_debugfs_root == NULL)) {
  1246. + _errno = -ENOMEM;
  1247. + pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
  1248. + KBUILD_BASENAME".c", __LINE__, __func__);
  1249. + pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
  1250. + DPA_ETH_DEBUGFS_ROOT, _errno);
  1251. + }
  1252. +
  1253. + return _errno;
  1254. +}
  1255. +
  1256. +void __exit dpa_debugfs_module_exit(void)
  1257. +{
  1258. + debugfs_remove(dpa_debugfs_root);
  1259. +}
  1260. --- /dev/null
  1261. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
  1262. @@ -0,0 +1,43 @@
  1263. +/* Copyright 2008-2013 Freescale Semiconductor Inc.
  1264. + *
  1265. + * Redistribution and use in source and binary forms, with or without
  1266. + * modification, are permitted provided that the following conditions are met:
  1267. + * * Redistributions of source code must retain the above copyright
  1268. + * notice, this list of conditions and the following disclaimer.
  1269. + * * Redistributions in binary form must reproduce the above copyright
  1270. + * notice, this list of conditions and the following disclaimer in the
  1271. + * documentation and/or other materials provided with the distribution.
  1272. + * * Neither the name of Freescale Semiconductor nor the
  1273. + * names of its contributors may be used to endorse or promote products
  1274. + * derived from this software without specific prior written permission.
  1275. + *
  1276. + *
  1277. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1278. + * GNU General Public License ("GPL") as published by the Free Software
  1279. + * Foundation, either version 2 of that License or (at your option) any
  1280. + * later version.
  1281. + *
  1282. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  1283. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1284. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1285. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  1286. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1287. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1288. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1289. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1290. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1291. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1292. + */
  1293. +
  1294. +#ifndef DPAA_DEBUGFS_H_
  1295. +#define DPAA_DEBUGFS_H_
  1296. +
  1297. +#include <linux/netdevice.h>
  1298. +#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
  1299. +
  1300. +int dpa_netdev_debugfs_create(struct net_device *net_dev);
  1301. +void dpa_netdev_debugfs_remove(struct net_device *net_dev);
  1302. +int __init dpa_debugfs_module_init(void);
  1303. +void __exit dpa_debugfs_module_exit(void);
  1304. +
  1305. +#endif /* DPAA_DEBUGFS_H_ */
  1306. --- /dev/null
  1307. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
  1308. @@ -0,0 +1,1183 @@
  1309. +/* Copyright 2008-2013 Freescale Semiconductor Inc.
  1310. + *
  1311. + * Redistribution and use in source and binary forms, with or without
  1312. + * modification, are permitted provided that the following conditions are met:
  1313. + * * Redistributions of source code must retain the above copyright
  1314. + * notice, this list of conditions and the following disclaimer.
  1315. + * * Redistributions in binary form must reproduce the above copyright
  1316. + * notice, this list of conditions and the following disclaimer in the
  1317. + * documentation and/or other materials provided with the distribution.
  1318. + * * Neither the name of Freescale Semiconductor nor the
  1319. + * names of its contributors may be used to endorse or promote products
  1320. + * derived from this software without specific prior written permission.
  1321. + *
  1322. + *
  1323. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1324. + * GNU General Public License ("GPL") as published by the Free Software
  1325. + * Foundation, either version 2 of that License or (at your option) any
  1326. + * later version.
  1327. + *
  1328. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  1329. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1330. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1331. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  1332. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1333. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1334. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1335. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1336. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1337. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1338. + */
  1339. +
  1340. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  1341. +#define pr_fmt(fmt) \
  1342. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  1343. + KBUILD_BASENAME".c", __LINE__, __func__
  1344. +#else
  1345. +#define pr_fmt(fmt) \
  1346. + KBUILD_MODNAME ": " fmt
  1347. +#endif
  1348. +
  1349. +#include <linux/init.h>
  1350. +#include <linux/module.h>
  1351. +#include <linux/of_mdio.h>
  1352. +#include <linux/of_net.h>
  1353. +#include <linux/kthread.h>
  1354. +#include <linux/io.h>
  1355. +#include <linux/if_arp.h> /* arp_hdr_len() */
  1356. +#include <linux/if_vlan.h> /* VLAN_HLEN */
  1357. +#include <linux/icmp.h> /* struct icmphdr */
  1358. +#include <linux/ip.h> /* struct iphdr */
  1359. +#include <linux/ipv6.h> /* struct ipv6hdr */
  1360. +#include <linux/udp.h> /* struct udphdr */
  1361. +#include <linux/tcp.h> /* struct tcphdr */
  1362. +#include <linux/net.h> /* net_ratelimit() */
  1363. +#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
  1364. +#include <linux/highmem.h>
  1365. +#include <linux/percpu.h>
  1366. +#include <linux/dma-mapping.h>
  1367. +#include <linux/fsl_bman.h>
  1368. +
  1369. +#include "fsl_fman.h"
  1370. +#include "fm_ext.h"
  1371. +#include "fm_port_ext.h"
  1372. +
  1373. +#include "mac.h"
  1374. +#include "dpaa_eth.h"
  1375. +#include "dpaa_eth_common.h"
  1376. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  1377. +#include "dpaa_debugfs.h"
  1378. +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
  1379. +
  1380. +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
  1381. + * using trace events only need to #include <trace/events/sched.h>
  1382. + */
  1383. +#define CREATE_TRACE_POINTS
  1384. +#include "dpaa_eth_trace.h"
  1385. +
  1386. +#define DPA_NAPI_WEIGHT 64
  1387. +
  1388. +/* Valid checksum indication */
  1389. +#define DPA_CSUM_VALID 0xFFFF
  1390. +
  1391. +#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
  1392. +
  1393. +MODULE_LICENSE("Dual BSD/GPL");
  1394. +
  1395. +MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
  1396. +
  1397. +MODULE_DESCRIPTION(DPA_DESCRIPTION);
  1398. +
  1399. +static uint8_t debug = -1;
  1400. +module_param(debug, byte, S_IRUGO);
  1401. +MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
  1402. +
  1403. +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
  1404. +static uint16_t tx_timeout = 1000;
  1405. +module_param(tx_timeout, ushort, S_IRUGO);
  1406. +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
  1407. +
  1408. +static const char rtx[][3] = {
  1409. + [RX] = "RX",
  1410. + [TX] = "TX"
  1411. +};
  1412. +
  1413. +/* BM */
  1414. +
  1415. +#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
  1416. +
  1417. +static uint8_t dpa_priv_common_bpid;
  1418. +
  1419. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  1420. +struct net_device *dpa_loop_netdevs[20];
  1421. +#endif
  1422. +
  1423. +#ifdef CONFIG_PM
  1424. +
  1425. +static int dpaa_suspend(struct device *dev)
  1426. +{
  1427. + struct net_device *net_dev;
  1428. + struct dpa_priv_s *priv;
  1429. + struct mac_device *mac_dev;
  1430. + int err = 0;
  1431. +
  1432. + net_dev = dev_get_drvdata(dev);
  1433. +
  1434. + if (net_dev->flags & IFF_UP) {
  1435. + priv = netdev_priv(net_dev);
  1436. + mac_dev = priv->mac_dev;
  1437. +
  1438. + if (priv->wol & DPAA_WOL_MAGIC) {
  1439. + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
  1440. + priv->mac_dev->get_mac_handle(mac_dev), true);
  1441. + if (err) {
  1442. + netdev_err(net_dev, "set_wol() = %d\n", err);
  1443. + goto set_wol_failed;
  1444. + }
  1445. + }
  1446. +
  1447. + err = fm_port_suspend(mac_dev->port_dev[RX]);
  1448. + if (err) {
  1449. + netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
  1450. + goto rx_port_suspend_failed;
  1451. + }
  1452. +
  1453. + err = fm_port_suspend(mac_dev->port_dev[TX]);
  1454. + if (err) {
  1455. + netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
  1456. + goto tx_port_suspend_failed;
  1457. + }
  1458. + }
  1459. +
  1460. + return 0;
  1461. +
  1462. +tx_port_suspend_failed:
  1463. + fm_port_resume(mac_dev->port_dev[RX]);
  1464. +rx_port_suspend_failed:
  1465. + if (priv->wol & DPAA_WOL_MAGIC) {
  1466. + priv->mac_dev->set_wol(mac_dev->port_dev[RX],
  1467. + priv->mac_dev->get_mac_handle(mac_dev), false);
  1468. + }
  1469. +set_wol_failed:
  1470. + return err;
  1471. +}
  1472. +
  1473. +static int dpaa_resume(struct device *dev)
  1474. +{
  1475. + struct net_device *net_dev;
  1476. + struct dpa_priv_s *priv;
  1477. + struct mac_device *mac_dev;
  1478. + int err = 0;
  1479. +
  1480. + net_dev = dev_get_drvdata(dev);
  1481. +
  1482. + if (net_dev->flags & IFF_UP) {
  1483. + priv = netdev_priv(net_dev);
  1484. + mac_dev = priv->mac_dev;
  1485. +
  1486. + err = fm_port_resume(mac_dev->port_dev[TX]);
  1487. + if (err) {
  1488. + netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
  1489. + goto resume_failed;
  1490. + }
  1491. +
  1492. + err = fm_port_resume(mac_dev->port_dev[RX]);
  1493. + if (err) {
  1494. + netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
  1495. + goto resume_failed;
  1496. + }
  1497. +
  1498. + if (priv->wol & DPAA_WOL_MAGIC) {
  1499. + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
  1500. + priv->mac_dev->get_mac_handle(mac_dev), false);
  1501. + if (err) {
  1502. + netdev_err(net_dev, "set_wol() = %d\n", err);
  1503. + goto resume_failed;
  1504. + }
  1505. + }
  1506. + }
  1507. +
  1508. + return 0;
  1509. +
  1510. +resume_failed:
  1511. + return err;
  1512. +}
  1513. +
  1514. +static const struct dev_pm_ops dpaa_pm_ops = {
  1515. + .suspend = dpaa_suspend,
  1516. + .resume = dpaa_resume,
  1517. +};
  1518. +
  1519. +#define DPAA_PM_OPS (&dpaa_pm_ops)
  1520. +
  1521. +#else /* CONFIG_PM */
  1522. +
  1523. +#define DPAA_PM_OPS NULL
  1524. +
  1525. +#endif /* CONFIG_PM */
  1526. +
  1527. +/* Checks whether the checksum field in Parse Results array is valid
  1528. + * (equals 0xFFFF) and increments the .cse counter otherwise
  1529. + */
  1530. +static inline void
  1531. +dpa_csum_validation(const struct dpa_priv_s *priv,
  1532. + struct dpa_percpu_priv_s *percpu_priv,
  1533. + const struct qm_fd *fd)
  1534. +{
  1535. + dma_addr_t addr = qm_fd_addr(fd);
  1536. + struct dpa_bp *dpa_bp = priv->dpa_bp;
  1537. + void *frm = phys_to_virt(addr);
  1538. + fm_prs_result_t *parse_result;
  1539. +
  1540. + if (unlikely(!frm))
  1541. + return;
  1542. +
  1543. + dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
  1544. + DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
  1545. +
  1546. + parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
  1547. +
  1548. + if (parse_result->cksum != DPA_CSUM_VALID)
  1549. + percpu_priv->rx_errors.cse++;
  1550. +}
  1551. +
  1552. +static void _dpa_rx_error(struct net_device *net_dev,
  1553. + const struct dpa_priv_s *priv,
  1554. + struct dpa_percpu_priv_s *percpu_priv,
  1555. + const struct qm_fd *fd,
  1556. + u32 fqid)
  1557. +{
  1558. + /* limit common, possibly innocuous Rx FIFO Overflow errors'
  1559. + * interference with zero-loss convergence benchmark results.
  1560. + */
  1561. + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
  1562. + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
   563. + else if (netif_msg_hw(priv) && net_ratelimit())
  1565. + netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
  1566. + fd->status & FM_FD_STAT_RX_ERRORS);
  1567. +#ifdef CONFIG_FSL_DPAA_HOOKS
  1568. + if (dpaa_eth_hooks.rx_error &&
  1569. + dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
  1570. + /* it's up to the hook to perform resource cleanup */
  1571. + return;
  1572. +#endif
  1573. + percpu_priv->stats.rx_errors++;
  1574. +
  1575. + if (fd->status & FM_PORT_FRM_ERR_DMA)
  1576. + percpu_priv->rx_errors.dme++;
  1577. + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
  1578. + percpu_priv->rx_errors.fpe++;
  1579. + if (fd->status & FM_PORT_FRM_ERR_SIZE)
  1580. + percpu_priv->rx_errors.fse++;
  1581. + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
  1582. + percpu_priv->rx_errors.phe++;
  1583. + if (fd->status & FM_FD_STAT_L4CV)
  1584. + dpa_csum_validation(priv, percpu_priv, fd);
  1585. +
  1586. + dpa_fd_release(net_dev, fd);
  1587. +}
  1588. +
  1589. +static void _dpa_tx_error(struct net_device *net_dev,
  1590. + const struct dpa_priv_s *priv,
  1591. + struct dpa_percpu_priv_s *percpu_priv,
  1592. + const struct qm_fd *fd,
  1593. + u32 fqid)
  1594. +{
  1595. + struct sk_buff *skb;
  1596. +
  1597. + if (netif_msg_hw(priv) && net_ratelimit())
  1598. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  1599. + fd->status & FM_FD_STAT_TX_ERRORS);
  1600. +#ifdef CONFIG_FSL_DPAA_HOOKS
  1601. + if (dpaa_eth_hooks.tx_error &&
  1602. + dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
  1603. + /* now the hook must ensure proper cleanup */
  1604. + return;
  1605. +#endif
  1606. + percpu_priv->stats.tx_errors++;
  1607. +
  1608. + /* If we intended the buffers from this frame to go into the bpools
  1609. + * when the FMan transmit was done, we need to put it in manually.
  1610. + */
  1611. + if (fd->bpid != 0xff) {
  1612. + dpa_fd_release(net_dev, fd);
  1613. + return;
  1614. + }
  1615. +
  1616. + skb = _dpa_cleanup_tx_fd(priv, fd);
  1617. + dev_kfree_skb(skb);
  1618. +}
  1619. +
  1620. +/* Helper function to factor out frame validation logic on all Rx paths. Its
  1621. + * purpose is to extract from the Parse Results structure information about
  1622. + * the integrity of the frame, its checksum, the length of the parsed headers
  1623. + * and whether the frame is suitable for GRO.
  1624. + *
  1625. + * Assumes no parser errors, since any error frame is dropped before this
  1626. + * function is called.
  1627. + *
  1628. + * @skb will have its ip_summed field overwritten;
  1629. + * @use_gro will only be written with 0, if the frame is definitely not
  1630. + * GRO-able; otherwise, it will be left unchanged;
  1631. + * @hdr_size will be written with a safe value, at least the size of the
  1632. + * headers' length.
  1633. + */
  1634. +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
  1635. + const struct qm_fd *fd,
  1636. + struct sk_buff *skb, int *use_gro)
  1637. +{
  1638. + if (fd->status & FM_FD_STAT_L4CV) {
  1639. + /* The parser has run and performed L4 checksum validation.
  1640. + * We know there were no parser errors (and implicitly no
  1641. + * L4 csum error), otherwise we wouldn't be here.
  1642. + */
  1643. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  1644. +
  1645. + /* Don't go through GRO for certain types of traffic that
  1646. + * we know are not GRO-able, such as dgram-based protocols.
  1647. + * In the worst-case scenarios, such as small-pkt terminating
  1648. + * UDP, the extra GRO processing would be overkill.
  1649. + *
  1650. + * The only protocol the Parser supports that is also GRO-able
  1651. + * is currently TCP.
  1652. + */
  1653. + if (!fm_l4_frame_is_tcp(parse_results))
  1654. + *use_gro = 0;
  1655. +
  1656. + return;
  1657. + }
  1658. +
  1659. + /* We're here because either the parser didn't run or the L4 checksum
  1660. + * was not verified. This may include the case of a UDP frame with
  1661. + * checksum zero or an L4 proto other than TCP/UDP
  1662. + */
  1663. + skb->ip_summed = CHECKSUM_NONE;
  1664. +
  1665. + /* Bypass GRO for unknown traffic or if no PCDs are applied */
  1666. + *use_gro = 0;
  1667. +}
  1668. +
  1669. +int dpaa_eth_poll(struct napi_struct *napi, int budget)
  1670. +{
  1671. + struct dpa_napi_portal *np =
  1672. + container_of(napi, struct dpa_napi_portal, napi);
  1673. +
  1674. + int cleaned = qman_p_poll_dqrr(np->p, budget);
  1675. +
  1676. + if (cleaned < budget) {
  1677. + int tmp;
  1678. + napi_complete(napi);
  1679. + tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
  1680. + DPA_BUG_ON(tmp);
  1681. + }
  1682. +
  1683. + return cleaned;
  1684. +}
  1685. +EXPORT_SYMBOL(dpaa_eth_poll);
  1686. +
  1687. +static void __hot _dpa_tx_conf(struct net_device *net_dev,
  1688. + const struct dpa_priv_s *priv,
  1689. + struct dpa_percpu_priv_s *percpu_priv,
  1690. + const struct qm_fd *fd,
  1691. + u32 fqid)
  1692. +{
  1693. + struct sk_buff *skb;
  1694. +
  1695. + /* do we need the timestamp for the error frames? */
  1696. +
   697. + if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {
  1698. + if (netif_msg_hw(priv) && net_ratelimit())
  1699. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  1700. + fd->status & FM_FD_STAT_TX_ERRORS);
  1701. +
  1702. + percpu_priv->stats.tx_errors++;
  1703. + }
  1704. +
  1705. + /* hopefully we need not get the timestamp before the hook */
  1706. +#ifdef CONFIG_FSL_DPAA_HOOKS
  1707. + if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
  1708. + fd, fqid) == DPAA_ETH_STOLEN)
  1709. + /* it's the hook that must now perform cleanup */
  1710. + return;
  1711. +#endif
  1712. + /* This might not perfectly reflect the reality, if the core dequeuing
  1713. + * the Tx confirmation is different from the one that did the enqueue,
  1714. + * but at least it'll show up in the total count.
  1715. + */
  1716. + percpu_priv->tx_confirm++;
  1717. +
  1718. + skb = _dpa_cleanup_tx_fd(priv, fd);
  1719. +
  1720. + dev_kfree_skb(skb);
  1721. +}
  1722. +
  1723. +enum qman_cb_dqrr_result
  1724. +priv_rx_error_dqrr(struct qman_portal *portal,
  1725. + struct qman_fq *fq,
  1726. + const struct qm_dqrr_entry *dq)
  1727. +{
  1728. + struct net_device *net_dev;
  1729. + struct dpa_priv_s *priv;
  1730. + struct dpa_percpu_priv_s *percpu_priv;
  1731. + int *count_ptr;
  1732. +
  1733. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  1734. + priv = netdev_priv(net_dev);
  1735. +
  1736. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  1737. + count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
  1738. +
  1739. + if (dpaa_eth_napi_schedule(percpu_priv, portal))
  1740. + return qman_cb_dqrr_stop;
  1741. +
  1742. + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
  1743. + /* Unable to refill the buffer pool due to insufficient
  1744. + * system memory. Just release the frame back into the pool,
  1745. + * otherwise we'll soon end up with an empty buffer pool.
  1746. + */
  1747. + dpa_fd_release(net_dev, &dq->fd);
  1748. + else
  1749. + _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
  1750. +
  1751. + return qman_cb_dqrr_consume;
  1752. +}
  1753. +
  1754. +
  1755. +enum qman_cb_dqrr_result __hot
  1756. +priv_rx_default_dqrr(struct qman_portal *portal,
  1757. + struct qman_fq *fq,
  1758. + const struct qm_dqrr_entry *dq)
  1759. +{
  1760. + struct net_device *net_dev;
  1761. + struct dpa_priv_s *priv;
  1762. + struct dpa_percpu_priv_s *percpu_priv;
  1763. + int *count_ptr;
  1764. + struct dpa_bp *dpa_bp;
  1765. +
  1766. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  1767. + priv = netdev_priv(net_dev);
  1768. + dpa_bp = priv->dpa_bp;
  1769. +
  1770. + /* Trace the Rx fd */
  1771. + trace_dpa_rx_fd(net_dev, fq, &dq->fd);
  1772. +
  1773. + /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
  1774. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  1775. + count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
  1776. +
  1777. + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
  1778. + return qman_cb_dqrr_stop;
  1779. +
  1780. + /* Vale of plenty: make sure we didn't run out of buffers */
  1781. +
  1782. + if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
  1783. + /* Unable to refill the buffer pool due to insufficient
  1784. + * system memory. Just release the frame back into the pool,
  1785. + * otherwise we'll soon end up with an empty buffer pool.
  1786. + */
  1787. + dpa_fd_release(net_dev, &dq->fd);
  1788. + else
  1789. + _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
  1790. + count_ptr);
  1791. +
  1792. + return qman_cb_dqrr_consume;
  1793. +}
  1794. +
  1795. +enum qman_cb_dqrr_result
  1796. +priv_tx_conf_error_dqrr(struct qman_portal *portal,
  1797. + struct qman_fq *fq,
  1798. + const struct qm_dqrr_entry *dq)
  1799. +{
  1800. + struct net_device *net_dev;
  1801. + struct dpa_priv_s *priv;
  1802. + struct dpa_percpu_priv_s *percpu_priv;
  1803. +
  1804. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  1805. + priv = netdev_priv(net_dev);
  1806. +
  1807. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  1808. +
  1809. + if (dpaa_eth_napi_schedule(percpu_priv, portal))
  1810. + return qman_cb_dqrr_stop;
  1811. +
  1812. + _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
  1813. +
  1814. + return qman_cb_dqrr_consume;
  1815. +}
  1816. +
  1817. +enum qman_cb_dqrr_result __hot
  1818. +priv_tx_conf_default_dqrr(struct qman_portal *portal,
  1819. + struct qman_fq *fq,
  1820. + const struct qm_dqrr_entry *dq)
  1821. +{
  1822. + struct net_device *net_dev;
  1823. + struct dpa_priv_s *priv;
  1824. + struct dpa_percpu_priv_s *percpu_priv;
  1825. +
  1826. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  1827. + priv = netdev_priv(net_dev);
  1828. +
  1829. + /* Trace the fd */
  1830. + trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
  1831. +
  1832. + /* Non-migratable context, safe to use raw_cpu_ptr */
  1833. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  1834. +
  1835. + if (dpaa_eth_napi_schedule(percpu_priv, portal))
  1836. + return qman_cb_dqrr_stop;
  1837. +
  1838. + _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
  1839. +
  1840. + return qman_cb_dqrr_consume;
  1841. +}
  1842. +
  1843. +void priv_ern(struct qman_portal *portal,
  1844. + struct qman_fq *fq,
  1845. + const struct qm_mr_entry *msg)
  1846. +{
  1847. + struct net_device *net_dev;
  1848. + const struct dpa_priv_s *priv;
  1849. + struct sk_buff *skb;
  1850. + struct dpa_percpu_priv_s *percpu_priv;
  1851. + struct qm_fd fd = msg->ern.fd;
  1852. +
  1853. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  1854. + priv = netdev_priv(net_dev);
  1855. + /* Non-migratable context, safe to use raw_cpu_ptr */
  1856. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  1857. +
  1858. + percpu_priv->stats.tx_dropped++;
  1859. + percpu_priv->stats.tx_fifo_errors++;
  1860. + count_ern(percpu_priv, msg);
  1861. +
  1862. + /* If we intended this buffer to go into the pool
  1863. + * when the FM was done, we need to put it in
  1864. + * manually.
  1865. + */
  1866. + if (msg->ern.fd.bpid != 0xff) {
  1867. + dpa_fd_release(net_dev, &fd);
  1868. + return;
  1869. + }
  1870. +
  1871. + skb = _dpa_cleanup_tx_fd(priv, &fd);
  1872. + dev_kfree_skb_any(skb);
  1873. +}
  1874. +
  1875. +const struct dpa_fq_cbs_t private_fq_cbs = {
  1876. + .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
  1877. + .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
  1878. + .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
  1879. + .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
  1880. + .egress_ern = { .cb = { .ern = priv_ern } }
  1881. +};
  1882. +EXPORT_SYMBOL(private_fq_cbs);
  1883. +
  1884. +static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
  1885. +{
  1886. + struct dpa_percpu_priv_s *percpu_priv;
  1887. + int i, j;
  1888. +
  1889. + for_each_possible_cpu(i) {
  1890. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  1891. +
  1892. + for (j = 0; j < qman_portal_max; j++)
  1893. + napi_enable(&percpu_priv->np[j].napi);
  1894. + }
  1895. +}
  1896. +
  1897. +static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
  1898. +{
  1899. + struct dpa_percpu_priv_s *percpu_priv;
  1900. + int i, j;
  1901. +
  1902. + for_each_possible_cpu(i) {
  1903. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  1904. +
  1905. + for (j = 0; j < qman_portal_max; j++)
  1906. + napi_disable(&percpu_priv->np[j].napi);
  1907. + }
  1908. +}
  1909. +
  1910. +static int __cold dpa_eth_priv_start(struct net_device *net_dev)
  1911. +{
  1912. + int err;
  1913. + struct dpa_priv_s *priv;
  1914. +
  1915. + priv = netdev_priv(net_dev);
  1916. +
  1917. + dpaa_eth_napi_enable(priv);
  1918. +
  1919. + err = dpa_start(net_dev);
  1920. + if (err < 0)
  1921. + dpaa_eth_napi_disable(priv);
  1922. +
  1923. + return err;
  1924. +}
  1925. +
  1926. +
  1927. +
  1928. +static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
  1929. +{
  1930. + int _errno;
  1931. + struct dpa_priv_s *priv;
  1932. +
  1933. + _errno = dpa_stop(net_dev);
  1934. + /* Allow NAPI to consume any frame still in the Rx/TxConfirm
  1935. + * ingress queues. This is to avoid a race between the current
  1936. + * context and ksoftirqd which could leave NAPI disabled while
  1937. + * in fact there's still Rx traffic to be processed.
  1938. + */
  1939. + usleep_range(5000, 10000);
  1940. +
  1941. + priv = netdev_priv(net_dev);
  1942. + dpaa_eth_napi_disable(priv);
  1943. +
  1944. + return _errno;
  1945. +}
  1946. +
  1947. +#ifdef CONFIG_NET_POLL_CONTROLLER
  1948. +static void dpaa_eth_poll_controller(struct net_device *net_dev)
  1949. +{
  1950. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  1951. + struct dpa_percpu_priv_s *percpu_priv =
  1952. + raw_cpu_ptr(priv->percpu_priv);
  1953. + struct qman_portal *p;
  1954. + const struct qman_portal_config *pc;
  1955. + struct dpa_napi_portal *np;
  1956. +
  1957. + p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
  1958. + pc = qman_p_get_portal_config(p);
  1959. + np = &percpu_priv->np[pc->index];
  1960. +
  1961. + qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
  1962. + qman_p_poll_dqrr(np->p, np->napi.weight);
  1963. + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
  1964. +}
  1965. +#endif
  1966. +
  1967. +static const struct net_device_ops dpa_private_ops = {
  1968. + .ndo_open = dpa_eth_priv_start,
  1969. + .ndo_start_xmit = dpa_tx,
  1970. + .ndo_stop = dpa_eth_priv_stop,
  1971. + .ndo_tx_timeout = dpa_timeout,
  1972. + .ndo_get_stats64 = dpa_get_stats64,
  1973. + .ndo_set_mac_address = dpa_set_mac_address,
  1974. + .ndo_validate_addr = eth_validate_addr,
  1975. +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  1976. + .ndo_select_queue = dpa_select_queue,
  1977. +#endif
  1978. + .ndo_change_mtu = dpa_change_mtu,
  1979. + .ndo_set_rx_mode = dpa_set_rx_mode,
  1980. + .ndo_init = dpa_ndo_init,
  1981. + .ndo_set_features = dpa_set_features,
  1982. + .ndo_fix_features = dpa_fix_features,
  1983. + .ndo_do_ioctl = dpa_ioctl,
  1984. +#ifdef CONFIG_NET_POLL_CONTROLLER
  1985. + .ndo_poll_controller = dpaa_eth_poll_controller,
  1986. +#endif
  1987. +};
  1988. +
  1989. +static int dpa_private_napi_add(struct net_device *net_dev)
  1990. +{
  1991. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  1992. + struct dpa_percpu_priv_s *percpu_priv;
  1993. + int i, cpu;
  1994. +
  1995. + for_each_possible_cpu(cpu) {
  1996. + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
  1997. +
  1998. + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
  1999. + qman_portal_max * sizeof(struct dpa_napi_portal),
  2000. + GFP_KERNEL);
  2001. +
  2002. + if (unlikely(percpu_priv->np == NULL)) {
  2003. + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
  2004. + return -ENOMEM;
  2005. + }
  2006. +
  2007. + for (i = 0; i < qman_portal_max; i++)
  2008. + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
  2009. + dpaa_eth_poll, DPA_NAPI_WEIGHT);
  2010. + }
  2011. +
  2012. + return 0;
  2013. +}
  2014. +
  2015. +void dpa_private_napi_del(struct net_device *net_dev)
  2016. +{
  2017. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  2018. + struct dpa_percpu_priv_s *percpu_priv;
  2019. + int i, cpu;
  2020. +
  2021. + for_each_possible_cpu(cpu) {
  2022. + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
  2023. +
  2024. + if (percpu_priv->np) {
  2025. + for (i = 0; i < qman_portal_max; i++)
  2026. + netif_napi_del(&percpu_priv->np[i].napi);
  2027. +
  2028. + devm_kfree(net_dev->dev.parent, percpu_priv->np);
  2029. + }
  2030. + }
  2031. +}
  2032. +EXPORT_SYMBOL(dpa_private_napi_del);
  2033. +
  2034. +static int dpa_private_netdev_init(struct net_device *net_dev)
  2035. +{
  2036. + int i;
  2037. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  2038. + struct dpa_percpu_priv_s *percpu_priv;
  2039. + const uint8_t *mac_addr;
  2040. +
  2041. + /* Although we access another CPU's private data here
  2042. + * we do it at initialization so it is safe
  2043. + */
  2044. + for_each_possible_cpu(i) {
  2045. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  2046. + percpu_priv->net_dev = net_dev;
  2047. + }
  2048. +
  2049. + net_dev->netdev_ops = &dpa_private_ops;
  2050. + mac_addr = priv->mac_dev->addr;
  2051. +
  2052. + net_dev->mem_start = priv->mac_dev->res->start;
  2053. + net_dev->mem_end = priv->mac_dev->res->end;
  2054. +
  2055. + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  2056. + NETIF_F_LLTX);
  2057. +
  2058. + /* Advertise S/G and HIGHDMA support for private interfaces */
  2059. + net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
  2060. + /* Recent kernels enable GSO automatically, if
  2061. + * we declare NETIF_F_SG. For conformity, we'll
  2062. + * still declare GSO explicitly.
  2063. + */
  2064. + net_dev->features |= NETIF_F_GSO;
  2065. +
  2066. + /* Advertise GRO support */
  2067. + net_dev->features |= NETIF_F_GRO;
  2068. +
  2069. + return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
  2070. +}
  2071. +
  2072. +static struct dpa_bp * __cold
  2073. +dpa_priv_bp_probe(struct device *dev)
  2074. +{
  2075. + struct dpa_bp *dpa_bp;
  2076. +
  2077. + dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
  2078. + if (unlikely(dpa_bp == NULL)) {
  2079. + dev_err(dev, "devm_kzalloc() failed\n");
  2080. + return ERR_PTR(-ENOMEM);
  2081. + }
  2082. +
   2083. + dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
   + if (!dpa_bp->percpu_count)
   + return ERR_PTR(-ENOMEM);
  2084. + dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
  2085. +
  2086. + dpa_bp->seed_cb = dpa_bp_priv_seed;
  2087. + dpa_bp->free_buf_cb = _dpa_bp_free_pf;
  2088. +
  2089. + return dpa_bp;
  2090. +}
  2091. +
  2092. +/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
  2093. + * We won't be sending congestion notifications to FMan; for now, we just use
  2094. + * this CGR to generate enqueue rejections to FMan in order to drop the frames
  2095. + * before they reach our ingress queues and eat up memory.
  2096. + */
  2097. +static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
  2098. +{
  2099. + struct qm_mcc_initcgr initcgr;
  2100. + u32 cs_th;
  2101. + int err;
  2102. +
  2103. + err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
  2104. + if (err < 0) {
  2105. + pr_err("Error %d allocating CGR ID\n", err);
  2106. + goto out_error;
  2107. + }
  2108. +
  2109. + /* Enable CS TD, but disable Congestion State Change Notifications. */
   + memset(&initcgr, 0, sizeof(initcgr));
   1110. + initcgr.we_mask = QM_CGR_WE_CS_THRES;
  2111. + initcgr.cgr.cscn_en = QM_CGR_EN;
  2112. + cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
  2113. + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
  2114. +
  2115. + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
  2116. + initcgr.cgr.cstd_en = QM_CGR_EN;
  2117. +
  2118. + /* This is actually a hack, because this CGR will be associated with
  2119. + * our affine SWP. However, we'll place our ingress FQs in it.
  2120. + */
  2121. + err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
  2122. + &initcgr);
  2123. + if (err < 0) {
  2124. + pr_err("Error %d creating ingress CGR with ID %d\n", err,
  2125. + priv->ingress_cgr.cgrid);
  2126. + qman_release_cgrid(priv->ingress_cgr.cgrid);
  2127. + goto out_error;
  2128. + }
  2129. + pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
  2130. + priv->ingress_cgr.cgrid, priv->mac_dev->addr);
  2131. +
  2132. + /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
  2133. + * range), but we have no common initialization path between the
  2134. + * different variants of the DPAA Eth driver, so we do it here rather
  2135. + * than modifying every other variant than "private Eth".
  2136. + */
  2137. + priv->use_ingress_cgr = true;
  2138. +
  2139. +out_error:
  2140. + return err;
  2141. +}
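For context, the congestion-state threshold is not stored as a plain byte count: qm_cgr_cs_thres_set64() encodes it as an 8-bit mantissa TA and a 5-bit exponent Tn (threshold ~ TA * 2^Tn), and the final argument requests rounding up so the encoded value never undershoots the requested one. A sketch of that encoding, believed to mirror the upstream QMan helper (informative, not code from this patch):

    u64 val = cs_th; /* requested threshold in bytes */
    u32 tn = 0;
    while (val > 0xff) { /* shrink into the 8-bit mantissa */
            if (val & 1)
                    val++; /* round up so no precision is lost downward */
            val >>= 1;
            tn++;
    }
    /* encoded threshold ~ (u8)val << tn */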
  2142. +
  2143. +static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
  2144. + size_t count)
  2145. +{
  2146. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  2147. + int i;
  2148. +
  2149. + if (netif_msg_probe(priv))
  2150. + dev_dbg(net_dev->dev.parent,
  2151. + "Using private BM buffer pools\n");
  2152. +
  2153. + priv->bp_count = count;
  2154. +
  2155. + for (i = 0; i < count; i++) {
  2156. + int err;
  2157. + err = dpa_bp_alloc(&dpa_bp[i]);
  2158. + if (err < 0) {
  2159. + dpa_bp_free(priv);
  2160. + priv->dpa_bp = NULL;
  2161. + return err;
  2162. + }
  2163. +
  2164. + priv->dpa_bp = &dpa_bp[i];
  2165. + }
  2166. +
  2167. + dpa_priv_common_bpid = priv->dpa_bp->bpid;
  2168. + return 0;
  2169. +}
  2170. +
  2171. +static const struct of_device_id dpa_match[];
  2172. +
  2173. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2174. +static int dpa_new_loop_id(void)
  2175. +{
  2176. + static int if_id;
  2177. +
  2178. + return if_id++;
  2179. +}
  2180. +#endif
  2181. +
  2182. +static int
  2183. +dpaa_eth_priv_probe(struct platform_device *_of_dev)
  2184. +{
  2185. + int err = 0, i, channel;
  2186. + struct device *dev;
  2187. + struct device_node *dpa_node;
  2188. + struct dpa_bp *dpa_bp;
  2189. + struct dpa_fq *dpa_fq, *tmp;
  2190. + size_t count = 1;
  2191. + struct net_device *net_dev = NULL;
  2192. + struct dpa_priv_s *priv = NULL;
  2193. + struct dpa_percpu_priv_s *percpu_priv;
  2194. + struct fm_port_fqs port_fqs;
  2195. + struct dpa_buffer_layout_s *buf_layout = NULL;
  2196. + struct mac_device *mac_dev;
  2197. + struct task_struct *kth;
  2198. +
  2199. + dev = &_of_dev->dev;
  2200. +
  2201. + dpa_node = dev->of_node;
  2202. +
  2203. + if (!of_device_is_available(dpa_node))
  2204. + return -ENODEV;
  2205. +
  2206. + /* Get the buffer pools assigned to this interface;
   2207. + * the default pool probing code runs only once
  2208. + */
  2209. + dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
  2210. + dpa_priv_bp_probe(dev);
  2211. + if (IS_ERR(dpa_bp))
  2212. + return PTR_ERR(dpa_bp);
  2213. +
  2214. + /* Allocate this early, so we can store relevant information in
  2215. + * the private area (needed by 1588 code in dpa_mac_probe)
  2216. + */
  2217. + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
   2218. + if (!net_dev) {
   2219. + dev_err(dev, "alloc_etherdev_mq() failed\n");
   + err = -ENOMEM;
   2220. + goto alloc_etherdev_mq_failed;
   2221. + }
  2222. +
  2223. + /* Do this here, so we can be verbose early */
  2224. + SET_NETDEV_DEV(net_dev, dev);
  2225. + dev_set_drvdata(dev, net_dev);
  2226. +
  2227. + priv = netdev_priv(net_dev);
  2228. + priv->net_dev = net_dev;
  2229. + strcpy(priv->if_type, "private");
  2230. +
  2231. + priv->msg_enable = netif_msg_init(debug, -1);
  2232. +
  2233. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2234. + priv->loop_id = dpa_new_loop_id();
  2235. + priv->loop_to = -1; /* disabled by default */
  2236. + dpa_loop_netdevs[priv->loop_id] = net_dev;
  2237. +#endif
  2238. +
  2239. + mac_dev = dpa_mac_probe(_of_dev);
   2240. + if (IS_ERR_OR_NULL(mac_dev)) {
   2241. + err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
   2242. + goto mac_probe_failed;
   2243. + }
  2244. +
  2245. + /* We have physical ports, so we need to establish
  2246. + * the buffer layout.
  2247. + */
  2248. + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
  2249. + GFP_KERNEL);
   2250. + if (!buf_layout) {
   2251. + dev_err(dev, "devm_kzalloc() failed\n");
   + err = -ENOMEM;
   2252. + goto alloc_failed;
  2253. + }
  2254. + dpa_set_buffers_layout(mac_dev, buf_layout);
  2255. +
  2256. + /* For private ports, need to compute the size of the default
  2257. + * buffer pool, based on the FMan port buffer layout; also update
  2258. + * the maximum buffer size for private ports if necessary
  2259. + */
  2260. + dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
  2261. +
  2262. +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
  2263. + /* We only want to use jumbo frame optimization if we actually have
  2264. + * L2 MAX FRM set for jumbo frames as well.
  2265. + */
  2266. + if (fm_get_max_frm() < 9600)
  2267. + dev_warn(dev,
  2268. + "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
  2269. +#endif
  2270. +
  2271. + INIT_LIST_HEAD(&priv->dpa_fq_list);
  2272. +
  2273. + memset(&port_fqs, 0, sizeof(port_fqs));
  2274. +
  2275. + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
  2276. + if (!err)
  2277. + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
  2278. + &port_fqs, true, TX);
  2279. +
  2280. + if (err < 0)
  2281. + goto fq_probe_failed;
  2282. +
  2283. + /* bp init */
  2284. +
  2285. + err = dpa_priv_bp_create(net_dev, dpa_bp, count);
  2286. +
  2287. + if (err < 0)
  2288. + goto bp_create_failed;
  2289. +
  2290. + priv->mac_dev = mac_dev;
  2291. +
  2292. + channel = dpa_get_channel();
  2293. +
  2294. + if (channel < 0) {
  2295. + err = channel;
  2296. + goto get_channel_failed;
  2297. + }
  2298. +
  2299. + priv->channel = (uint16_t)channel;
  2300. +
  2301. + /* Start a thread that will walk the cpus with affine portals
  2302. + * and add this pool channel to each one's dequeue mask.
  2303. + */
  2304. + kth = kthread_run(dpaa_eth_add_channel,
  2305. + (void *)(unsigned long)priv->channel,
  2306. + "dpaa_%p:%d", net_dev, priv->channel);
  2307. + if (IS_ERR(kth)) {
  2308. + err = PTR_ERR(kth);
  2309. + goto add_channel_failed;
  2310. + }
  2311. +
  2312. + dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
  2313. +
  2314. + /* Create a congestion group for this netdev, with
  2315. + * dynamically-allocated CGR ID.
  2316. + * Must be executed after probing the MAC, but before
  2317. + * assigning the egress FQs to the CGRs.
  2318. + */
  2319. + err = dpaa_eth_cgr_init(priv);
  2320. + if (err < 0) {
  2321. + dev_err(dev, "Error initializing CGR\n");
  2322. + goto tx_cgr_init_failed;
  2323. + }
  2324. + err = dpaa_eth_priv_ingress_cgr_init(priv);
  2325. + if (err < 0) {
  2326. + dev_err(dev, "Error initializing ingress CGR\n");
  2327. + goto rx_cgr_init_failed;
  2328. + }
  2329. +
  2330. + /* Add the FQs to the interface, and make them active */
  2331. + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
  2332. + err = dpa_fq_init(dpa_fq, false);
  2333. + if (err < 0)
  2334. + goto fq_alloc_failed;
  2335. + }
  2336. +
  2337. + priv->buf_layout = buf_layout;
  2338. + priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
  2339. + priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
  2340. +
  2341. + /* All real interfaces need their ports initialized */
  2342. + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
  2343. + buf_layout, dev);
  2344. +
  2345. +#ifdef CONFIG_FMAN_PFC
  2346. + for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
  2347. + err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
  2348. + mac_dev->port_dev[TX], i, i);
  2349. + if (unlikely(err != 0)) {
  2350. + dev_err(dev, "Error maping PFC %u to WQ %u\n", i, i);
  2351. + goto pfc_mapping_failed;
  2352. + }
  2353. + }
  2354. +#endif
  2355. +
  2356. + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
  2357. +
  2358. + if (priv->percpu_priv == NULL) {
  2359. + dev_err(dev, "devm_alloc_percpu() failed\n");
  2360. + err = -ENOMEM;
  2361. + goto alloc_percpu_failed;
  2362. + }
  2363. + for_each_possible_cpu(i) {
  2364. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  2365. + memset(percpu_priv, 0, sizeof(*percpu_priv));
  2366. + }
  2367. +
  2368. + /* Initialize NAPI */
  2369. + err = dpa_private_napi_add(net_dev);
  2370. +
  2371. + if (err < 0)
  2372. + goto napi_add_failed;
  2373. +
  2374. + err = dpa_private_netdev_init(net_dev);
  2375. +
  2376. + if (err < 0)
  2377. + goto netdev_init_failed;
  2378. +
  2379. + dpaa_eth_sysfs_init(&net_dev->dev);
  2380. +
  2381. +#ifdef CONFIG_PM
  2382. + device_set_wakeup_capable(dev, true);
  2383. +#endif
  2384. +
  2385. + pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
  2386. +
  2387. + return 0;
  2388. +
  2389. +netdev_init_failed:
  2390. +napi_add_failed:
  2391. + dpa_private_napi_del(net_dev);
  2392. +alloc_percpu_failed:
  2393. +#ifdef CONFIG_FMAN_PFC
  2394. +pfc_mapping_failed:
  2395. +#endif
  2396. + dpa_fq_free(dev, &priv->dpa_fq_list);
  2397. +fq_alloc_failed:
  2398. + qman_delete_cgr_safe(&priv->ingress_cgr);
  2399. + qman_release_cgrid(priv->ingress_cgr.cgrid);
  2400. +rx_cgr_init_failed:
  2401. + qman_delete_cgr_safe(&priv->cgr_data.cgr);
  2402. + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
  2403. +tx_cgr_init_failed:
  2404. +add_channel_failed:
  2405. +get_channel_failed:
  2406. + dpa_bp_free(priv);
  2407. +bp_create_failed:
  2408. +fq_probe_failed:
  2409. +alloc_failed:
  2410. +mac_probe_failed:
  2411. + dev_set_drvdata(dev, NULL);
  2412. + free_netdev(net_dev);
  2413. +alloc_etherdev_mq_failed:
  2414. + if (atomic_read(&dpa_bp->refs) == 0)
  2415. + devm_kfree(dev, dpa_bp);
  2416. +
  2417. + return err;
  2418. +}
  2419. +
  2420. +static const struct of_device_id dpa_match[] = {
  2421. + {
  2422. + .compatible = "fsl,dpa-ethernet"
  2423. + },
  2424. + {}
  2425. +};
  2426. +MODULE_DEVICE_TABLE(of, dpa_match);
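For orientation, a node this match table would bind to might look as follows. This is an illustrative sketch only; the fsl,fman-mac phandle is assumed to be the property that dpa_mac_probe() resolves, and the authoritative binding lives elsewhere in the SDK:

	/* Illustrative device-tree node (names and phandle hypothetical):
	 *
	 *	ethernet@0 {
	 *		compatible = "fsl,dpa-ethernet";
	 *		fsl,fman-mac = <&enet0>;
	 *	};
	 */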
  2427. +
  2428. +static struct platform_driver dpa_driver = {
  2429. + .driver = {
  2430. + .name = KBUILD_MODNAME,
  2431. + .of_match_table = dpa_match,
  2432. + .owner = THIS_MODULE,
  2433. + .pm = DPAA_PM_OPS,
  2434. + },
  2435. + .probe = dpaa_eth_priv_probe,
  2436. + .remove = dpa_remove
  2437. +};
  2438. +
  2439. +static int __init __cold dpa_load(void)
  2440. +{
  2441. + int _errno;
  2442. +
  2443. + pr_info(DPA_DESCRIPTION "\n");
  2444. +
  2445. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2446. + dpa_debugfs_module_init();
  2447. +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
  2448. +
  2449. + /* initialise dpaa_eth mirror values */
  2450. + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
  2451. + dpa_max_frm = fm_get_max_frm();
  2452. + dpa_num_cpus = num_possible_cpus();
  2453. +
  2454. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2455. + memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
  2456. +#endif
  2457. +
  2458. + _errno = platform_driver_register(&dpa_driver);
  2459. + if (unlikely(_errno < 0)) {
  2460. + pr_err(KBUILD_MODNAME
  2461. + ": %s:%hu:%s(): platform_driver_register() = %d\n",
  2462. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  2463. + }
  2464. +
  2465. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  2466. + KBUILD_BASENAME".c", __func__);
  2467. +
  2468. + return _errno;
  2469. +}
  2470. +module_init(dpa_load);
  2471. +
  2472. +static void __exit __cold dpa_unload(void)
  2473. +{
  2474. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  2475. + KBUILD_BASENAME".c", __func__);
  2476. +
  2477. + platform_driver_unregister(&dpa_driver);
  2478. +
  2479. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2480. + dpa_debugfs_module_exit();
  2481. +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
  2482. +
  2483. + /* Only one channel is used and needs to be released after all
  2484. + * interfaces are removed
  2485. + */
  2486. + dpa_release_channel();
  2487. +
  2488. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  2489. + KBUILD_BASENAME".c", __func__);
  2490. +}
  2491. +module_exit(dpa_unload);
  2492. --- /dev/null
  2493. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
  2494. @@ -0,0 +1,695 @@
  2495. +/* Copyright 2008-2012 Freescale Semiconductor Inc.
  2496. + *
  2497. + * Redistribution and use in source and binary forms, with or without
  2498. + * modification, are permitted provided that the following conditions are met:
  2499. + * * Redistributions of source code must retain the above copyright
  2500. + * notice, this list of conditions and the following disclaimer.
  2501. + * * Redistributions in binary form must reproduce the above copyright
  2502. + * notice, this list of conditions and the following disclaimer in the
  2503. + * documentation and/or other materials provided with the distribution.
  2504. + * * Neither the name of Freescale Semiconductor nor the
  2505. + * names of its contributors may be used to endorse or promote products
  2506. + * derived from this software without specific prior written permission.
  2507. + *
  2508. + *
  2509. + * ALTERNATIVELY, this software may be distributed under the terms of the
  2510. + * GNU General Public License ("GPL") as published by the Free Software
  2511. + * Foundation, either version 2 of that License or (at your option) any
  2512. + * later version.
  2513. + *
  2514. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  2515. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  2516. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  2517. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  2518. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  2519. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  2520. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  2521. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  2522. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  2523. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  2524. + */
  2525. +
  2526. +#ifndef __DPA_H
  2527. +#define __DPA_H
  2528. +
  2529. +#include <linux/netdevice.h>
  2530. +#include <linux/fsl_qman.h> /* struct qman_fq */
  2531. +
  2532. +#include "fm_ext.h"
  2533. +#include "dpaa_eth_trace.h"
  2534. +
  2535. +extern int dpa_rx_extra_headroom;
  2536. +extern int dpa_max_frm;
  2537. +extern int dpa_num_cpus;
  2538. +
  2539. +#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
  2540. +#define dpa_get_max_frm() dpa_max_frm
  2541. +
  2542. +#define dpa_get_max_mtu() \
  2543. + (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
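A quick worked example of the MTU macro, assuming the stock kernel values VLAN_ETH_HLEN == 18 and ETH_FCS_LEN == 4:

	/*	fm_get_max_frm() == 1522 -> dpa_get_max_mtu() == 1522 - 22 == 1500
	 *	fm_get_max_frm() == 9600 -> dpa_get_max_mtu() == 9600 - 22 == 9578
	 */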
  2544. +
  2545. +#define __hot
  2546. +
  2547. +/* Simple enum of FQ types - used for array indexing */
  2548. +enum port_type {RX, TX};
  2549. +
  2550. +/* TODO: This structure should be renamed & moved to the FMD wrapper */
  2551. +struct dpa_buffer_layout_s {
  2552. + uint16_t priv_data_size;
  2553. + bool parse_results;
  2554. + bool time_stamp;
  2555. + bool hash_results;
  2556. + uint8_t manip_extra_space;
  2557. + uint16_t data_align;
  2558. +};
  2559. +
  2560. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  2561. +#define DPA_BUG_ON(cond) BUG_ON(cond)
  2562. +#else
  2563. +#define DPA_BUG_ON(cond)
  2564. +#endif
  2565. +
  2566. +#define DPA_TX_PRIV_DATA_SIZE 16
  2567. +#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
  2568. +#define DPA_TIME_STAMP_SIZE 8
  2569. +#define DPA_HASH_RESULTS_SIZE 8
  2570. +#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
  2571. + dpa_get_rx_extra_headroom())
  2572. +
  2573. +#define FM_FD_STAT_RX_ERRORS \
  2574. + (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
  2575. + FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
  2576. + FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
  2577. + FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
  2578. + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
  2579. +
  2580. +#define FM_FD_STAT_TX_ERRORS \
  2581. + (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
  2582. + FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
  2583. +
  2584. +#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
  2585. +/* The raw buffer size must be cacheline aligned.
  2586. + * Normally we use 2K buffers.
  2587. + */
  2588. +#define DPA_BP_RAW_SIZE 2048
  2589. +#else
  2590. +/* For jumbo frame optimizations, use buffers large enough to accommodate
  2591. + * 9.6K frames, FD maximum offset, skb_shared_info overhead and some extra
  2592. + * space to account for further alignments.
  2593. + */
  2594. +#define DPA_MAX_FRM_SIZE 9600
  2595. +#define DPA_BP_RAW_SIZE \
  2596. + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
  2597. + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
  2598. +#endif
  2599. +
  2600. +/* This is what FMan is ever allowed to use.
  2601. + * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
  2602. + * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
  2603. + * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
  2604. + * half-page-aligned buffers (can we?), so we reserve some more space
  2605. + * for start-of-buffer alignment.
  2606. + */
  2607. +#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
  2608. + SMP_CACHE_BYTES)
  2609. +/* We must ensure that skb_shinfo is always cacheline-aligned. */
  2610. +#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
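To make the buffer-size arithmetic concrete, a worked example for the non-jumbo case; the skb_shared_info figure is an assumption (it varies with kernel version and architecture):

	/* Assuming SMP_CACHE_BYTES == 64 and
	 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320:
	 *	dpa_bp_size() = SKB_WITH_OVERHEAD(2048) - 64
	 *	              = (2048 - 320) - 64 = 1664 usable bytes per buffer
	 */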
  2611. +
  2612. +/* Maximum size of a buffer for which recycling is allowed.
  2613. + * We need an upper limit such that forwarded skbs that get reallocated on Tx
  2614. + * aren't allowed to grow unboundedly. On the other hand, we need to make sure
  2615. + * that skbs allocated by us will not fail to be recycled due to their size.
  2616. + *
  2617. + * For a requested size, the kernel allocator provides the next power of two
  2618. + * sized block, which the stack will use as is, regardless of the actual size
  2619. + * it required; since we must accommodate at most 9.6K buffers (L2 maximum
  2620. + * supported frame size), set the recycling upper limit to 16K.
  2621. + */
  2622. +#define DPA_RECYCLE_MAX_SIZE 16384
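The 16K limit follows directly from that rounding; a hedged illustration with hypothetical sizes:

	/* A 9.6K frame plus headroom and skb_shared_info overhead requests
	 * roughly 10K from the allocator; the next power of two is
	 * roundup_pow_of_two(10240) == 16384 == DPA_RECYCLE_MAX_SIZE,
	 * so buffers we allocate ourselves remain recyclable.
	 */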
  2623. +
  2624. +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
  2625. +/* TODO: temporary, for FMan PCD testing */
  2626. +#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
  2627. +#endif
  2628. +
  2629. +#define DPAA_ETH_FQ_DELTA 0x10000
  2630. +
  2631. +#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
  2632. + (((device_addr) & 0x1fffff) >> 6)
  2633. +
  2634. +#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
  2635. + (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
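An illustration of the FQ base computation with a made-up port register address:

	/*	device_addr              = 0xffe4e0000
	 *	device_addr & 0x1fffff   = 0x0e0000
	 *	DPAA_ETH_PCD_FQ_BASE()   = 0x0e0000 >> 6 = 0x3800
	 *	..._FQ_HI_PRIO_BASE()    = 0x10000 + 0x3800 = 0x13800
	 */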
  2636. +
  2637. +/* Largest value that the FQD's OAL field can hold.
  2638. + * This is DPAA-1.x specific.
  2639. + * TODO: This rather belongs in fsl_qman.h
  2640. + */
  2641. +#define FSL_QMAN_MAX_OAL 127
  2642. +
  2643. +/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
  2644. +#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
  2645. +
  2646. +/* Default alignment for start of data in an Rx FD */
  2647. +#define DPA_FD_DATA_ALIGNMENT 16
  2648. +
  2649. +/* Values for the L3R field of the FM Parse Results
  2650. + */
  2651. +/* L3 Type field: First IP Present IPv4 */
  2652. +#define FM_L3_PARSE_RESULT_IPV4 0x8000
  2653. +/* L3 Type field: First IP Present IPv6 */
  2654. +#define FM_L3_PARSE_RESULT_IPV6 0x4000
  2655. +
  2656. +/* Values for the L4R field of the FM Parse Results
  2657. + * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
  2658. + */
  2659. +/* L4 Type field: UDP */
  2660. +#define FM_L4_PARSE_RESULT_UDP 0x40
  2661. +/* L4 Type field: TCP */
  2662. +#define FM_L4_PARSE_RESULT_TCP 0x20
  2663. +/* FD status field indicating whether the FM Parser has attempted to validate
  2664. + * the L4 csum of the frame.
  2665. + * Note that having this bit set doesn't necessarily imply that the checksum
  2666. + * is valid. One would have to check the parse results to find that out.
  2667. + */
  2668. +#define FM_FD_STAT_L4CV 0x00000004
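A minimal sketch of how an Rx path can combine that status bit with the parse results. The helper below is hypothetical (the driver's real decision lives in its Rx code and must also consult the L4 checksum error bits); it assumes qm_fd exposes the FD status word as fd->status:

	static inline void example_rx_csum(const struct qm_fd *fd,
					   const fm_prs_result_t *prs,
					   struct sk_buff *skb)
	{
		/* L4CV only says validation was attempted; the parse
		 * results say whether the frame is TCP/UDP at all.
		 */
		if ((fd->status & FM_FD_STAT_L4CV) &&
		    (prs->l4r & (FM_L4_PARSE_RESULT_TCP |
				 FM_L4_PARSE_RESULT_UDP)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}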
  2669. +
  2670. +
  2671. +#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
  2672. +
  2673. +/* Check if the parsed frame was found to be a TCP segment.
  2674. + *
  2675. + * @parse_result_ptr must be of type (fm_prs_result_t *).
  2676. + */
  2677. +#define fm_l4_frame_is_tcp(parse_result_ptr) \
  2678. + ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
  2679. +
  2680. +/* number of Tx queues to FMan */
  2681. +#ifdef CONFIG_FMAN_PFC
  2682. +#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
  2683. +#else
  2684. +#define DPAA_ETH_TX_QUEUES NR_CPUS
  2685. +#endif
  2686. +
  2687. +#define DPAA_ETH_RX_QUEUES 128
  2688. +
  2689. +/* Convenience macros for storing/retrieving the skb back-pointers. They must
  2690. + * accommodate both the recycling and the confirmation paths - i.e. buffers
  2691. + * allocated by ourselves and buffers allocated by the stack. In the former
  2692. + * case we could store the skb at a negative offset; in the latter we can't,
  2693. + * so we use an offset of 0.
  2694. + *
  2695. + * NB: @off is an offset from a (struct sk_buff **) pointer!
  2696. + */
  2697. +#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
  2698. +{ \
  2699. + skbh = (struct sk_buff **)addr; \
  2700. + *(skbh + (off)) = skb; \
  2701. +}
  2702. +#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
  2703. +{ \
  2704. + skbh = (struct sk_buff **)addr; \
  2705. + skb = *(skbh + (off)); \
  2706. +}
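A round-trip usage sketch for the two macros (function and names hypothetical):

	static inline void example_skb_backptr(struct sk_buff *skb,
					       void *buf_start)
	{
		struct sk_buff **skbh;
		struct sk_buff *found;

		/* Stack-allocated buffer: back-pointer goes at offset 0 */
		DPA_WRITE_SKB_PTR(skb, skbh, buf_start, 0);
		DPA_READ_SKB_PTR(found, skbh, buf_start, 0);
		WARN_ON(found != skb);
	}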
  2707. +
  2708. +#ifdef CONFIG_PM
  2709. +/* Magic Packet wakeup */
  2710. +#define DPAA_WOL_MAGIC 0x00000001
  2711. +#endif
  2712. +
  2713. +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
  2714. +struct pcd_range {
  2715. + uint32_t base;
  2716. + uint32_t count;
  2717. +};
  2718. +#endif
  2719. +
  2720. +/* More detailed FQ types - used for fine-grained WQ assignments */
  2721. +enum dpa_fq_type {
  2722. + FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
  2723. + FQ_TYPE_RX_ERROR, /* Rx Error FQs */
  2724. + FQ_TYPE_RX_PCD, /* User-defined PCDs */
  2725. + FQ_TYPE_TX, /* "Real" Tx FQs */
  2726. + FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
  2727. + FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
  2728. + FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
  2729. + FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
  2730. +};
  2731. +
  2732. +struct dpa_fq {
  2733. + struct qman_fq fq_base;
  2734. + struct list_head list;
  2735. + struct net_device *net_dev;
  2736. + bool init;
  2737. + uint32_t fqid;
  2738. + uint32_t flags;
  2739. + uint16_t channel;
  2740. + uint8_t wq;
  2741. + enum dpa_fq_type fq_type;
  2742. +};
  2743. +
  2744. +struct dpa_fq_cbs_t {
  2745. + struct qman_fq rx_defq;
  2746. + struct qman_fq tx_defq;
  2747. + struct qman_fq rx_errq;
  2748. + struct qman_fq tx_errq;
  2749. + struct qman_fq egress_ern;
  2750. +};
  2751. +
  2752. +struct fqid_cell {
  2753. + uint32_t start;
  2754. + uint32_t count;
  2755. +};
  2756. +
  2757. +struct dpa_bp {
  2758. + struct bman_pool *pool;
  2759. + uint8_t bpid;
  2760. + struct device *dev;
  2761. + union {
  2762. + /* The buffer pools used for the private ports are initialized
  2763. + * with target_count buffers for each CPU; at runtime the
  2764. + * number of buffers per CPU is constantly brought back to this
  2765. + * level
  2766. + */
  2767. + int target_count;
  2768. + /* The configured value for the number of buffers in the pool,
  2769. + * used for shared port buffer pools
  2770. + */
  2771. + int config_count;
  2772. + };
  2773. + size_t size;
  2774. + bool seed_pool;
  2775. + /* physical address of the contiguous memory used by the pool to store
  2776. + * the buffers
  2777. + */
  2778. + dma_addr_t paddr;
  2779. + /* virtual address of the contiguous memory used by the pool to store
  2780. + * the buffers
  2781. + */
  2782. + void __iomem *vaddr;
  2783. + /* current number of buffers in the bpool allotted to this CPU */
  2784. + int __percpu *percpu_count;
  2785. + atomic_t refs;
  2786. + /* some bpools need to be seeded before use by this cb */
  2787. + int (*seed_cb)(struct dpa_bp *);
  2788. + /* some bpools need to be emptied before freeing; this cb is used
  2789. + * for freeing of individual buffers taken from the pool
  2790. + */
  2791. + void (*free_buf_cb)(void *addr);
  2792. +};
  2793. +
  2794. +struct dpa_rx_errors {
  2795. + u64 dme; /* DMA Error */
  2796. + u64 fpe; /* Frame Physical Error */
  2797. + u64 fse; /* Frame Size Error */
  2798. + u64 phe; /* Header Error */
  2799. + u64 cse; /* Checksum Validation Error */
  2800. +};
  2801. +
  2802. +/* Counters for QMan ERN frames - one counter per rejection code */
  2803. +struct dpa_ern_cnt {
  2804. + u64 cg_tdrop; /* Congestion group taildrop */
  2805. + u64 wred; /* WRED congestion */
  2806. + u64 err_cond; /* Error condition */
  2807. + u64 early_window; /* Order restoration, frame too early */
  2808. + u64 late_window; /* Order restoration, frame too late */
  2809. + u64 fq_tdrop; /* FQ taildrop */
  2810. + u64 fq_retired; /* FQ is retired */
  2811. + u64 orp_zero; /* ORP disabled */
  2812. +};
  2813. +
  2814. +struct dpa_napi_portal {
  2815. + struct napi_struct napi;
  2816. + struct qman_portal *p;
  2817. +};
  2818. +
  2819. +struct dpa_percpu_priv_s {
  2820. + struct net_device *net_dev;
  2821. + struct dpa_napi_portal *np;
  2822. + u64 in_interrupt;
  2823. + u64 tx_returned;
  2824. + u64 tx_confirm;
  2825. + /* fragmented (non-linear) skbuffs received from the stack */
  2826. + u64 tx_frag_skbuffs;
  2827. + /* number of S/G frames received */
  2828. + u64 rx_sg;
  2829. +
  2830. + struct rtnl_link_stats64 stats;
  2831. + struct dpa_rx_errors rx_errors;
  2832. + struct dpa_ern_cnt ern_cnt;
  2833. +};
  2834. +
  2835. +struct dpa_priv_s {
  2836. + struct dpa_percpu_priv_s __percpu *percpu_priv;
  2837. + struct dpa_bp *dpa_bp;
  2838. + /* Store here the needed Tx headroom for convenience and speed
  2839. + * (even though it can be computed based on the fields of buf_layout)
  2840. + */
  2841. + uint16_t tx_headroom;
  2842. + struct net_device *net_dev;
  2843. + struct mac_device *mac_dev;
  2844. + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
  2845. + struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
  2846. +
  2847. + size_t bp_count;
  2848. +
  2849. + uint16_t channel; /* "fsl,qman-channel-id" */
  2850. + struct list_head dpa_fq_list;
  2851. +
  2852. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2853. + struct dentry *debugfs_loop_file;
  2854. +#endif
  2855. +
  2856. + uint32_t msg_enable; /* net_device message level */
  2857. +#ifdef CONFIG_FSL_DPAA_1588
  2858. + struct dpa_ptp_tsu *tsu;
  2859. +#endif
  2860. +
  2861. +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
  2862. +/* TODO: this is temporary until pcd support is implemented in dpaa */
  2863. + int priv_pcd_num_ranges;
  2864. + struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
  2865. +#endif
  2866. +
  2867. + struct {
  2868. + /**
  2869. + * All egress queues to a given net device belong to one
  2870. + * (and the same) congestion group.
  2871. + */
  2872. + struct qman_cgr cgr;
  2873. + /* If congested, when it began. Used for performance stats. */
  2874. + u32 congestion_start_jiffies;
  2875. + /* Number of jiffies the Tx port was congested. */
  2876. + u32 congested_jiffies;
  2877. + /**
  2878. + * Counter for the number of times the CGR
  2879. + * entered congestion state
  2880. + */
  2881. + u32 cgr_congested_count;
  2882. + } cgr_data;
  2883. + /* Use a per-port CGR for ingress traffic. */
  2884. + bool use_ingress_cgr;
  2885. + struct qman_cgr ingress_cgr;
  2886. +
  2887. +#ifdef CONFIG_FSL_DPAA_TS
  2888. + bool ts_tx_en; /* Tx timestamping enabled */
  2889. + bool ts_rx_en; /* Rx timestamping enabled */
  2890. +#endif /* CONFIG_FSL_DPAA_TS */
  2891. +
  2892. + struct dpa_buffer_layout_s *buf_layout;
  2893. + uint16_t rx_headroom;
  2894. + char if_type[30];
  2895. +
  2896. + void *peer;
  2897. +#ifdef CONFIG_PM
  2898. + u32 wol;
  2899. +#endif
  2900. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2901. + int loop_id;
  2902. + int loop_to;
  2903. +#endif
  2904. +#ifdef CONFIG_FSL_DPAA_CEETM
  2905. + bool ceetm_en; /* CEETM QoS enabled */
  2906. +#endif
  2907. +};
  2908. +
  2909. +struct fm_port_fqs {
  2910. + struct dpa_fq *tx_defq;
  2911. + struct dpa_fq *tx_errq;
  2912. + struct dpa_fq *rx_defq;
  2913. + struct dpa_fq *rx_errq;
  2914. +};
  2915. +
  2916. +
  2917. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  2918. +extern struct net_device *dpa_loop_netdevs[20];
  2919. +#endif
  2920. +
  2921. +/* functions with different implementation for SG and non-SG: */
  2922. +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
  2923. +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
  2924. +void __hot _dpa_rx(struct net_device *net_dev,
  2925. + struct qman_portal *portal,
  2926. + const struct dpa_priv_s *priv,
  2927. + struct dpa_percpu_priv_s *percpu_priv,
  2928. + const struct qm_fd *fd,
  2929. + u32 fqid,
  2930. + int *count_ptr);
  2931. +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
  2932. +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
  2933. + struct qman_fq *egress_fq, struct qman_fq *conf_fq);
  2934. +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
  2935. + const struct qm_fd *fd);
  2936. +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
  2937. + const struct qm_fd *fd,
  2938. + struct sk_buff *skb,
  2939. + int *use_gro);
  2940. +#ifndef CONFIG_FSL_DPAA_TS
  2941. +bool dpa_skb_is_recyclable(struct sk_buff *skb);
  2942. +bool dpa_buf_is_recyclable(struct sk_buff *skb,
  2943. + uint32_t min_size,
  2944. + uint16_t min_offset,
  2945. + unsigned char **new_buf_start);
  2946. +#endif
  2947. +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
  2948. + struct sk_buff *skb, struct qm_fd *fd,
  2949. + int *count_ptr, int *offset);
  2950. +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
  2951. + struct sk_buff *skb, struct qm_fd *fd);
  2952. +int __cold __attribute__((nonnull))
  2953. + _dpa_fq_free(struct device *dev, struct qman_fq *fq);
  2954. +
  2955. +/* Turn on HW checksum computation for this outgoing frame.
  2956. + * If the current protocol is not something we support in this regard
  2957. + * (or if the stack has already computed the SW checksum), we do nothing.
  2958. + *
  2959. + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
  2960. + * otherwise.
  2961. + *
  2962. + * Note that this function may modify the fd->cmd field and the skb data buffer
  2963. + * (the Parse Results area).
  2964. + */
  2965. +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
  2966. + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
  2967. +
  2968. +static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
  2969. + struct qman_portal *portal)
  2970. +{
  2971. + /* On an RT-enabled kernel with threaded ISRs, in_irq() does
  2972. + * not return an appropriate value, so use in_serving_softirq()
  2973. + * to distinguish between softirq and irq context.
  2974. + */
  2975. + if (unlikely(in_irq() || !in_serving_softirq())) {
  2976. + /* Disable QMan IRQ and invoke NAPI */
  2977. + int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
  2978. + if (likely(!ret)) {
  2979. + const struct qman_portal_config *pc =
  2980. + qman_p_get_portal_config(portal);
  2981. + struct dpa_napi_portal *np =
  2982. + &percpu_priv->np[pc->index];
  2983. +
  2984. + np->p = portal;
  2985. + napi_schedule(&np->napi);
  2986. + percpu_priv->in_interrupt++;
  2987. + return 1;
  2988. + }
  2989. + }
  2990. + return 0;
  2991. +}
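For context, a hedged sketch of the call-site shape, i.e. how an ingress DQRR callback might use this helper (the real callbacks live in the driver's common/private Rx code):

	static enum qman_cb_dqrr_result
	example_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
			const struct qm_dqrr_entry *dq)
	{
		struct net_device *net_dev = ((struct dpa_fq *)fq)->net_dev;
		struct dpa_priv_s *priv = netdev_priv(net_dev);
		struct dpa_percpu_priv_s *percpu_priv =
			raw_cpu_ptr(priv->percpu_priv);

		/* In irq context, hand the portal to NAPI and stop dequeues */
		if (dpaa_eth_napi_schedule(percpu_priv, portal))
			return qman_cb_dqrr_stop;

		/* ... otherwise process dq->fd inline ... */
		return qman_cb_dqrr_consume;
	}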
  2992. +
  2993. +static inline ssize_t __const __must_check __attribute__((nonnull))
  2994. +dpa_fd_length(const struct qm_fd *fd)
  2995. +{
  2996. + return fd->length20;
  2997. +}
  2998. +
  2999. +static inline ssize_t __const __must_check __attribute__((nonnull))
  3000. +dpa_fd_offset(const struct qm_fd *fd)
  3001. +{
  3002. + return fd->offset;
  3003. +}
  3004. +
  3005. +/* Verifies if the skb length is below the interface MTU */
  3006. +static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
  3007. +{
  3008. + if (unlikely(skb->len > mtu))
  3009. + if ((skb->protocol != htons(ETH_P_8021Q))
  3010. + || (skb->len > mtu + 4))
  3011. + return -1;
  3012. +
  3013. + return 0;
  3014. +}
  3015. +
  3016. +static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
  3017. +{
  3018. + uint16_t headroom;
  3019. + /* The frame headroom must accommodate:
  3020. + * - the driver private data area
  3021. + * - parse results, hash results, timestamp if selected
  3022. + * - manip extra space
  3023. + * If either hash results or time stamp are selected, both will
  3024. + * be copied to/from the frame headroom, as TS is located between PR and
  3025. + * HR in the IC and the IC copy size has a granularity of 16 bytes
  3026. + * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
  3027. + *
  3028. + * Also make sure the headroom is a multiple of data_align bytes
  3029. + */
  3030. + headroom = (uint16_t)(bl->priv_data_size +
  3031. + (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
  3032. + (bl->hash_results || bl->time_stamp ?
  3033. + DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
  3034. + bl->manip_extra_space);
  3035. +
  3036. + return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
  3037. +}
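A worked headroom example; the parse-results size is an assumption (DPA_PARSE_RESULTS_SIZE is sizeof(fm_prs_result_t), typically 32 bytes):

	/*	priv_data_size = 16, parse_results = true,
	 *	time_stamp/hash_results = true, manip_extra_space = 0,
	 *	data_align = 16
	 *	headroom = 16 + 32 + (8 + 8) + 0 = 64; ALIGN(64, 16) = 64
	 */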
  3038. +
  3039. +int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
  3040. +int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
  3041. +int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
  3042. +
  3043. +void dpaa_eth_sysfs_remove(struct device *dev);
  3044. +void dpaa_eth_sysfs_init(struct device *dev);
  3045. +int dpaa_eth_poll(struct napi_struct *napi, int budget);
  3046. +
  3047. +void dpa_private_napi_del(struct net_device *net_dev);
  3048. +
  3049. +/* Equivalent to a memset(0), but works faster */
  3050. +static inline void clear_fd(struct qm_fd *fd)
  3051. +{
  3052. + fd->opaque_addr = 0;
  3053. + fd->opaque = 0;
  3054. + fd->cmd = 0;
  3055. +}
  3056. +
  3057. +static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
  3058. + struct qman_fq *tx_fq)
  3059. +{
  3060. + int i;
  3061. +
  3062. + for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
  3063. + if (priv->egress_fqs[i] == tx_fq)
  3064. + return i;
  3065. +
  3066. + return -EINVAL;
  3067. +}
  3068. +
  3069. +static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
  3070. + struct rtnl_link_stats64 *percpu_stats,
  3071. + struct qm_fd *fd, struct qman_fq *egress_fq,
  3072. + struct qman_fq *conf_fq)
  3073. +{
  3074. + int err, i;
  3075. +
  3076. + if (fd->bpid == 0xff)
  3077. + fd->cmd |= qman_fq_fqid(conf_fq);
  3078. +
  3079. + /* Trace this Tx fd */
  3080. + trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
  3081. +
  3082. + for (i = 0; i < 100000; i++) {
  3083. + err = qman_enqueue(egress_fq, fd, 0);
  3084. + if (err != -EBUSY)
  3085. + break;
  3086. + }
  3087. +
  3088. + if (unlikely(err < 0)) {
  3089. + /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
  3090. + percpu_stats->tx_errors++;
  3091. + percpu_stats->tx_fifo_errors++;
  3092. + return err;
  3093. + }
  3094. +
  3095. + percpu_stats->tx_packets++;
  3096. + percpu_stats->tx_bytes += dpa_fd_length(fd);
  3097. +
  3098. + return 0;
  3099. +}
  3100. +
  3101. +/* Use multiple WQs for FQ assignment:
  3102. + * - Tx Confirmation queues go to WQ1.
  3103. + * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
  3104. + * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
  3105. + * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
  3106. + * to be scheduled, in case there are many more FQs in WQ3).
  3107. + * This ensures that Tx-confirmed buffers are timely released. In particular,
  3108. + * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
  3109. + * are greatly outnumbered by other FQs in the system (usually PCDs), while
  3110. + * dequeue scheduling is round-robin.
  3111. + */
  3112. +static inline void _dpa_assign_wq(struct dpa_fq *fq)
  3113. +{
  3114. + switch (fq->fq_type) {
  3115. + case FQ_TYPE_TX_CONFIRM:
  3116. + case FQ_TYPE_TX_CONF_MQ:
  3117. + fq->wq = 1;
  3118. + break;
  3119. + case FQ_TYPE_RX_DEFAULT:
  3120. + case FQ_TYPE_TX:
  3121. + fq->wq = 3;
  3122. + break;
  3123. + case FQ_TYPE_RX_ERROR:
  3124. + case FQ_TYPE_TX_ERROR:
  3125. + case FQ_TYPE_RX_PCD_HI_PRIO:
  3126. + fq->wq = 2;
  3127. + break;
  3128. + case FQ_TYPE_RX_PCD:
  3129. + fq->wq = 5;
  3130. + break;
  3131. + default:
  3132. + WARN(1, "Invalid FQ type %d for FQID %d!\n",
  3133. + fq->fq_type, fq->fqid);
  3134. + }
  3135. +}
  3136. +
  3137. +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  3138. +/* Use in lieu of skb_get_queue_mapping() */
  3139. +#ifdef CONFIG_FMAN_PFC
  3140. +#define dpa_get_queue_mapping(skb) \
  3141. + (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
  3142. + ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
  3143. + ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
  3144. + dpa_num_cpus + smp_processor_id()))
  3145. +
  3146. +#else
  3147. +#define dpa_get_queue_mapping(skb) \
  3148. + raw_smp_processor_id()
  3149. +#endif
  3150. +#else
  3151. +/* Use the queue selected by XPS */
  3152. +#define dpa_get_queue_mapping(skb) \
  3153. + skb_get_queue_mapping(skb)
  3154. +#endif
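A worked example for the PFC variant, with hypothetical values CONFIG_FMAN_PFC_COS_COUNT == 3 and dpa_num_cpus == 4:

	/*	skb->priority == 1 on CPU 2 -> queue 1 * 4 + 2 == 6
	 *	skb->priority == 7 on CPU 2 -> clamped: (3 - 1) * 4 + 2 == 10
	 */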
  3155. +
  3156. +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
  3157. +struct ptp_priv_s {
  3158. + struct device_node *node;
  3159. + struct platform_device *of_dev;
  3160. + struct mac_device *mac_dev;
  3161. +};
  3162. +extern struct ptp_priv_s ptp_priv;
  3163. +#endif
  3164. +
  3165. +static inline void _dpa_bp_free_pf(void *addr)
  3166. +{
  3167. + put_page(virt_to_head_page(addr));
  3168. +}
  3169. +
  3170. +/* TODO: LS1043A SoC has a HW issue regarding FMan DMA transactions; the issue
  3171. + * manifests itself at high traffic rates when frames cross 4K memory
  3172. + * boundaries; for the moment, we use a SW workaround to avoid frames
  3173. + * larger than 4K or frames that cross a 4K address boundary.
  3174. + */
  3175. +
  3176. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  3177. +#define DPAA_LS1043A_DMA_4K_ISSUE 1
  3178. +#endif
  3179. +
  3180. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  3181. +#define HAS_DMA_ISSUE(start, size) \
  3182. + (((unsigned long)(start) ^ ((unsigned long)(start) + \
  3183. + (unsigned long)(size))) & ~0xFFF)
  3184. +
  3185. +#define BOUNDARY_4K(start, size) (((unsigned long)(start) + \
  3186. + (unsigned long)(size)) & ~0xFFF)
  3187. +#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
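A worked example of the detection macro, with hypothetical addresses:

	/*	start = 0x1000fc0, size = 0x100
	 *	start ^ (start + size) = 0x1000fc0 ^ 0x10010c0 = 0x1f00
	 *	0x1f00 & ~0xFFF = 0x1000 != 0, so HAS_DMA_ISSUE() is true:
	 *	the frame crosses a 4K boundary and needs the SW workaround.
	 */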
  3188. +
  3189. +#endif /* __DPA_H */
  3190. --- /dev/null
  3191. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
  3192. @@ -0,0 +1,263 @@
  3193. +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
  3194. + *
  3195. + * Redistribution and use in source and binary forms, with or without
  3196. + * modification, are permitted provided that the following conditions are met:
  3197. + * * Redistributions of source code must retain the above copyright
  3198. + * notice, this list of conditions and the following disclaimer.
  3199. + * * Redistributions in binary form must reproduce the above copyright
  3200. + * notice, this list of conditions and the following disclaimer in the
  3201. + * documentation and/or other materials provided with the distribution.
  3202. + * * Neither the name of Freescale Semiconductor nor the
  3203. + * names of its contributors may be used to endorse or promote products
  3204. + * derived from this software without specific prior written permission.
  3205. + *
  3206. + *
  3207. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3208. + * GNU General Public License ("GPL") as published by the Free Software
  3209. + * Foundation, either version 2 of that License or (at your option) any
  3210. + * later version.
  3211. + *
  3212. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3213. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3214. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3215. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3216. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3217. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3218. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3219. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3220. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3221. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3222. + */
  3223. +
  3224. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  3225. +#define pr_fmt(fmt) \
  3226. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  3227. + KBUILD_BASENAME".c", __LINE__, __func__
  3228. +#else
  3229. +#define pr_fmt(fmt) \
  3230. + KBUILD_MODNAME ": " fmt
  3231. +#endif
  3232. +
  3233. +#include <linux/init.h>
  3234. +#include <linux/module.h>
  3235. +#include <linux/io.h>
  3236. +#include <linux/of_platform.h>
  3237. +#include <linux/of_net.h>
  3238. +#include <linux/etherdevice.h>
  3239. +#include <linux/kthread.h>
  3240. +#include <linux/percpu.h>
  3241. +#include <linux/highmem.h>
  3242. +#include <linux/sort.h>
  3243. +#include <linux/fsl_qman.h>
  3244. +#include "dpaa_eth.h"
  3245. +#include "dpaa_eth_common.h"
  3246. +#include "dpaa_eth_base.h"
  3247. +
  3248. +#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
  3249. +
  3250. +MODULE_LICENSE("Dual BSD/GPL");
  3251. +
  3252. +uint8_t advanced_debug = -1;
  3253. +module_param(advanced_debug, byte, S_IRUGO);
  3254. +MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
  3255. +EXPORT_SYMBOL(advanced_debug);
  3256. +
  3257. +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
  3258. +{
  3259. + return ((struct dpa_bp *)dpa_bp0)->size -
  3260. + ((struct dpa_bp *)dpa_bp1)->size;
  3261. +}
  3262. +
  3263. +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
  3264. +dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
  3265. +{
  3266. + int i, lenp, na, ns, err;
  3267. + struct device *dev;
  3268. + struct device_node *dev_node;
  3269. + const __be32 *bpool_cfg;
  3270. + struct dpa_bp *dpa_bp;
  3271. + u32 bpid;
  3272. +
  3273. + dev = &_of_dev->dev;
  3274. +
  3275. + err = of_count_phandle_with_args(dev->of_node,
  3276. + "fsl,bman-buffer-pools", NULL);
  3277. + if (err < 1) {
  3278. + dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
  3279. + return ERR_PTR(-EINVAL);
  3280. + }
  3281. + *count = (size_t)err;
  3282. + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
  3283. + if (dpa_bp == NULL) {
  3284. + dev_err(dev, "devm_kzalloc() failed\n");
  3285. + return ERR_PTR(-ENOMEM);
  3286. + }
  3287. +
  3288. + dev_node = of_find_node_by_path("/");
  3289. + if (unlikely(dev_node == NULL)) {
  3290. + dev_err(dev, "of_find_node_by_path(/) failed\n");
  3291. + return ERR_PTR(-EINVAL);
  3292. + }
  3293. +
  3294. + na = of_n_addr_cells(dev_node);
  3295. + ns = of_n_size_cells(dev_node);
  3296. +
  3297. + for (i = 0; i < *count; i++) {
  3298. + of_node_put(dev_node);
  3299. +
  3300. + dev_node = of_parse_phandle(dev->of_node,
  3301. + "fsl,bman-buffer-pools", i);
  3302. + if (dev_node == NULL) {
  3303. + dev_err(dev, "of_find_node_by_phandle() failed\n");
  3304. + return ERR_PTR(-EFAULT);
  3305. + }
  3306. +
  3307. + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
  3308. + dev_err(dev,
  3309. + "!of_device_is_compatible(%s, fsl,bpool)\n",
  3310. + dev_node->full_name);
  3311. + dpa_bp = ERR_PTR(-EINVAL);
  3312. + goto _return_of_node_put;
  3313. + }
  3314. +
  3315. + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
  3316. + if (err) {
  3317. + dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
  3318. + dpa_bp = ERR_PTR(-EINVAL);
  3319. + goto _return_of_node_put;
  3320. + }
  3321. + dpa_bp[i].bpid = (uint8_t)bpid;
  3322. +
  3323. + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
  3324. + &lenp);
  3325. + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
  3326. + const uint32_t *seed_pool;
  3327. +
  3328. + dpa_bp[i].config_count =
  3329. + (int)of_read_number(bpool_cfg, ns);
  3330. + dpa_bp[i].size =
  3331. + (size_t)of_read_number(bpool_cfg + ns, ns);
  3332. + dpa_bp[i].paddr =
  3333. + of_read_number(bpool_cfg + 2 * ns, na);
  3334. +
  3335. + seed_pool = of_get_property(dev_node,
  3336. + "fsl,bpool-ethernet-seeds", &lenp);
  3337. + dpa_bp[i].seed_pool = !!seed_pool;
  3338. +
  3339. + } else {
  3340. + dev_err(dev,
  3341. + "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
  3342. + dev_node->full_name);
  3343. + dpa_bp = ERR_PTR(-EINVAL);
  3344. + goto _return_of_node_put;
  3345. + }
  3346. + }
  3347. +
  3348. + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
  3349. +
  3350. + return dpa_bp;
  3351. +
  3352. +_return_of_node_put:
  3353. + if (dev_node)
  3354. + of_node_put(dev_node);
  3355. +
  3356. + return dpa_bp;
  3357. +}
  3358. +EXPORT_SYMBOL(dpa_bp_probe);
  3359. +
  3360. +int dpa_bp_shared_port_seed(struct dpa_bp *bp)
  3361. +{
  3362. + void __iomem **ptr;
  3363. +
  3364. + /* In MAC-less and Shared-MAC scenarios the physical
  3365. + * address of the buffer pool in device tree is set
  3366. + * to 0 to specify that another entity (USDPAA) will
  3367. + * allocate and seed the buffers
  3368. + */
  3369. + if (!bp->paddr)
  3370. + return 0;
  3371. +
  3372. + /* allocate memory region for buffers */
  3373. + devm_request_mem_region(bp->dev, bp->paddr,
  3374. + bp->size * bp->config_count, KBUILD_MODNAME);
  3375. + /* managed ioremap unmapping */
  3376. + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  3377. + if (!ptr)
  3378. + return -EIO;
  3379. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  3380. + bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
  3381. +#else
  3382. + bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
  3383. +#endif
  3384. + if (bp->vaddr == NULL) {
  3385. + pr_err("Could not map memory for pool %d\n", bp->bpid);
  3386. + devres_free(ptr);
  3387. + return -EIO;
  3388. + }
  3389. + *ptr = bp->vaddr;
  3390. + devres_add(bp->dev, ptr);
  3391. +
  3392. + /* seed pool with buffers from that memory region */
  3393. + if (bp->seed_pool) {
  3394. + int count = bp->target_count;
  3395. + dma_addr_t addr = bp->paddr;
  3396. +
  3397. + while (count) {
  3398. + struct bm_buffer bufs[8];
  3399. + uint8_t num_bufs = 0;
  3400. +
  3401. + do {
  3402. + BUG_ON(addr > 0xffffffffffffull);
  3403. + bufs[num_bufs].bpid = bp->bpid;
  3404. + bm_buffer_set64(&bufs[num_bufs++], addr);
  3405. + addr += bp->size;
  3406. +
  3407. + } while (--count && (num_bufs < 8));
  3408. +
  3409. + while (bman_release(bp->pool, bufs, num_bufs, 0))
  3410. + cpu_relax();
  3411. + }
  3412. + }
  3413. +
  3414. + return 0;
  3415. +}
  3416. +EXPORT_SYMBOL(dpa_bp_shared_port_seed);
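A note on the seeding loop above (count hypothetical):

	/* target_count == 20 seeds the pool in releases of 8, 8 and 4
	 * buffers; each bman_release() is retried with cpu_relax() until
	 * BMan accepts the release command.
	 */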
  3417. +
  3418. +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
  3419. + size_t count)
  3420. +{
  3421. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  3422. + int i;
  3423. +
  3424. + priv->dpa_bp = dpa_bp;
  3425. + priv->bp_count = count;
  3426. +
  3427. + for (i = 0; i < count; i++) {
  3428. + int err;
  3429. + err = dpa_bp_alloc(&dpa_bp[i]);
  3430. + if (err < 0) {
  3431. + dpa_bp_free(priv);
  3432. + priv->dpa_bp = NULL;
  3433. + return err;
  3434. + }
  3435. + }
  3436. +
  3437. + return 0;
  3438. +}
  3439. +EXPORT_SYMBOL(dpa_bp_create);
  3440. +
  3441. +static int __init __cold dpa_advanced_load(void)
  3442. +{
  3443. + pr_info(DPA_DESCRIPTION "\n");
  3444. +
  3445. + return 0;
  3446. +}
  3447. +module_init(dpa_advanced_load);
  3448. +
  3449. +static void __exit __cold dpa_advanced_unload(void)
  3450. +{
  3451. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  3452. + KBUILD_BASENAME".c", __func__);
  3453. +
  3454. +}
  3455. +module_exit(dpa_advanced_unload);
  3456. --- /dev/null
  3457. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
  3458. @@ -0,0 +1,50 @@
  3459. +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
  3460. + *
  3461. + * Redistribution and use in source and binary forms, with or without
  3462. + * modification, are permitted provided that the following conditions are met:
  3463. + * * Redistributions of source code must retain the above copyright
  3464. + * notice, this list of conditions and the following disclaimer.
  3465. + * * Redistributions in binary form must reproduce the above copyright
  3466. + * notice, this list of conditions and the following disclaimer in the
  3467. + * documentation and/or other materials provided with the distribution.
  3468. + * * Neither the name of Freescale Semiconductor nor the
  3469. + * names of its contributors may be used to endorse or promote products
  3470. + * derived from this software without specific prior written permission.
  3471. + *
  3472. + *
  3473. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3474. + * GNU General Public License ("GPL") as published by the Free Software
  3475. + * Foundation, either version 2 of that License or (at your option) any
  3476. + * later version.
  3477. + *
  3478. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3479. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3480. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3481. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3482. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3483. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3484. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3485. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3486. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3487. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3488. + */
  3489. +
  3490. +#ifndef __DPAA_ETH_BASE_H
  3491. +#define __DPAA_ETH_BASE_H
  3492. +
  3493. +#include <linux/etherdevice.h> /* struct net_device */
  3494. +#include <linux/fsl_bman.h> /* struct bm_buffer */
  3495. +#include <linux/of_platform.h> /* struct platform_device */
  3496. +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
  3497. +
  3498. +extern uint8_t advanced_debug;
  3499. +extern const struct dpa_fq_cbs_t shared_fq_cbs;
  3500. +extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
  3501. +
  3502. +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
  3503. +dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
  3504. +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
  3505. + size_t count);
  3506. +int dpa_bp_shared_port_seed(struct dpa_bp *bp);
  3507. +
  3508. +#endif /* __DPAA_ETH_BASE_H */
  3509. --- /dev/null
  3510. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
  3511. @@ -0,0 +1,1719 @@
  3512. +/* Copyright 2008-2016 Freescale Semiconductor Inc.
  3513. + *
  3514. + * Redistribution and use in source and binary forms, with or without
  3515. + * modification, are permitted provided that the following conditions are met:
  3516. + * * Redistributions of source code must retain the above copyright
  3517. + * notice, this list of conditions and the following disclaimer.
  3518. + * * Redistributions in binary form must reproduce the above copyright
  3519. + * notice, this list of conditions and the following disclaimer in the
  3520. + * documentation and/or other materials provided with the distribution.
  3521. + * * Neither the name of Freescale Semiconductor nor the
  3522. + * names of its contributors may be used to endorse or promote products
  3523. + * derived from this software without specific prior written permission.
  3524. + *
  3525. + *
  3526. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3527. + * GNU General Public License ("GPL") as published by the Free Software
  3528. + * Foundation, either version 2 of that License or (at your option) any
  3529. + * later version.
  3530. + *
  3531. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3532. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3533. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3534. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3535. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3536. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3537. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3538. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3539. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3540. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3541. + */
  3542. +
  3543. +#include <linux/init.h>
  3544. +#include "dpaa_eth_ceetm.h"
  3545. +
  3546. +#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
  3547. +
  3548. +const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
  3549. + [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
  3550. + [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
  3551. +};
  3552. +
  3553. +struct Qdisc_ops ceetm_qdisc_ops;
  3554. +
  3555. +/* Obtain the DCP and the SP ids from the FMan port */
  3556. +static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
  3557. + unsigned int *sp_id)
  3558. +{
  3559. + uint32_t channel;
  3560. + t_LnxWrpFmPortDev *port_dev;
  3561. + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
  3562. + struct mac_device *mac_dev = dpa_priv->mac_dev;
  3563. +
  3564. + port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
  3565. + channel = port_dev->txCh;
  3566. +
  3567. + *sp_id = channel & CHANNEL_SP_MASK;
  3568. + pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
  3569. +
  3570. + if (channel < DCP0_MAX_CHANNEL) {
  3571. + *dcp_id = qm_dc_portal_fman0;
  3572. + pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
  3573. + } else {
  3574. + *dcp_id = qm_dc_portal_fman1;
  3575. + pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
  3576. + }
  3577. +}
  3578. +
  3579. +/* Enqueue Rejection Notification callback */
  3580. +static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
  3581. + const struct qm_mr_entry *msg)
  3582. +{
  3583. + struct net_device *net_dev;
  3584. + struct ceetm_class *cls;
  3585. + struct ceetm_class_stats *cstats = NULL;
  3586. + const struct dpa_priv_s *dpa_priv;
  3587. + struct dpa_percpu_priv_s *dpa_percpu_priv;
  3588. + struct sk_buff *skb;
  3589. + struct qm_fd fd = msg->ern.fd;
  3590. +
  3591. + net_dev = ((struct ceetm_fq *)fq)->net_dev;
  3592. + dpa_priv = netdev_priv(net_dev);
  3593. + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
  3594. +
  3595. + /* Increment DPA counters */
  3596. + dpa_percpu_priv->stats.tx_dropped++;
  3597. + dpa_percpu_priv->stats.tx_fifo_errors++;
  3598. +
  3599. + /* Increment CEETM counters */
  3600. + cls = ((struct ceetm_fq *)fq)->ceetm_cls;
  3601. + switch (cls->type) {
  3602. + case CEETM_PRIO:
  3603. + cstats = this_cpu_ptr(cls->prio.cstats);
  3604. + break;
  3605. + case CEETM_WBFS:
  3606. + cstats = this_cpu_ptr(cls->wbfs.cstats);
  3607. + break;
  3608. + }
  3609. +
  3610. + if (cstats)
  3611. + cstats->ern_drop_count++;
  3612. +
  3613. + if (fd.bpid != 0xff) {
  3614. + dpa_fd_release(net_dev, &fd);
  3615. + return;
  3616. + }
  3617. +
  3618. + skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
  3619. + dev_kfree_skb_any(skb);
  3620. +}
  3621. +
  3622. +/* Congestion State Change Notification callback */
  3623. +static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
  3624. +{
  3625. + struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
  3626. + struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
  3627. + struct ceetm_class *cls = ceetm_fq->ceetm_cls;
  3628. + struct ceetm_class_stats *cstats = NULL;
  3629. +
  3630. + switch (cls->type) {
  3631. + case CEETM_PRIO:
  3632. + cstats = this_cpu_ptr(cls->prio.cstats);
  3633. + break;
  3634. + case CEETM_WBFS:
  3635. + cstats = this_cpu_ptr(cls->wbfs.cstats);
  3636. + break;
  3637. + }
  3638. +
  3639. + if (congested) {
  3640. + dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
  3641. + netif_tx_stop_all_queues(dpa_priv->net_dev);
  3642. + dpa_priv->cgr_data.cgr_congested_count++;
  3643. + if (cstats)
  3644. + cstats->cgr_congested_count++;
  3645. + } else {
  3646. + dpa_priv->cgr_data.congested_jiffies +=
  3647. + (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
  3648. + netif_tx_wake_all_queues(dpa_priv->net_dev);
  3649. + }
  3650. +}
  3651. +
  3652. +/* Allocate a ceetm fq */
  3653. +static int ceetm_alloc_fq(struct ceetm_fq **fq,
  3654. + struct net_device *dev,
  3655. + struct ceetm_class *cls)
  3656. +{
  3657. + *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
  3658. + if (!*fq)
  3659. + return -ENOMEM;
  3660. +
  3661. + (*fq)->net_dev = dev;
  3662. + (*fq)->ceetm_cls = cls;
  3663. + return 0;
  3664. +}
  3665. +
  3666. +/* Configure a ceetm Class Congestion Group */
  3667. +static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
  3668. + struct qm_ceetm_channel *channel,
  3669. + unsigned int id,
  3670. + struct ceetm_fq *fq,
  3671. + u32 if_support)
  3672. +{
  3673. + int err;
  3674. + u32 cs_th;
  3675. + u16 ccg_mask;
  3676. + struct qm_ceetm_ccg_params ccg_params;
  3677. +
  3678. + err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
  3679. + if (err)
  3680. + return err;
  3681. +
  3682. + /* Configure the count mode (frames/bytes), enable
  3683. + * notifications, enable tail-drop, and configure the tail-drop
  3684. + * mode and threshold */
  3685. + ccg_mask = QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN |
  3686. + QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
  3687. + QM_CCGR_WE_TD_THRES;
  3688. +
  3689. + ccg_params.mode = 0; /* count bytes */
  3690. + ccg_params.cscn_en = 1; /* generate notifications */
  3691. + ccg_params.td_en = 1; /* enable tail-drop */
  3692. + ccg_params.td_mode = 1; /* tail-drop on threshold */
  3693. +
  3694. + /* Configure the tail-drop threshold according to the link
  3695. + * speed */
  3696. + if (if_support & SUPPORTED_10000baseT_Full)
  3697. + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
  3698. + else
  3699. + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
  3700. + qm_cgr_cs_thres_set64(&ccg_params.td_thres, cs_th, 1);
  3701. +
  3702. + err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
  3703. + if (err)
  3704. + return err;
  3705. +
  3706. + return 0;
  3707. +}
  3708. +
  3709. +/* Configure a ceetm Logical Frame Queue */
  3710. +static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
  3711. + struct qm_ceetm_lfq **lfq)
  3712. +{
  3713. + int err;
  3714. + u64 context_a;
  3715. + u32 context_b;
  3716. +
  3717. + err = qman_ceetm_lfq_claim(lfq, cq);
  3718. + if (err)
  3719. + return err;
  3720. +
  3721. + /* Get the former contexts in order to preserve context B */
  3722. + err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
  3723. + if (err)
  3724. + return err;
  3725. +
  3726. + context_a = CEETM_CONTEXT_A;
  3727. + err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
  3728. + if (err)
  3729. + return err;
  3730. +
  3731. + (*lfq)->ern = ceetm_ern;
  3732. +
  3733. + err = qman_ceetm_create_fq(*lfq, &fq->fq);
  3734. + if (err)
  3735. + return err;
  3736. +
  3737. + return 0;
  3738. +}
  3739. +
  3740. +/* Configure a prio ceetm class */
  3741. +static int ceetm_config_prio_cls(struct ceetm_class *cls, struct net_device *dev,
  3742. + struct qm_ceetm_channel *channel, unsigned int id)
  3743. +{
  3744. + int err;
  3745. + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
  3746. +
  3747. + err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
  3748. + if (err)
  3749. + return err;
  3750. +
  3751. + /* Claim and configure the CCG */
  3752. + err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
  3753. + dpa_priv->mac_dev->if_support);
  3754. + if (err)
  3755. + return err;
  3756. +
  3757. + /* Claim and configure the CQ */
  3758. + err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
  3759. + if (err)
  3760. + return err;
  3761. +
  3762. + if (cls->shaped) {
  3763. + err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
  3764. + if (err)
  3765. + return err;
  3766. +
  3767. + err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
  3768. + if (err)
  3769. + return err;
  3770. + }
  3771. +
  3772. + /* Claim and configure a LFQ */
  3773. + err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
  3774. + if (err)
  3775. + return err;
  3776. +
  3777. + return 0;
  3778. +}
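+
+/* Note: the claim order used above (FQ, then CCG, CQ and LFQ) is undone in
+ * reverse by ceetm_cls_destroy(), which releases the LFQ, CQ and CCG before
+ * freeing the FQ.
+ */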
  3779. +
  3780. +/* Configure a wbfs ceetm class */
  3781. +static int ceetm_config_wbfs_cls(struct ceetm_class *cls, struct net_device *dev,
  3782. + struct qm_ceetm_channel *channel, unsigned int id, int type)
  3783. +{
  3784. + int err;
  3785. + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
  3786. +
  3787. + err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
  3788. + if (err)
  3789. + return err;
  3790. +
  3791. + /* Claim and configure the CCG */
  3792. + err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
  3793. + dpa_priv->mac_dev->if_support);
  3794. + if (err)
  3795. + return err;
  3796. +
  3797. + /* Claim and configure the CQ */
  3798. + if (type == WBFS_GRP_B)
  3799. + err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
  3800. + cls->wbfs.ccg);
  3801. + else
  3802. + err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
  3803. + cls->wbfs.ccg);
  3804. + if (err)
  3805. + return err;
  3806. +
  3807. + /* Configure the CQ weight: real number multiplied by 100 to get rid
  3808. + * of the fraction */
  3809. + err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
  3810. + cls->wbfs.weight * 100);
  3811. + if (err)
  3812. + return err;
  3813. +
  3814. + /* Claim and configure a LFQ */
  3815. + err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
  3816. + if (err)
  3817. + return err;
  3818. +
  3819. + return 0;
  3820. +}
  3821. +
  3822. +/* Find class in qdisc hash table using given handle */
  3823. +static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
  3824. +{
  3825. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  3826. + struct Qdisc_class_common *clc;
  3827. +
  3828. + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
  3829. + __func__, handle, sch->handle);
  3830. +
  3831. + clc = qdisc_class_find(&priv->clhash, handle);
  3832. + return clc ? container_of(clc, struct ceetm_class, common) : NULL;
  3833. +}
  3834. +
  3835. +/* Insert a class in the qdisc's class hash */
  3836. +static void ceetm_link_class(struct Qdisc *sch,
  3837. + struct Qdisc_class_hash *clhash,
  3838. + struct Qdisc_class_common *common)
  3839. +{
  3840. + sch_tree_lock(sch);
  3841. + qdisc_class_hash_insert(clhash, common);
  3842. + sch_tree_unlock(sch);
  3843. + qdisc_class_hash_grow(sch, clhash);
  3844. +}
  3845. +
  3846. +/* Destroy a ceetm class */
  3847. +static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
  3848. +{
  3849. + if (!cl)
  3850. + return;
  3851. +
  3852. + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
  3853. + __func__, cl->common.classid, sch->handle);
  3854. +
  3855. + switch (cl->type) {
  3856. + case CEETM_ROOT:
  3857. + if (cl->root.child) {
  3858. + qdisc_destroy(cl->root.child);
  3859. + cl->root.child = NULL;
  3860. + }
  3861. +
  3862. + if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
  3863. + pr_err(KBUILD_BASENAME
  3864. + " : %s : error releasing the channel %d\n",
  3865. + __func__, cl->root.ch->idx);
  3866. +
  3867. + break;
  3868. +
  3869. + case CEETM_PRIO:
  3870. + if (cl->prio.child) {
  3871. + qdisc_destroy(cl->prio.child);
  3872. + cl->prio.child = NULL;
  3873. + }
  3874. +
  3875. + if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
  3876. + pr_err(KBUILD_BASENAME
  3877. + " : %s : error releasing the LFQ %d\n",
  3878. + __func__, cl->prio.lfq->idx);
  3879. +
  3880. + if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
  3881. + pr_err(KBUILD_BASENAME
  3882. + " : %s : error releasing the CQ %d\n",
  3883. + __func__, cl->prio.cq->idx);
  3884. +
  3885. + if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
  3886. + pr_err(KBUILD_BASENAME
  3887. + " : %s : error releasing the CCG %d\n",
  3888. + __func__, cl->prio.ccg->idx);
  3889. +
  3890. + kfree(cl->prio.fq);
  3892. +
  3893. + if (cl->prio.cstats)
  3894. + free_percpu(cl->prio.cstats);
  3895. +
  3896. + break;
  3897. +
  3898. + case CEETM_WBFS:
  3899. + if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
  3900. + pr_err(KBUILD_BASENAME
  3901. + " : %s : error releasing the LFQ %d\n",
  3902. + __func__, cl->wbfs.lfq->idx);
  3903. +
  3904. + if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
  3905. + pr_err(KBUILD_BASENAME
  3906. + " : %s : error releasing the CQ %d\n",
  3907. + __func__, cl->wbfs.cq->idx);
  3908. +
  3909. + if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
  3910. + pr_err(KBUILD_BASENAME
  3911. + " : %s : error releasing the CCG %d\n",
  3912. + __func__, cl->wbfs.ccg->idx);
  3913. +
  3914. + kfree(cl->wbfs.fq);
  3916. +
  3917. + if (cl->wbfs.cstats)
  3918. + free_percpu(cl->wbfs.cstats);
  3919. + }
  3920. +
  3921. + tcf_destroy_chain(&cl->filter_list);
  3922. + kfree(cl);
  3923. +}
  3924. +
  3925. +/* Destroy a ceetm qdisc */
  3926. +static void ceetm_destroy(struct Qdisc *sch)
  3927. +{
  3928. + unsigned int ntx, i;
  3929. + struct hlist_node *next;
  3930. + struct ceetm_class *cl;
  3931. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  3932. + struct net_device *dev = qdisc_dev(sch);
  3933. +
  3934. + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
  3935. + __func__, sch->handle);
  3936. +
  3937. + /* All filters need to be removed before destroying the classes */
  3938. + tcf_destroy_chain(&priv->filter_list);
  3939. +
  3940. + for (i = 0; i < priv->clhash.hashsize; i++) {
  3941. + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
  3942. + tcf_destroy_chain(&cl->filter_list);
  3943. + }
  3944. +
  3945. + for (i = 0; i < priv->clhash.hashsize; i++) {
  3946. + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
  3947. + common.hnode)
  3948. + ceetm_cls_destroy(sch, cl);
  3949. + }
  3950. +
  3951. + qdisc_class_hash_destroy(&priv->clhash);
  3952. +
  3953. + switch (priv->type) {
  3954. + case CEETM_ROOT:
  3955. + dpa_disable_ceetm(dev);
  3956. +
  3957. + if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
  3958. + pr_err(KBUILD_BASENAME
  3959. + " : %s : error releasing the LNI %d\n",
  3960. + __func__, priv->root.lni->idx);
  3961. +
  3962. + if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
  3963. + pr_err(KBUILD_BASENAME
  3964. + " : %s : error releasing the SP %d\n",
  3965. + __func__, priv->root.sp->idx);
  3966. +
  3967. + if (priv->root.qstats)
  3968. + free_percpu(priv->root.qstats);
  3969. +
  3970. + if (!priv->root.qdiscs)
  3971. + break;
  3972. +
  3973. + /* Remove the pfifo qdiscs */
  3974. + for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
  3975. + if (priv->root.qdiscs[ntx])
  3976. + qdisc_destroy(priv->root.qdiscs[ntx]);
  3977. +
  3978. + kfree(priv->root.qdiscs);
  3979. + break;
  3980. +
  3981. + case CEETM_PRIO:
  3982. + if (priv->prio.parent)
  3983. + priv->prio.parent->root.child = NULL;
  3984. + break;
  3985. +
  3986. + case CEETM_WBFS:
  3987. + if (priv->wbfs.parent)
  3988. + priv->wbfs.parent->prio.child = NULL;
  3989. + break;
  3990. + }
  3991. +}
  3992. +
  3993. +static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
  3994. +{
  3995. + struct Qdisc *qdisc;
  3996. + unsigned int ntx, i;
  3997. + struct nlattr *nest;
  3998. + struct tc_ceetm_qopt qopt;
  3999. + struct ceetm_qdisc_stats *qstats;
  4000. + struct net_device *dev = qdisc_dev(sch);
  4001. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  4002. +
  4003. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4004. +
  4005. + sch_tree_lock(sch);
  4006. + memset(&qopt, 0, sizeof(qopt));
  4007. + qopt.type = priv->type;
  4008. + qopt.shaped = priv->shaped;
  4009. +
  4010. + switch (priv->type) {
  4011. + case CEETM_ROOT:
  4012. + /* Gather statistics from the underlying pfifo qdiscs */
  4013. + sch->q.qlen = 0;
  4014. + memset(&sch->bstats, 0, sizeof(sch->bstats));
  4015. + memset(&sch->qstats, 0, sizeof(sch->qstats));
  4016. +
  4017. + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
  4018. + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
  4019. + sch->q.qlen += qdisc->q.qlen;
  4020. + sch->bstats.bytes += qdisc->bstats.bytes;
  4021. + sch->bstats.packets += qdisc->bstats.packets;
  4022. + sch->qstats.qlen += qdisc->qstats.qlen;
  4023. + sch->qstats.backlog += qdisc->qstats.backlog;
  4024. + sch->qstats.drops += qdisc->qstats.drops;
  4025. + sch->qstats.requeues += qdisc->qstats.requeues;
  4026. + sch->qstats.overlimits += qdisc->qstats.overlimits;
  4027. + }
  4028. +
  4029. + for_each_online_cpu(i) {
  4030. + qstats = per_cpu_ptr(priv->root.qstats, i);
  4031. + sch->qstats.drops += qstats->drops;
  4032. + }
  4033. +
  4034. + qopt.rate = priv->root.rate;
  4035. + qopt.ceil = priv->root.ceil;
  4036. + qopt.overhead = priv->root.overhead;
  4037. + break;
  4038. +
  4039. + case CEETM_PRIO:
  4040. + qopt.qcount = priv->prio.qcount;
  4041. + break;
  4042. +
  4043. + case CEETM_WBFS:
  4044. + qopt.qcount = priv->wbfs.qcount;
  4045. + qopt.cr = priv->wbfs.cr;
  4046. + qopt.er = priv->wbfs.er;
  4047. + break;
  4048. +
  4049. + default:
  4050. + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
  4051. + sch_tree_unlock(sch);
  4052. + return -EINVAL;
  4053. + }
  4054. +
  4055. + nest = nla_nest_start(skb, TCA_OPTIONS);
  4056. + if (nest == NULL)
  4057. + goto nla_put_failure;
  4058. + if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
  4059. + goto nla_put_failure;
  4060. + nla_nest_end(skb, nest);
  4061. +
  4062. + sch_tree_unlock(sch);
  4063. + return skb->len;
  4064. +
  4065. +nla_put_failure:
  4066. + sch_tree_unlock(sch);
  4067. + nla_nest_cancel(skb, nest);
  4068. + return -EMSGSIZE;
  4069. +}
  4070. +
  4071. +/* Configure a root ceetm qdisc */
  4072. +static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
  4073. + struct tc_ceetm_qopt *qopt)
  4074. +{
  4075. + struct netdev_queue *dev_queue;
  4076. + struct Qdisc *qdisc;
  4077. + enum qm_dc_portal dcp_id;
  4078. + unsigned int i, sp_id;
  4079. + int err;
  4080. + u64 bps;
  4081. + struct qm_ceetm_sp *sp;
  4082. + struct qm_ceetm_lni *lni;
  4083. + struct net_device *dev = qdisc_dev(sch);
  4084. + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
  4085. + struct mac_device *mac_dev = dpa_priv->mac_dev;
  4086. +
  4087. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4088. +
  4089. + /* Validate inputs */
  4090. + if (sch->parent != TC_H_ROOT) {
  4091. + pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
  4092. + tcf_destroy_chain(&priv->filter_list);
  4093. + qdisc_class_hash_destroy(&priv->clhash);
  4094. + return -EINVAL;
  4095. + }
  4096. +
  4097. + if (!mac_dev) {
  4098. + pr_err("CEETM: the interface is lacking a mac\n");
  4099. + err = -EINVAL;
  4100. + goto err_init_root;
  4101. + }
  4102. +
  4103. + /* pre-allocate underlying pfifo qdiscs */
  4104. + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
  4105. + sizeof(priv->root.qdiscs[0]),
  4106. + GFP_KERNEL);
  4107. + if (priv->root.qdiscs == NULL) {
  4108. + err = -ENOMEM;
  4109. + goto err_init_root;
  4110. + }
  4111. +
  4112. + for (i = 0; i < dev->num_tx_queues; i++) {
  4113. + dev_queue = netdev_get_tx_queue(dev, i);
  4114. + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
  4115. + TC_H_MAKE(TC_H_MAJ(sch->handle),
  4116. + TC_H_MIN(i + PFIFO_MIN_OFFSET)));
  4117. + if (qdisc == NULL) {
  4118. + err = -ENOMEM;
  4119. + goto err_init_root;
  4120. + }
  4121. +
  4122. + priv->root.qdiscs[i] = qdisc;
  4123. + qdisc->flags |= TCQ_F_ONETXQUEUE;
  4124. + }
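+ /* Minors 0x01-0x20 stay reserved for the ceetm root classes; the pfifos
+ * attached above use minors starting at PFIFO_MIN_OFFSET (0x21). */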
  4125. +
  4126. + sch->flags |= TCQ_F_MQROOT;
  4127. +
  4128. + priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
  4129. + if (!priv->root.qstats) {
  4130. + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
  4131. + __func__);
  4132. + err = -ENOMEM;
  4133. + goto err_init_root;
  4134. + }
  4135. +
  4136. + priv->shaped = qopt->shaped;
  4137. + priv->root.rate = qopt->rate;
  4138. + priv->root.ceil = qopt->ceil;
  4139. + priv->root.overhead = qopt->overhead;
  4140. +
  4141. + /* Claim the SP */
  4142. + get_dcp_and_sp(dev, &dcp_id, &sp_id);
  4143. + err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
  4144. + if (err) {
  4145. + pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
  4146. + __func__);
  4147. + goto err_init_root;
  4148. + }
  4149. +
  4150. + priv->root.sp = sp;
  4151. +
  4152. + /* Claim the LNI - will use the same id as the SP id since SPs 0-7
  4153. + * are connected to the TX FMan ports */
  4154. + err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
  4155. + if (err) {
  4156. + pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
  4157. + __func__);
  4158. + goto err_init_root;
  4159. + }
  4160. +
  4161. + priv->root.lni = lni;
  4162. +
  4163. + err = qman_ceetm_sp_set_lni(sp, lni);
  4164. + if (err) {
  4165. + pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
  4166. + __func__);
  4167. + goto err_init_root;
  4168. + }
  4169. +
  4170. + lni->sp = sp;
  4171. +
  4172. + /* Configure the LNI shaper */
  4173. + if (priv->shaped) {
  4174. + err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
  4175. + if (err) {
  4176. + pr_err(KBUILD_BASENAME " : %s : failed to enable the LNI shaper\n",
  4177. + __func__);
  4178. + goto err_init_root;
  4179. + }
  4180. +
  4181. + bps = priv->root.rate << 3; /* Bps -> bps */
  4182. + err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
  4183. + if (err) {
  4184. + pr_err(KBUILD_BASENAME " : %s : failed to set the LNI commit rate\n",
  4185. + __func__);
  4186. + goto err_init_root;
  4187. + }
  4188. +
  4189. + bps = priv->root.ceil << 3; /* Bps -> bps */
  4190. + err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
  4191. + if (err) {
  4192. + pr_err(KBUILD_BASENAME " : %s : failed to set the LNI excess rate\n",
  4193. + __func__);
  4194. + goto err_init_root;
  4195. + }
  4196. + }
  4197. +
  4198. + /* TODO default configuration */
  4199. +
  4200. + dpa_enable_ceetm(dev);
  4201. + return 0;
  4202. +
  4203. +err_init_root:
  4204. + ceetm_destroy(sch);
  4205. + return err;
  4206. +}
  4207. +
  4208. +/* Configure a prio ceetm qdisc */
  4209. +static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
  4210. + struct tc_ceetm_qopt *qopt)
  4211. +{
  4212. + int err;
  4213. + unsigned int i;
  4214. + struct ceetm_class *parent_cl, *child_cl;
  4215. + struct Qdisc *parent_qdisc;
  4216. + struct net_device *dev = qdisc_dev(sch);
  4217. +
  4218. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4219. +
  4220. + if (sch->parent == TC_H_ROOT) {
  4221. + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
  4222. + err = -EINVAL;
  4223. + goto err_init_prio;
  4224. + }
  4225. +
  4226. + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
  4227. + if (!parent_qdisc || strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
  4228. + pr_err("CEETM: a ceetm qdisc cannot be attached to other "
  4229. + "qdisc/class types\n");
  4230. + err = -EINVAL;
  4231. + goto err_init_prio;
  4232. + }
  4233. +
  4234. + /* Obtain the parent root ceetm_class */
  4235. + parent_cl = ceetm_find(sch->parent, parent_qdisc);
  4236. +
  4237. + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
  4238. + pr_err("CEETM: a prio ceetm qdiscs can be added only under a "
  4239. + "root ceetm class\n");
  4240. + err = -EINVAL;
  4241. + goto err_init_prio;
  4242. + }
  4243. +
  4244. + priv->prio.parent = parent_cl;
  4245. + parent_cl->root.child = sch;
  4246. +
  4247. + priv->shaped = parent_cl->shaped;
  4248. + priv->prio.qcount = qopt->qcount;
  4249. +
  4250. + /* Create and configure qcount child classes */
  4251. + for (i = 0; i < priv->prio.qcount; i++) {
  4252. + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
  4253. + if (!child_cl) {
  4254. + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
  4255. + __func__);
  4256. + err = -ENOMEM;
  4257. + goto err_init_prio;
  4258. + }
  4259. +
  4260. + child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
  4261. + if (!child_cl->prio.cstats) {
  4262. + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
  4263. + __func__);
  4264. + err = -ENOMEM;
  4265. + goto err_init_prio_cls;
  4266. + }
  4267. +
  4268. + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
  4269. + child_cl->refcnt = 1;
  4270. + child_cl->parent = sch;
  4271. + child_cl->type = CEETM_PRIO;
  4272. + child_cl->shaped = priv->shaped;
  4273. + child_cl->prio.child = NULL;
  4274. +
  4275. + /* All shaped CQs have CR and ER enabled by default */
  4276. + child_cl->prio.cr = child_cl->shaped;
  4277. + child_cl->prio.er = child_cl->shaped;
  4278. + child_cl->prio.fq = NULL;
  4279. + child_cl->prio.cq = NULL;
  4280. +
  4281. + /* Configure the corresponding hardware CQ */
  4282. + err = ceetm_config_prio_cls(child_cl, dev,
  4283. + parent_cl->root.ch, i);
  4284. + if (err) {
  4285. + pr_err(KBUILD_BASENAME " : %s : failed to configure "
  4286. + "the ceetm prio class %X\n",
  4287. + __func__,
  4288. + child_cl->common.classid);
  4289. + goto err_init_prio_cls;
  4290. + }
  4291. +
  4292. + /* Add class handle in Qdisc */
  4293. + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
  4294. + pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X "
  4295. + "associated with CQ %d and CCG %d\n",
  4296. + __func__,
  4297. + child_cl->common.classid,
  4298. + child_cl->prio.cq->idx,
  4299. + child_cl->prio.ccg->idx);
  4300. + }
  4301. +
  4302. + return 0;
  4303. +
  4304. +err_init_prio_cls:
  4305. + ceetm_cls_destroy(sch, child_cl);
  4306. +err_init_prio:
  4307. + ceetm_destroy(sch);
  4308. + return err;
  4309. +}
  4310. +
  4311. +/* Configure a wbfs ceetm qdisc */
  4312. +static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
  4313. + struct tc_ceetm_qopt *qopt)
  4314. +{
  4315. + int err, group_b, small_group;
  4316. + unsigned int i, id, prio_a, prio_b;
  4317. + struct ceetm_class *parent_cl, *child_cl, *root_cl;
  4318. + struct Qdisc *parent_qdisc;
  4319. + struct ceetm_qdisc *parent_priv;
  4320. + struct qm_ceetm_channel *channel;
  4321. + struct net_device *dev = qdisc_dev(sch);
  4322. +
  4323. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4324. +
  4325. + /* Validate inputs */
  4326. + if (sch->parent == TC_H_ROOT) {
  4327. + pr_err("CEETM: a wbfs ceetm qdiscs can not be root\n");
  4328. + err = -EINVAL;
  4329. + goto err_init_wbfs;
  4330. + }
  4331. +
  4332. + /* Obtain the parent prio ceetm qdisc */
  4333. + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
  4334. + if (!parent_qdisc || strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
  4335. + pr_err("CEETM: a ceetm qdisc cannot be attached to other "
  4336. + "qdisc/class types\n");
  4337. + err = -EINVAL;
  4338. + goto err_init_wbfs;
  4339. + }
  4340. +
  4341. + /* Obtain the parent prio ceetm class */
  4342. + parent_cl = ceetm_find(sch->parent, parent_qdisc);
  4343. + parent_priv = qdisc_priv(parent_qdisc);
  4344. +
  4345. + if (!parent_cl || parent_cl->type != CEETM_PRIO) {
  4346. + pr_err("CEETM: a wbfs ceetm qdiscs can be added only under a "
  4347. + "prio ceetm class\n");
  4348. + err = -EINVAL;
  4349. + goto err_init_wbfs;
  4350. + }
  4351. +
  4352. + priv->shaped = parent_cl->shaped;
  4353. +
  4354. + if (!priv->shaped && (qopt->cr || qopt->er)) {
  4355. + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs "
  4356. + "ceetm qdiscs\n");
  4357. + err = -EINVAL;
  4358. + goto err_init_wbfs;
  4359. + }
  4360. +
  4361. + if (priv->shaped && !(qopt->cr || qopt->er)) {
  4362. + pr_err("CEETM: either CR or ER must be enabled for shaped "
  4363. + "wbfs ceetm qdiscs\n");
  4364. + err = -EINVAL;
  4365. + goto err_init_wbfs;
  4366. + }
  4367. +
  4368. + /* Obtain the parent root ceetm class */
  4369. + root_cl = parent_priv->prio.parent;
  4370. + if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
  4371. + root_cl->root.wbfs_grp_large) {
  4372. + pr_err("CEETM: no more wbfs classes are available\n");
  4373. + err = -EINVAL;
  4374. + goto err_init_wbfs;
  4375. + }
  4376. +
  4377. + if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
  4378. + qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
  4379. + pr_err("CEETM: only %d wbfs classes are available\n",
  4380. + CEETM_MIN_WBFS_QCOUNT);
  4381. + err = -EINVAL;
  4382. + goto err_init_wbfs;
  4383. + }
  4384. +
  4385. + priv->wbfs.parent = parent_cl;
  4386. + parent_cl->prio.child = sch;
  4387. +
  4388. + priv->wbfs.qcount = qopt->qcount;
  4389. + priv->wbfs.cr = qopt->cr;
  4390. + priv->wbfs.er = qopt->er;
  4391. +
  4392. + channel = root_cl->root.ch;
  4393. +
  4394. + /* Configure the hardware wbfs channel groups */
  4395. + if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
  4396. + /* Configure the large group A */
  4397. + priv->wbfs.group_type = WBFS_GRP_LARGE;
  4398. + small_group = false;
  4399. + group_b = false;
  4400. + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
  4401. + prio_b = prio_a;
  4402. +
  4403. + } else if (root_cl->root.wbfs_grp_a) {
  4404. + /* Configure the group B */
  4405. + priv->wbfs.group_type = WBFS_GRP_B;
  4406. +
  4407. + err = qman_ceetm_channel_get_group(channel, &small_group,
  4408. + &prio_a, &prio_b);
  4409. + if (err) {
  4410. + pr_err(KBUILD_BASENAME " : %s : failed to get group "
  4411. + "details\n", __func__);
  4412. + goto err_init_wbfs;
  4413. + }
  4414. +
  4415. + small_group = true;
  4416. + group_b = true;
  4417. + prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
  4418. + /* If group A isn't already configured, mirror group B's priority */
  4419. + prio_a = prio_a ? : prio_b;
  4420. +
  4421. + } else {
  4422. + /* Configure the small group A */
  4423. + priv->wbfs.group_type = WBFS_GRP_A;
  4424. +
  4425. + err = qman_ceetm_channel_get_group(channel, &small_group,
  4426. + &prio_a, &prio_b);
  4427. + if (err) {
  4428. + pr_err(KBUILD_BASENAME " : %s : failed to get group "
  4429. + "details\n", __func__);
  4430. + goto err_init_wbfs;
  4431. + }
  4432. +
  4433. + small_group = true;
  4434. + group_b = false;
  4435. + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
  4436. + /* If group B isn't already configured, mirror group A's priority */
  4437. + prio_b = prio_b ? : prio_a;
  4438. + }
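+ /* Note: "x ? : y" is the GNU ?: extension - each branch above keeps a
+ * group's current priority when one is already set and mirrors the
+ * sibling group's priority otherwise. */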
  4439. +
  4440. + err = qman_ceetm_channel_set_group(channel, small_group, prio_a, prio_b);
  4441. + if (err)
  4442. + goto err_init_wbfs;
  4443. +
  4444. + if (priv->shaped) {
  4445. + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
  4446. + group_b,
  4447. + priv->wbfs.cr);
  4448. + if (err) {
  4449. + pr_err(KBUILD_BASENAME " : %s : failed to set group "
  4450. + "CR eligibility\n", __func__);
  4451. + goto err_init_wbfs;
  4452. + }
  4453. +
  4454. + err = qman_ceetm_channel_set_group_er_eligibility(channel,
  4455. + group_b,
  4456. + priv->wbfs.er);
  4457. + if (err) {
  4458. + pr_err(KBUILD_BASENAME " : %s : failed to set group "
  4459. + "ER eligibility\n", __func__);
  4460. + goto err_init_wbfs;
  4461. + }
  4462. + }
  4463. +
  4464. + /* Create qcount child classes */
  4465. + for (i = 0; i < priv->wbfs.qcount; i++) {
  4466. + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
  4467. + if (!child_cl) {
  4468. + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
  4469. + __func__);
  4470. + err = -ENOMEM;
  4471. + goto err_init_wbfs;
  4472. + }
  4473. +
  4474. + child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
  4475. + if (!child_cl->wbfs.cstats) {
  4476. + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
  4477. + __func__);
  4478. + err = -ENOMEM;
  4479. + goto err_init_wbfs_cls;
  4480. + }
  4481. +
  4482. + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
  4483. + child_cl->refcnt = 1;
  4484. + child_cl->parent = sch;
  4485. + child_cl->type = CEETM_WBFS;
  4486. + child_cl->shaped = priv->shaped;
  4487. + child_cl->wbfs.fq = NULL;
  4488. + child_cl->wbfs.cq = NULL;
  4489. + child_cl->wbfs.weight = qopt->qweight[i];
  4490. +
  4491. + if (priv->wbfs.group_type == WBFS_GRP_B)
  4492. + id = WBFS_GRP_B_OFFSET + i;
  4493. + else
  4494. + id = WBFS_GRP_A_OFFSET + i;
  4495. +
  4496. + err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
  4497. + priv->wbfs.group_type);
  4498. + if (err) {
  4499. + pr_err(KBUILD_BASENAME " : %s : failed to configure "
  4500. + "the ceetm wbfs class %X\n",
  4501. + __func__,
  4502. + child_cl->common.classid);
  4503. + goto err_init_wbfs_cls;
  4504. + }
  4505. +
  4506. + /* Add class handle in Qdisc */
  4507. + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
  4508. + pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X "
  4509. + "associated with CQ %d and CCG %d\n",
  4510. + __func__,
  4511. + child_cl->common.classid,
  4512. + child_cl->wbfs.cq->idx,
  4513. + child_cl->wbfs.ccg->idx);
  4514. + }
  4515. +
  4516. + /* Signal the root class that a group has been configured */
  4517. + switch (priv->wbfs.group_type) {
  4518. + case WBFS_GRP_LARGE:
  4519. + root_cl->root.wbfs_grp_large = true;
  4520. + break;
  4521. + case WBFS_GRP_A:
  4522. + root_cl->root.wbfs_grp_a = true;
  4523. + break;
  4524. + case WBFS_GRP_B:
  4525. + root_cl->root.wbfs_grp_b = true;
  4526. + break;
  4527. + }
  4528. +
  4529. + return 0;
  4530. +
  4531. +err_init_wbfs_cls:
  4532. + ceetm_cls_destroy(sch, child_cl);
  4533. +err_init_wbfs:
  4534. + ceetm_destroy(sch);
  4535. + return err;
  4536. +}
  4537. +
  4538. +/* Configure a generic ceetm qdisc */
  4539. +static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
  4540. +{
  4541. + struct tc_ceetm_qopt *qopt;
  4542. + struct nlattr *tb[TCA_CEETM_QOPS + 1];
  4543. + int ret;
  4544. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  4545. + struct net_device *dev = qdisc_dev(sch);
  4546. +
  4547. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4548. +
  4549. + if (!netif_is_multiqueue(dev))
  4550. + return -EOPNOTSUPP;
  4551. +
  4552. + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
  4553. + if (ret < 0) {
  4554. + pr_err(KBUILD_BASENAME " : %s : failed to parse qdisc options\n",
  __func__);
  4555. + return ret;
  4556. + }
  4557. +
  4558. + if (tb[TCA_CEETM_QOPS] == NULL) {
  4559. + pr_err(KBUILD_BASENAME " : %s : missing ceetm qdisc options\n",
  __func__);
  4560. + return -EINVAL;
  4561. + }
  4562. +
  4563. + if (TC_H_MIN(sch->handle)) {
  4564. + pr_err("CEETM: a qdisc should not have a minor\n");
  4565. + return -EINVAL;
  4566. + }
  4567. +
  4568. + qopt = nla_data(tb[TCA_CEETM_QOPS]);
  4569. +
  4570. + /* Initialize the class hash list. Each qdisc has its own class hash */
  4571. + ret = qdisc_class_hash_init(&priv->clhash);
  4572. + if (ret < 0) {
  4573. + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init "
  4574. + "failed\n", __func__);
  4575. + return ret;
  4576. + }
  4577. +
  4578. + priv->type = qopt->type;
  4579. +
  4580. + switch (priv->type) {
  4581. + case CEETM_ROOT:
  4582. + ret = ceetm_init_root(sch, priv, qopt);
  4583. + break;
  4584. + case CEETM_PRIO:
  4585. + ret = ceetm_init_prio(sch, priv, qopt);
  4586. + break;
  4587. + case CEETM_WBFS:
  4588. + ret = ceetm_init_wbfs(sch, priv, qopt);
  4589. + break;
  4590. + default:
  4591. + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
  4592. + ceetm_destroy(sch);
  4593. + ret = -EINVAL;
  4594. + }
  4595. +
  4596. + return ret;
  4597. +}
  4598. +
  4599. +/* Attach the underlying pfifo qdiscs */
  4600. +static void ceetm_attach(struct Qdisc *sch)
  4601. +{
  4602. + struct net_device *dev = qdisc_dev(sch);
  4603. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  4604. + struct Qdisc *qdisc, *old_qdisc;
  4605. + unsigned int i;
  4606. +
  4607. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4608. +
  4609. + for (i = 0; i < dev->num_tx_queues; i++) {
  4610. + qdisc = priv->root.qdiscs[i];
  4611. + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
  4612. + if (old_qdisc)
  4613. + qdisc_destroy(old_qdisc);
  4614. + }
  4615. +}
  4616. +
  4617. +static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
  4618. +{
  4619. + struct ceetm_class *cl;
  4620. + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
  4621. + __func__, classid, sch->handle);
  4622. + cl = ceetm_find(classid, sch);
  4623. +
  4624. + if (cl)
  4625. + cl->refcnt++; /* Will decrement in put() */
  4626. + return (unsigned long)cl;
  4627. +}
  4628. +
  4629. +static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
  4630. +{
  4631. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  4632. + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
  4633. + __func__, cl->common.classid, sch->handle);
  4634. + cl->refcnt--;
  4635. +
  4636. + if (cl->refcnt == 0)
  4637. + ceetm_cls_destroy(sch, cl);
  4638. +}
  4639. +
  4640. +/* Add a ceetm root class or configure a ceetm prio class */
  4641. +static int ceetm_cls_change(struct Qdisc *sch, u32 classid,
  4642. + u32 parentid, struct nlattr **tca,
  4643. + unsigned long *arg)
  4644. +{
  4645. + int err;
  4646. + u64 bps;
  4647. + struct ceetm_qdisc *priv;
  4648. + struct ceetm_class *cl = (struct ceetm_class *)*arg;
  4649. + struct nlattr *opt = tca[TCA_OPTIONS];
  4650. + struct nlattr *tb[__TCA_CEETM_MAX];
  4651. + struct tc_ceetm_copt *copt;
  4652. + struct qm_ceetm_channel *channel;
  4653. + struct net_device *dev = qdisc_dev(sch);
  4654. +
  4655. + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
  4656. + __func__, classid, sch->handle);
  4657. +
  4658. + if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
  4659. + pr_err("CEETM: a ceetm class can not be attached to other "
  4660. + "qdisc/class types\n");
  4661. + return -EINVAL;
  4662. + }
  4663. +
  4664. + priv = qdisc_priv(sch);
  4665. +
  4666. + if (!opt) {
  4667. + pr_err(KBUILD_BASENAME " : %s : missing class options\n", __func__);
  4668. + return -EINVAL;
  4669. + }
  4670. +
  4671. + if (!cl && sch->handle != parentid) {
  4672. + pr_err("CEETM: classes can be attached to the root ceetm "
  4673. + "qdisc only\n");
  4674. + return -EINVAL;
  4675. + }
  4676. +
  4677. + if (!cl && priv->type != CEETM_ROOT) {
  4678. + pr_err("CEETM: only root ceetm classes can be attached to the "
  4679. + "root ceetm qdisc\n");
  4680. + return -EINVAL;
  4681. + }
  4682. +
  4683. + err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
  4684. + if (err < 0) {
  4685. + pr_err(KBUILD_BASENAME " : %s : failed to parse class options\n",
  __func__);
  4686. + return -EINVAL;
  4687. + }
  4688. +
  4689. + if (tb[TCA_CEETM_COPT] == NULL) {
  4690. + pr_err(KBUILD_BASENAME " : %s : missing ceetm class options\n",
  __func__);
  4691. + return -EINVAL;
  4692. + }
  4693. +
  4694. + if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
  4695. + pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm "
  4696. + "root classes\n");
  4697. + return -EINVAL;
  4698. + }
  4699. +
  4700. + copt = nla_data(tb[TCA_CEETM_COPT]);
  4701. +
  4702. + /* Configure an existing ceetm prio class */
  4703. + if (cl) {
  4704. + if (copt->type != CEETM_PRIO) {
  4705. + pr_err("CEETM: only prio ceetm classes can be changed\n");
  4706. + return -EINVAL;
  4707. + }
  4708. +
  4709. + if (!cl->shaped && (copt->cr || copt->er)) {
  4710. + pr_err("CEETM: only shaped classes can have CR and "
  4711. + "ER enabled\n");
  4712. + return -EINVAL;
  4713. + }
  4714. +
  4715. + if (cl->prio.cr != (bool)copt->cr)
  4716. + err = qman_ceetm_channel_set_cq_cr_eligibility(
  4717. + cl->prio.cq->parent,
  4718. + cl->prio.cq->idx,
  4719. + copt->cr);
  4720. +
  4721. + if (!err && cl->prio.er != (bool)copt->er)
  4722. + err = qman_ceetm_channel_set_cq_er_eligibility(
  4723. + cl->prio.cq->parent,
  4724. + cl->prio.cq->idx,
  4725. + copt->er);
  4726. +
  4727. + if (err) {
  4728. + pr_err(KBUILD_BASENAME " : %s : failed to configure "
  4729. + "the ceetm prio class %X\n",
  4730. + __func__,
  4731. + cl->common.classid);
  4732. + return err;
  4733. + }
  4734. +
  4735. + cl->prio.cr = copt->cr;
  4736. + cl->prio.er = copt->er;
  4737. + return 0;
  4738. + }
  4739. +
  4740. + /* Add a new root ceetm class */
  4741. + if (copt->type != CEETM_ROOT) {
  4742. + pr_err("CEETM: only root ceetm classes can be attached to the "
  4743. + "root ceetm qdisc\n");
  4744. + return -EINVAL;
  4745. + }
  4746. +
  4747. + if (copt->shaped && !priv->shaped) {
  4748. + pr_err("CEETM: can not add a shaped ceetm root class under an "
  4749. + "unshaped ceetm root qdisc\n");
  4750. + return -EINVAL;
  4751. + }
  4752. +
  4753. + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
  4754. + if (!cl) {
  4755. + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", __func__);
  4756. + return -ENOMEM;
  4757. + }
  4758. +
  4759. + cl->type = copt->type;
  4760. + cl->shaped = copt->shaped;
  4761. + cl->root.rate = copt->rate;
  4762. + cl->root.ceil = copt->ceil;
  4763. + cl->root.tbl = copt->tbl;
  4764. +
  4765. + cl->common.classid = classid;
  4766. + cl->refcnt = 1;
  4767. + cl->parent = sch;
  4768. + cl->root.child = NULL;
  4769. + cl->root.wbfs_grp_a = false;
  4770. + cl->root.wbfs_grp_b = false;
  4771. + cl->root.wbfs_grp_large = false;
  4772. +
  4773. + /* Claim a CEETM channel */
  4774. + err = qman_ceetm_channel_claim(&channel, priv->root.lni);
  4775. + if (err) {
  4776. + pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
  4777. + __func__);
  4778. + goto claim_err;
  4779. + }
  4780. +
  4781. + cl->root.ch = channel;
  4782. +
  4783. + if (cl->shaped) {
  4784. + /* Configure the channel shaper */
  4785. + err = qman_ceetm_channel_enable_shaper(channel, 1);
  4786. + if (err)
  4787. + goto channel_err;
  4788. +
  4789. + bps = cl->root.rate << 3; /* Bps -> bps */
  4790. + err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
  4791. + dev->mtu);
  4792. + if (err)
  4793. + goto channel_err;
  4794. +
  4795. + bps = cl->root.ceil << 3; /* Bps -> bps */
  4796. + err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
  4797. + dev->mtu);
  4798. + if (err)
  4799. + goto channel_err;
  4800. +
  4801. + } else {
  4802. + /* Configure the uFQ algorithm */
  4803. + err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
  4804. + if (err)
  4805. + goto channel_err;
  4806. + }
  4807. +
  4808. + /* Add class handle in Qdisc */
  4809. + ceetm_link_class(sch, &priv->clhash, &cl->common);
  4810. +
  4811. + pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with "
  4812. + "channel %d\n", __func__, classid, channel->idx);
  4813. + *arg = (unsigned long)cl;
  4814. + return 0;
  4815. +
  4816. +channel_err:
  4817. + pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
  4818. + __func__, channel->idx);
  4819. + if (qman_ceetm_channel_release(channel))
  4820. + pr_err(KBUILD_BASENAME " : %s : failed to release the channel "
  4821. + "%d\n", __func__, channel->idx);
  4822. +claim_err:
  4824. + kfree(cl);
  4826. + return err;
  4827. +}
  4828. +
  4829. +static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
  4830. +{
  4831. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  4832. + struct ceetm_class *cl;
  4833. + unsigned int i;
  4834. +
  4835. + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
  4836. +
  4837. + if (arg->stop)
  4838. + return;
  4839. +
  4840. + for (i = 0; i < priv->clhash.hashsize; i++) {
  4841. + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
  4842. + if (arg->count < arg->skip) {
  4843. + arg->count++;
  4844. + continue;
  4845. + }
  4846. + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
  4847. + arg->stop = 1;
  4848. + return;
  4849. + }
  4850. + arg->count++;
  4851. + }
  4852. + }
  4853. +}
  4854. +
  4855. +static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
  4856. + struct sk_buff *skb, struct tcmsg *tcm)
  4857. +{
  4858. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  4859. + struct nlattr *nest;
  4860. + struct tc_ceetm_copt copt;
  4861. +
  4862. + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
  4863. + __func__, cl->common.classid, sch->handle);
  4864. +
  4865. + sch_tree_lock(sch);
  4866. +
  4867. + tcm->tcm_parent = cl->parent->handle;
  4868. + tcm->tcm_handle = cl->common.classid;
  4869. +
  4870. + memset(&copt, 0, sizeof(copt));
  4871. +
  4872. + copt.shaped = cl->shaped;
  4873. + copt.type = cl->type;
  4874. +
  4875. + switch (cl->type) {
  4876. + case CEETM_ROOT:
  4877. + if (cl->root.child)
  4878. + tcm->tcm_info = cl->root.child->handle;
  4879. +
  4880. + copt.rate = cl->root.rate;
  4881. + copt.ceil = cl->root.ceil;
  4882. + copt.tbl = cl->root.tbl;
  4883. + break;
  4884. +
  4885. + case CEETM_PRIO:
  4886. + if (cl->prio.child)
  4887. + tcm->tcm_info = cl->prio.child->handle;
  4888. +
  4889. + copt.cr = cl->prio.cr;
  4890. + copt.er = cl->prio.er;
  4891. + break;
  4892. +
  4893. + case CEETM_WBFS:
  4894. + copt.weight = cl->wbfs.weight;
  4895. + break;
  4896. + }
  4897. +
  4898. + nest = nla_nest_start(skb, TCA_OPTIONS);
  4899. + if (nest == NULL)
  4900. + goto nla_put_failure;
  4901. + if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
  4902. + goto nla_put_failure;
  4903. + nla_nest_end(skb, nest);
  4904. + sch_tree_unlock(sch);
  4905. + return skb->len;
  4906. +
  4907. +nla_put_failure:
  4908. + sch_tree_unlock(sch);
  4909. + nla_nest_cancel(skb, nest);
  4910. + return -EMSGSIZE;
  4911. +}
  4912. +
  4913. +static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
  4914. +{
  4915. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  4916. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  4917. +
  4918. + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
  4919. + __func__, cl->common.classid, sch->handle);
  4920. +
  4921. + sch_tree_lock(sch);
  4922. + qdisc_class_hash_remove(&priv->clhash, &cl->common);
  4923. + cl->refcnt--;
  4924. +
  4925. + /* The refcnt should be at least 1 since we have incremented it in
  4926. + * get(). It will be decremented again in put(), which calls destroy()
  4927. + * to actually free the memory once it reaches 0. */
  4928. + BUG_ON(cl->refcnt == 0);
  4929. +
  4930. + sch_tree_unlock(sch);
  4931. + return 0;
  4932. +}
  4933. +
  4934. +/* Get the class' child qdisc, if any */
  4935. +static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
  4936. +{
  4937. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  4938. +
  4939. + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
  4940. + __func__, cl->common.classid, sch->handle);
  4941. +
  4942. + switch (cl->type) {
  4943. + case CEETM_ROOT:
  4944. + return cl->root.child;
  4946. +
  4947. + case CEETM_PRIO:
  4948. + return cl->prio.child;
  4950. + }
  4951. +
  4952. + return NULL;
  4953. +}
  4954. +
  4955. +static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
  4956. + struct Qdisc *new, struct Qdisc **old)
  4957. +{
  4958. + if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
  4959. + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm "
  4960. + "classes\n");
  4961. + return -EOPNOTSUPP;
  4962. + }
  4963. +
  4964. + return 0;
  4965. +}
  4966. +
  4967. +static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
  4968. + struct gnet_dump *d)
  4969. +{
  4970. + unsigned int i;
  4971. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  4972. + struct gnet_stats_basic_packed tmp_bstats;
  4973. + struct ceetm_class_stats *cstats = NULL;
  4974. + struct qm_ceetm_cq *cq = NULL;
  4975. + struct tc_ceetm_xstats xstats;
  4976. +
  4977. + memset(&xstats, 0, sizeof(xstats));
  4978. + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
  4979. +
  4980. + switch (cl->type) {
  4981. + case CEETM_ROOT:
  4982. + return 0;
  4983. + case CEETM_PRIO:
  4984. + cq = cl->prio.cq;
  4985. + break;
  4986. + case CEETM_WBFS:
  4987. + cq = cl->wbfs.cq;
  4988. + break;
  4989. + }
  4990. +
  4991. + for_each_online_cpu(i) {
  4992. + switch (cl->type) {
  4993. + case CEETM_PRIO:
  4994. + cstats = per_cpu_ptr(cl->prio.cstats, i);
  4995. + break;
  4996. + case CEETM_WBFS:
  4997. + cstats = per_cpu_ptr(cl->wbfs.cstats, i);
  4998. + break;
  4999. + }
  5000. +
  5001. + if (cstats) {
  5002. + xstats.ern_drop_count += cstats->ern_drop_count;
  5003. + xstats.cgr_congested_count += cstats->cgr_congested_count;
  5004. + tmp_bstats.bytes += cstats->bstats.bytes;
  5005. + tmp_bstats.packets += cstats->bstats.packets;
  5006. + }
  5007. + }
  5008. +
  5009. + if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
  5010. + return -1;
  5011. +
  5012. + if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
  5013. + &xstats.frame_count, &xstats.byte_count))
  5014. + return -1;
  5015. +
  5016. + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
  5017. +}
  5018. +
  5019. +static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
  5020. +{
  5021. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  5022. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  5023. + struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
  5024. +
  5025. + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
  5026. + cl ? cl->common.classid : 0, sch->handle);
  5027. + return fl;
  5028. +}
  5029. +
  5030. +static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
  5031. + u32 classid)
  5032. +{
  5033. + struct ceetm_class *cl = ceetm_find(classid, sch);
  5034. + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
  5035. + cl ? cl->common.classid : 0, sch->handle);
  5036. + return (unsigned long)cl;
  5037. +}
  5038. +
  5039. +static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
  5040. +{
  5041. + struct ceetm_class *cl = (struct ceetm_class *)arg;
  5042. + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
  5043. + cl ? cl->common.classid : 0, sch->handle);
  5044. +}
  5045. +
  5046. +const struct Qdisc_class_ops ceetm_cls_ops = {
  5047. + .graft = ceetm_cls_graft,
  5048. + .leaf = ceetm_cls_leaf,
  5049. + .get = ceetm_cls_get,
  5050. + .put = ceetm_cls_put,
  5051. + .change = ceetm_cls_change,
  5052. + .delete = ceetm_cls_delete,
  5053. + .walk = ceetm_cls_walk,
  5054. + .tcf_chain = ceetm_tcf_chain,
  5055. + .bind_tcf = ceetm_tcf_bind,
  5056. + .unbind_tcf = ceetm_tcf_unbind,
  5057. + .dump = ceetm_cls_dump,
  5058. + .dump_stats = ceetm_cls_dump_stats,
  5059. +};
  5060. +
  5061. +struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
  5062. + .id = "ceetm",
  5063. + .priv_size = sizeof(struct ceetm_qdisc),
  5064. + .cl_ops = &ceetm_cls_ops,
  5065. + .init = ceetm_init,
  5066. + .destroy = ceetm_destroy,
  5067. + .dump = ceetm_dump,
  5068. + .attach = ceetm_attach,
  5069. + .owner = THIS_MODULE,
  5070. +};
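+
+/* Usage sketch: with a matching iproute2 ceetm patch, the hierarchy
+ * implemented above would be built along these lines (the option names are
+ * inferred from tc_ceetm_qopt/tc_ceetm_copt and the interface name is only
+ * an example - treat this as an illustration, not a reference):
+ *
+ *   tc qdisc add dev fm1-mac1 root handle 1: ceetm type root \
+ *      rate 1000mbit ceil 1000mbit overhead 24
+ *   tc class add dev fm1-mac1 parent 1: classid 1:1 ceetm type root \
+ *      rate 500mbit ceil 600mbit
+ *   tc qdisc add dev fm1-mac1 parent 1:1 handle 2: ceetm type prio qcount 8
+ *   tc qdisc add dev fm1-mac1 parent 2:1 handle 3: ceetm type wbfs qcount 4 \
+ *      qweight 1 2 3 4 cr 1 er 1
+ */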
  5071. +
  5072. +/* Run the filters and classifiers attached to the qdisc on the provided skb */
  5073. +static struct ceetm_class *ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
  5074. + int *qerr, bool *act_drop)
  5075. +{
  5076. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  5077. + struct ceetm_class *cl = NULL, *wbfs_cl;
  5078. + struct tcf_result res;
  5079. + struct tcf_proto *tcf;
  5080. + int result;
  5081. +
  5082. + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
  5083. + tcf = priv->filter_list;
  5084. + while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
  5085. +#ifdef CONFIG_NET_CLS_ACT
  5086. + switch (result) {
  5087. + case TC_ACT_QUEUED:
  5088. + case TC_ACT_STOLEN:
  5089. + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
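+ /* fall through */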
  5090. + case TC_ACT_SHOT:
  5091. + /* No valid class found due to action */
  5092. + *act_drop = true;
  5093. + return NULL;
  5094. + }
  5095. +#endif
  5096. + cl = (void *)res.class;
  5097. + if (!cl) {
  5098. + if (res.classid == sch->handle) {
  5099. + /* The filter leads to the qdisc */
  5100. + /* TODO default qdisc */
  5101. + return NULL;
  5102. + }
  5103. +
  5104. + cl = ceetm_find(res.classid, sch);
  5105. + if (!cl)
  5106. + /* The filter leads to an invalid class */
  5107. + break;
  5108. + }
  5109. +
  5110. + /* The class might have its own filters attached */
  5111. + tcf = cl->filter_list;
  5112. + }
  5113. +
  5114. + if (!cl) {
  5115. + /* No valid class found */
  5116. + /* TODO default qdisc */
  5117. + return NULL;
  5118. + }
  5119. +
  5120. + switch (cl->type) {
  5121. + case CEETM_ROOT:
  5122. + if (cl->root.child) {
  5123. + /* Run the prio qdisc classifiers */
  5124. + return ceetm_classify(skb, cl->root.child, qerr,
  5125. + act_drop);
  5126. + } else {
  5127. + /* The root class does not have a child prio qdisc */
  5128. + /* TODO default qdisc */
  5129. + return NULL;
  5130. + }
  5131. + case CEETM_PRIO:
  5132. + if (cl->prio.child) {
  5133. + /* If filters lead to a wbfs class, return it.
  5134. + * Otherwise, return the prio class */
  5135. + wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
  5136. + act_drop);
  5137. + /* A NULL result might indicate either an erroneous
  5138. + * filter, or no filters at all. We will assume the
  5139. + * latter */
  5140. + return wbfs_cl ? : cl;
  5141. + }
  5142. + }
  5143. +
  5144. + /* For wbfs and childless prio classes, return the class directly */
  5145. + return cl;
  5146. +}
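+
+/* Note: the recursion above is bounded by the fixed ceetm hierarchy - a
+ * root class may descend into its prio child qdisc, and a prio class into
+ * its wbfs child qdisc, so at most two nested calls occur.
+ */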
  5147. +
  5148. +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
  5149. +{
  5150. + int ret;
  5151. + bool act_drop = false;
  5152. + struct Qdisc *sch = net_dev->qdisc;
  5153. + struct ceetm_class *cl;
  5154. + struct dpa_priv_s *priv_dpa;
  5155. + struct qman_fq *egress_fq, *conf_fq;
  5156. + struct ceetm_qdisc *priv = qdisc_priv(sch);
  5157. + struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
  5158. + struct ceetm_class_stats *cstats;
  5159. + const int queue_mapping = dpa_get_queue_mapping(skb);
  5160. + spinlock_t *root_lock = qdisc_lock(sch);
  5161. +
  5162. + spin_lock(root_lock);
  5163. + cl = ceetm_classify(skb, sch, &ret, &act_drop);
  5164. + spin_unlock(root_lock);
  5165. +
  5166. +#ifdef CONFIG_NET_CLS_ACT
  5167. + if (act_drop) {
  5168. + if (ret & __NET_XMIT_BYPASS)
  5169. + qstats->drops++;
  5170. + goto drop;
  5171. + }
  5172. +#endif
  5173. + /* TODO default class */
  5174. + if (unlikely(!cl)) {
  5175. + qstats->drops++;
  5176. + goto drop;
  5177. + }
  5178. +
  5179. + priv_dpa = netdev_priv(net_dev);
  5180. + conf_fq = priv_dpa->conf_fqs[queue_mapping];
  5181. +
  5182. + /* Choose the proper tx fq and update the basic stats (bytes and
  5183. + * packets sent by the class) */
  5184. + switch (cl->type) {
  5185. + case CEETM_PRIO:
  5186. + egress_fq = &(cl->prio.fq->fq);
  5187. + cstats = this_cpu_ptr(cl->prio.cstats);
  5188. + break;
  5189. + case CEETM_WBFS:
  5190. + egress_fq = &(cl->wbfs.fq->fq);
  5191. + cstats = this_cpu_ptr(cl->wbfs.cstats);
  5192. + break;
  5193. + default:
  5194. + qstats->drops++;
  5195. + goto drop;
  5196. + }
  5197. +
  5198. + bstats_update(&cstats->bstats, skb);
  5199. + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
  5200. +
  5201. +drop:
  5202. + dev_kfree_skb_any(skb);
  5203. + return NET_XMIT_SUCCESS;
  5204. +}
  5205. +
  5206. +static int __init ceetm_register(void)
  5207. +{
  5208. + int _errno = 0;
  5209. +
  5210. + pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
  5211. +
  5212. + _errno = register_qdisc(&ceetm_qdisc_ops);
  5213. + if (unlikely(_errno))
  5214. + pr_err(KBUILD_MODNAME
  5215. + ": %s:%hu:%s(): register_qdisc() = %d\n",
  5216. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  5217. +
  5218. + return _errno;
  5219. +}
  5220. +
  5221. +static void __exit ceetm_unregister(void)
  5222. +{
  5223. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  5224. + KBUILD_BASENAME".c", __func__);
  5225. +
  5226. + unregister_qdisc(&ceetm_qdisc_ops);
  5227. +}
  5228. +
  5229. +module_init(ceetm_register);
  5230. +module_exit(ceetm_unregister);
  5231. --- /dev/null
  5232. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
  5233. @@ -0,0 +1,230 @@
  5234. +/* Copyright 2008-2016 Freescale Semiconductor Inc.
  5235. + *
  5236. + * Redistribution and use in source and binary forms, with or without
  5237. + * modification, are permitted provided that the following conditions are met:
  5238. + * * Redistributions of source code must retain the above copyright
  5239. + * notice, this list of conditions and the following disclaimer.
  5240. + * * Redistributions in binary form must reproduce the above copyright
  5241. + * notice, this list of conditions and the following disclaimer in the
  5242. + * documentation and/or other materials provided with the distribution.
  5243. + * * Neither the name of Freescale Semiconductor nor the
  5244. + * names of its contributors may be used to endorse or promote products
  5245. + * derived from this software without specific prior written permission.
  5246. + *
  5247. + *
  5248. + * ALTERNATIVELY, this software may be distributed under the terms of the
  5249. + * GNU General Public License ("GPL") as published by the Free Software
  5250. + * Foundation, either version 2 of that License or (at your option) any
  5251. + * later version.
  5252. + *
  5253. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  5254. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  5255. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  5256. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  5257. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  5258. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  5259. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  5260. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5261. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  5262. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5263. + */
  5264. +
  5265. +#ifndef __DPAA_ETH_CEETM_H
  5266. +#define __DPAA_ETH_CEETM_H
  5267. +
  5268. +#include <net/pkt_sched.h>
  5269. +#include <net/netlink.h>
  5270. +#include <lnxwrp_fm.h>
  5271. +
  5272. +#include "mac.h"
  5273. +#include "dpaa_eth_common.h"
  5274. +
  5275. +/* Mask to determine the sub-portal id from a channel number */
  5276. +#define CHANNEL_SP_MASK 0x1f
  5277. +/* The number of the last channel that services DCP0, connected to FMan 0.
  5278. + * Value validated for B4 and T series platforms.
  5279. + */
  5280. +#define DCP0_MAX_CHANNEL 0x80f
  5281. +/* A2V=1 - field A2 is valid
  5282. + * A0V=1 - field A0 is valid - enables frame confirmation
  5283. + * OVOM=1 - override operation mode bits with values from A2
  5284. + * EBD=1 - external buffers are deallocated at the end of the FMan flow
  5285. + * NL=0 - the BMI releases all the internal buffers
  5286. + */
  5287. +#define CEETM_CONTEXT_A 0x1a00000080000000
  5288. +
  5289. +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
  5290. + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
  5291. + * are reserved for the maximum 32 CEETM channels (majors and minors are in
  5292. + * hex).
  5293. + */
  5294. +#define PFIFO_MIN_OFFSET 0x21
  5295. +
  5296. +/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
  5297. +#define CEETM_MAX_PRIO_QCOUNT 8
  5298. +#define CEETM_MAX_WBFS_QCOUNT 8
  5299. +#define CEETM_MIN_WBFS_QCOUNT 4
  5300. +
  5301. +/* The id offsets of the CQs belonging to WBFS groups: ids 8-11 for a small
  5302. + * group A (8-15 for the large group) and ids 12-15 for group B.
  5303. + */
  5304. +#define WBFS_GRP_A_OFFSET 8
  5305. +#define WBFS_GRP_B_OFFSET 12
  5306. +
  5307. +#define WBFS_GRP_A 1
  5308. +#define WBFS_GRP_B 2
  5309. +#define WBFS_GRP_LARGE 3
  5310. +
  5311. +enum {
  5312. + TCA_CEETM_UNSPEC,
  5313. + TCA_CEETM_COPT,
  5314. + TCA_CEETM_QOPS,
  5315. + __TCA_CEETM_MAX,
  5316. +};
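+
+/* These attribute ids, together with the tc_ceetm_qopt and tc_ceetm_copt
+ * layouts below, form the netlink ABI shared with userspace; a matching
+ * iproute2 (tc) ceetm patch must presumably use identical values.
+ */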
  5317. +
  5318. +/* CEETM configuration types */
  5319. +enum {
  5320. + CEETM_ROOT = 1,
  5321. + CEETM_PRIO,
  5322. + CEETM_WBFS
  5323. +};
  5324. +
  5325. +#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
  5326. +extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
  5327. +
  5328. +struct ceetm_class;
  5329. +struct ceetm_qdisc_stats;
  5330. +struct ceetm_class_stats;
  5331. +
  5332. +struct ceetm_fq {
  5333. + struct qman_fq fq;
  5334. + struct net_device *net_dev;
  5335. + struct ceetm_class *ceetm_cls;
  5336. +};
  5337. +
  5338. +struct root_q {
  5339. + struct Qdisc **qdiscs;
  5340. + __u16 overhead;
  5341. + __u32 rate;
  5342. + __u32 ceil;
  5343. + struct qm_ceetm_sp *sp;
  5344. + struct qm_ceetm_lni *lni;
  5345. + struct ceetm_qdisc_stats __percpu *qstats;
  5346. +};
  5347. +
  5348. +struct prio_q {
  5349. + __u16 qcount;
  5350. + struct ceetm_class *parent;
  5351. +};
  5352. +
  5353. +struct wbfs_q {
  5354. + __u16 qcount;
  5355. + int group_type;
  5356. + struct ceetm_class *parent;
  5357. + __u16 cr;
  5358. + __u16 er;
  5359. +};
  5360. +
  5361. +struct ceetm_qdisc {
  5362. + int type; /* ROOT/PRIO/WBFS */
  5363. + bool shaped;
  5364. + union {
  5365. + struct root_q root;
  5366. + struct prio_q prio;
  5367. + struct wbfs_q wbfs;
  5368. + };
  5369. + struct Qdisc_class_hash clhash;
  5370. + struct tcf_proto *filter_list; /* qdisc attached filters */
  5371. +};
  5372. +
  5373. +/* CEETM Qdisc configuration parameters */
  5374. +struct tc_ceetm_qopt {
  5375. + __u32 type;
  5376. + __u16 shaped;
  5377. + __u16 qcount;
  5378. + __u16 overhead;
  5379. + __u32 rate;
  5380. + __u32 ceil;
  5381. + __u16 cr;
  5382. + __u16 er;
  5383. + __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
  5384. +};
  5385. +
  5386. +struct root_c {
  5387. + unsigned int rate;
  5388. + unsigned int ceil;
  5389. + unsigned int tbl;
  5390. + bool wbfs_grp_a;
  5391. + bool wbfs_grp_b;
  5392. + bool wbfs_grp_large;
  5393. + struct Qdisc *child;
  5394. + struct qm_ceetm_channel *ch;
  5395. +};
  5396. +
  5397. +struct prio_c {
  5398. + bool cr;
  5399. + bool er;
  5400. + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
  5401. + struct qm_ceetm_lfq *lfq;
  5402. + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
  5403. + struct qm_ceetm_ccg *ccg;
  5404. + /* at most one wbfs qdisc can be linked under a prio class */
  5405. + struct Qdisc *child;
  5406. + struct ceetm_class_stats __percpu *cstats;
  5407. +};
  5408. +
  5409. +struct wbfs_c {
  5410. + __u8 weight; /* The weight of the class between 1 and 248 */
  5411. + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
  5412. + struct qm_ceetm_lfq *lfq;
  5413. + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
  5414. + struct qm_ceetm_ccg *ccg;
  5415. + struct ceetm_class_stats __percpu *cstats;
  5416. +};
  5417. +
  5418. +struct ceetm_class {
  5419. + struct Qdisc_class_common common;
  5420. + int refcnt; /* usage count of this class */
  5421. + struct tcf_proto *filter_list; /* class attached filters */
  5422. + struct Qdisc *parent;
  5423. + bool shaped;
  5424. + int type; /* ROOT/PRIO/WBFS */
  5425. + union {
  5426. + struct root_c root;
  5427. + struct prio_c prio;
  5428. + struct wbfs_c wbfs;
  5429. + };
  5430. +};
  5431. +
  5432. +/* CEETM Class configuration parameters */
  5433. +struct tc_ceetm_copt {
  5434. + __u32 type;
  5435. + __u16 shaped;
  5436. + __u32 rate;
  5437. + __u32 ceil;
  5438. + __u16 tbl;
  5439. + __u16 cr;
  5440. + __u16 er;
  5441. + __u8 weight;
  5442. +};
  5443. +
  5444. +/* CEETM stats */
  5445. +struct ceetm_qdisc_stats {
  5446. + __u32 drops;
  5447. +};
  5448. +
  5449. +struct ceetm_class_stats {
  5450. + struct gnet_stats_basic_packed bstats;
  5451. + __u32 ern_drop_count;
  5452. + __u32 cgr_congested_count;
  5453. +};
  5454. +
  5455. +struct tc_ceetm_xstats {
  5456. + __u32 ern_drop_count;
  5457. + __u32 cgr_congested_count;
  5458. + __u64 frame_count;
  5459. + __u64 byte_count;
  5460. +};
  5461. +
  5462. +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
  5463. +#endif
  5464. --- /dev/null
  5465. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
  5466. @@ -0,0 +1,1787 @@
  5467. +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
  5468. + *
  5469. + * Redistribution and use in source and binary forms, with or without
  5470. + * modification, are permitted provided that the following conditions are met:
  5471. + * * Redistributions of source code must retain the above copyright
  5472. + * notice, this list of conditions and the following disclaimer.
  5473. + * * Redistributions in binary form must reproduce the above copyright
  5474. + * notice, this list of conditions and the following disclaimer in the
  5475. + * documentation and/or other materials provided with the distribution.
  5476. + * * Neither the name of Freescale Semiconductor nor the
  5477. + * names of its contributors may be used to endorse or promote products
  5478. + * derived from this software without specific prior written permission.
  5479. + *
  5480. + *
  5481. + * ALTERNATIVELY, this software may be distributed under the terms of the
  5482. + * GNU General Public License ("GPL") as published by the Free Software
  5483. + * Foundation, either version 2 of that License or (at your option) any
  5484. + * later version.
  5485. + *
  5486. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  5487. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  5488. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  5489. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  5490. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  5491. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  5492. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  5493. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5494. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  5495. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5496. + */
  5497. +
  5498. +#include <linux/init.h>
  5499. +#include <linux/module.h>
  5500. +#include <linux/of_platform.h>
  5501. +#include <linux/of_net.h>
  5502. +#include <linux/etherdevice.h>
  5503. +#include <linux/kthread.h>
  5504. +#include <linux/percpu.h>
  5505. +#include <linux/highmem.h>
  5506. +#include <linux/sort.h>
  5507. +#include <linux/fsl_qman.h>
  5508. +#include <linux/ip.h>
  5509. +#include <linux/ipv6.h>
  5510. +#include <linux/if_vlan.h> /* vlan_eth_hdr */
  5511. +#include "dpaa_eth.h"
  5512. +#include "dpaa_eth_common.h"
  5513. +#ifdef CONFIG_FSL_DPAA_1588
  5514. +#include "dpaa_1588.h"
  5515. +#endif
  5516. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  5517. +#include "dpaa_debugfs.h"
  5518. +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
  5519. +#include "mac.h"
  5520. +
  5521. +/* DPAA platforms benefit from hardware-assisted queue management */
  5522. +#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ
  5523. +
  5524. +/* Size in bytes of the FQ taildrop threshold */
  5525. +#define DPA_FQ_TD 0x200000
  5526. +
  5527. +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
  5528. +struct ptp_priv_s ptp_priv;
  5529. +#endif
  5530. +
  5531. +static struct dpa_bp *dpa_bp_array[64];
  5532. +
  5533. +int dpa_max_frm;
  5534. +EXPORT_SYMBOL(dpa_max_frm);
  5535. +
  5536. +int dpa_rx_extra_headroom;
  5537. +EXPORT_SYMBOL(dpa_rx_extra_headroom);
  5538. +
  5539. +int dpa_num_cpus = NR_CPUS;
  5540. +
  5541. +static const struct fqid_cell tx_confirm_fqids[] = {
  5542. + {0, DPAA_ETH_TX_QUEUES}
  5543. +};
  5544. +
  5545. +static struct fqid_cell default_fqids[][3] = {
  5546. + [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
  5547. + [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
  5548. +};
  5549. +
  5550. +static const char fsl_qman_frame_queues[][25] = {
  5551. + [RX] = "fsl,qman-frame-queues-rx",
  5552. + [TX] = "fsl,qman-frame-queues-tx"
  5553. +};
  5554. +#ifdef CONFIG_FSL_DPAA_HOOKS
  5555. +/* A set of callbacks for hooking into the fastpath at different points. */
  5556. +struct dpaa_eth_hooks_s dpaa_eth_hooks;
  5557. +EXPORT_SYMBOL(dpaa_eth_hooks);
  5558. +/* This function should only be called on the probe paths, since it makes no
  5559. + * effort to guarantee consistency of the destination hooks structure.
  5560. + */
  5561. +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
  5562. +{
  5563. + if (hooks)
  5564. + dpaa_eth_hooks = *hooks;
  5565. + else
  5566. + pr_err("NULL pointer to hooks!\n");
  5567. +}
  5568. +EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
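+
+ /* Usage sketch (illustrative): a module tapping the fastpath would fill
+ * in only the callbacks it needs and register them once, on its probe
+ * path. Here my_rx_hook is a hypothetical function and .rx_default is
+ * assumed to be one of the dpaa_eth_hooks_s members:
+ *
+ * static struct dpaa_eth_hooks_s my_hooks = {
+ * .rx_default = my_rx_hook,
+ * };
+ * fsl_dpaa_eth_set_hooks(&my_hooks);
+ */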
  5569. +#endif
  5570. +
  5571. +int dpa_netdev_init(struct net_device *net_dev,
  5572. + const uint8_t *mac_addr,
  5573. + uint16_t tx_timeout)
  5574. +{
  5575. + int err;
  5576. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  5577. + struct device *dev = net_dev->dev.parent;
  5578. +
  5579. + net_dev->hw_features |= DPA_NETIF_FEATURES;
  5580. +
  5581. + net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
  5582. +
  5583. + net_dev->features |= net_dev->hw_features;
  5584. + net_dev->vlan_features = net_dev->features;
  5585. +
  5586. + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
  5587. + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
  5588. +
  5589. + net_dev->ethtool_ops = &dpa_ethtool_ops;
  5590. +
  5591. + net_dev->needed_headroom = priv->tx_headroom;
  5592. + net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
  5593. +
  5594. + err = register_netdev(net_dev);
  5595. + if (err < 0) {
  5596. + dev_err(dev, "register_netdev() = %d\n", err);
  5597. + return err;
  5598. + }
  5599. +
  5600. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  5601. + /* create debugfs entry for this net_device */
  5602. + err = dpa_netdev_debugfs_create(net_dev);
  5603. + if (err) {
  5604. + unregister_netdev(net_dev);
  5605. + return err;
  5606. + }
  5607. +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
  5608. +
  5609. + return 0;
  5610. +}
  5611. +EXPORT_SYMBOL(dpa_netdev_init);
  5612. +
  5613. +int __cold dpa_start(struct net_device *net_dev)
  5614. +{
  5615. + int err, i;
  5616. + struct dpa_priv_s *priv;
  5617. + struct mac_device *mac_dev;
  5618. +
  5619. + priv = netdev_priv(net_dev);
  5620. + mac_dev = priv->mac_dev;
  5621. +
  5622. + err = mac_dev->init_phy(net_dev, priv->mac_dev);
  5623. + if (err < 0) {
  5624. + if (netif_msg_ifup(priv))
  5625. + netdev_err(net_dev, "init_phy() = %d\n", err);
  5626. + return err;
  5627. + }
  5628. +
  5629. + for_each_port_device(i, mac_dev->port_dev) {
  5630. + err = fm_port_enable(mac_dev->port_dev[i]);
  5631. + if (err)
  5632. + goto mac_start_failed;
  5633. + }
  5634. +
  5635. + err = priv->mac_dev->start(mac_dev);
  5636. + if (err < 0) {
  5637. + if (netif_msg_ifup(priv))
  5638. + netdev_err(net_dev, "mac_dev->start() = %d\n", err);
  5639. + goto mac_start_failed;
  5640. + }
  5641. +
  5642. + netif_tx_start_all_queues(net_dev);
  5643. +
  5644. + return 0;
  5645. +
  5646. +mac_start_failed:
  5647. + for_each_port_device(i, mac_dev->port_dev)
  5648. + fm_port_disable(mac_dev->port_dev[i]);
  5649. +
  5650. + return err;
  5651. +}
  5652. +EXPORT_SYMBOL(dpa_start);
  5653. +
  5654. +int __cold dpa_stop(struct net_device *net_dev)
  5655. +{
  5656. + int _errno, i, err;
  5657. + struct dpa_priv_s *priv;
  5658. + struct mac_device *mac_dev;
  5659. +
  5660. + priv = netdev_priv(net_dev);
  5661. + mac_dev = priv->mac_dev;
  5662. +
  5663. + netif_tx_stop_all_queues(net_dev);
  5664. + /* Allow the Fman (Tx) port to process in-flight frames before we
  5665. + * try switching it off.
  5666. + */
  5667. + usleep_range(5000, 10000);
  5668. +
  5669. + _errno = mac_dev->stop(mac_dev);
  5670. + if (unlikely(_errno < 0))
  5671. + if (netif_msg_ifdown(priv))
  5672. + netdev_err(net_dev, "mac_dev->stop() = %d\n",
  5673. + _errno);
  5674. +
  5675. + for_each_port_device(i, mac_dev->port_dev) {
  5676. + err = fm_port_disable(mac_dev->port_dev[i]);
  5677. + _errno = err ? err : _errno;
  5678. + }
  5679. +
  5680. + if (mac_dev->phy_dev)
  5681. + phy_disconnect(mac_dev->phy_dev);
  5682. + mac_dev->phy_dev = NULL;
  5683. +
  5684. + return _errno;
  5685. +}
  5686. +EXPORT_SYMBOL(dpa_stop);
  5687. +
  5688. +void __cold dpa_timeout(struct net_device *net_dev)
  5689. +{
  5690. + const struct dpa_priv_s *priv;
  5691. + struct dpa_percpu_priv_s *percpu_priv;
  5692. +
  5693. + priv = netdev_priv(net_dev);
  5694. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  5695. +
  5696. + if (netif_msg_timer(priv))
  5697. + netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",
  5698. + jiffies_to_msecs(jiffies - net_dev->trans_start));
  5699. +
  5700. + percpu_priv->stats.tx_errors++;
  5701. +}
  5702. +EXPORT_SYMBOL(dpa_timeout);
  5703. +
  5704. +/* net_device */
  5705. +
  5706. +/**
  5707. + * @param net_dev the device for which statistics are calculated
  5708. + * @param stats the function fills this structure with the device's statistics
  5709. + * @return the address of the structure containing the statistics
  5710. + *
  5711. + * Calculates the statistics for the given device by adding the statistics
  5712. + * collected by each CPU.
  5713. + */
  5714. +struct rtnl_link_stats64 * __cold
  5715. +dpa_get_stats64(struct net_device *net_dev,
  5716. + struct rtnl_link_stats64 *stats)
  5717. +{
  5718. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  5719. + u64 *cpustats;
  5720. + u64 *netstats = (u64 *)stats;
  5721. + int i, j;
  5722. + struct dpa_percpu_priv_s *percpu_priv;
  5723. + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
  5724. +
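+ /* Both the per-CPU stats and struct rtnl_link_stats64 are flat arrays
+ * of u64 counters, which is what makes the field-by-field accumulation
+ * below safe.
+ */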
  5725. + for_each_possible_cpu(i) {
  5726. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  5727. +
  5728. + cpustats = (u64 *)&percpu_priv->stats;
  5729. +
  5730. + for (j = 0; j < numstats; j++)
  5731. + netstats[j] += cpustats[j];
  5732. + }
  5733. +
  5734. + return stats;
  5735. +}
  5736. +EXPORT_SYMBOL(dpa_get_stats64);
  5737. +
  5738. +int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
  5739. +{
  5740. + const int max_mtu = dpa_get_max_mtu();
  5741. +
  5742. + /* Make sure we don't exceed the Ethernet controller's MAXFRM */
  5743. + if (new_mtu < 68 || new_mtu > max_mtu) {
  5744. + netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
  5745. + new_mtu, 68, max_mtu);
  5746. + return -EINVAL;
  5747. + }
  5748. + net_dev->mtu = new_mtu;
  5749. +
  5750. + return 0;
  5751. +}
  5752. +EXPORT_SYMBOL(dpa_change_mtu);
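+
+ /* Worked example (assuming dpa_get_max_mtu() subtracts the VLAN-tagged
+ * Ethernet header and FCS from the FMan MAXFRM): a MAXFRM of 1522 bytes
+ * allows MTUs up to the standard 1500, while a jumbo MAXFRM of 9600
+ * allows MTUs up to 9578.
+ */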
  5753. +
  5754. +/* .ndo_init callback */
  5755. +int dpa_ndo_init(struct net_device *net_dev)
  5756. +{
  5757. + /* If fsl_fm_max_frm is set to a higher value than the common 1500,
  5758. + * we choose conservatively and let the user explicitly set a higher
  5759. + * MTU via ifconfig. Otherwise, the user may end up with different MTUs
  5760. + * in the same LAN.
  5761. + * If on the other hand fsl_fm_max_frm has been chosen below 1500,
  5762. + * start with the maximum allowed.
  5763. + */
  5764. + int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
  5765. +
  5766. + pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
  5767. + net_dev->mtu = init_mtu;
  5768. +
  5769. + return 0;
  5770. +}
  5771. +EXPORT_SYMBOL(dpa_ndo_init);
  5772. +
  5773. +int dpa_set_features(struct net_device *dev, netdev_features_t features)
  5774. +{
  5775. + /* Not much to do here for now */
  5776. + dev->features = features;
  5777. + return 0;
  5778. +}
  5779. +EXPORT_SYMBOL(dpa_set_features);
  5780. +
  5781. +netdev_features_t dpa_fix_features(struct net_device *dev,
  5782. + netdev_features_t features)
  5783. +{
  5784. + netdev_features_t unsupported_features = 0;
  5785. +
  5786. + /* In theory we should never be requested to enable features that
  5787. + * we didn't set in netdev->features and netdev->hw_features at probe
  5788. + * time, but double check just to be on the safe side.
  5789. + * We don't support enabling Rx csum through ethtool yet
  5790. + */
  5791. + unsupported_features |= NETIF_F_RXCSUM;
  5792. +
  5793. + features &= ~unsupported_features;
  5794. +
  5795. + return features;
  5796. +}
  5797. +EXPORT_SYMBOL(dpa_fix_features);
  5798. +
  5799. +#ifdef CONFIG_FSL_DPAA_TS
  5800. +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
  5801. + const void *data)
  5802. +{
  5803. + u64 *ts, ns;
  5804. +
  5805. + ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
  5806. + data);
  5807. +
  5808. + if (!ts || *ts == 0)
  5809. + return 0;
  5810. +
  5811. + be64_to_cpus(ts);
  5812. +
  5813. + /* multiply by DPA_PTP_NOMINAL_FREQ_PERIOD_NS for the non-power-of-2 case */
  5814. + ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
  5815. +
  5816. + return ns;
  5817. +}
  5818. +
  5819. +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
  5820. + struct skb_shared_hwtstamps *shhwtstamps, const void *data)
  5821. +{
  5822. + u64 ns;
  5823. +
  5824. + ns = dpa_get_timestamp_ns(priv, rx_tx, data);
  5825. +
  5826. + if (ns == 0)
  5827. + return -EINVAL;
  5828. +
  5829. + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  5830. + shhwtstamps->hwtstamp = ns_to_ktime(ns);
  5831. +
  5832. + return 0;
  5833. +}
  5834. +
  5835. +static void dpa_ts_tx_enable(struct net_device *dev)
  5836. +{
  5837. + struct dpa_priv_s *priv = netdev_priv(dev);
  5838. + struct mac_device *mac_dev = priv->mac_dev;
  5839. +
  5840. + if (mac_dev->fm_rtc_enable)
  5841. + mac_dev->fm_rtc_enable(get_fm_handle(dev));
  5842. + if (mac_dev->ptp_enable)
  5843. + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
  5844. +
  5845. + priv->ts_tx_en = true;
  5846. +}
  5847. +
  5848. +static void dpa_ts_tx_disable(struct net_device *dev)
  5849. +{
  5850. + struct dpa_priv_s *priv = netdev_priv(dev);
  5851. +
  5852. +#if 0
  5853. +/* the RTC might be needed by the Rx Ts, cannot disable here
  5854. + * no separate ptp_disable API for Rx/Tx, cannot disable here
  5855. + */
  5856. + struct mac_device *mac_dev = priv->mac_dev;
  5857. +
  5858. + if (mac_dev->fm_rtc_disable)
  5859. + mac_dev->fm_rtc_disable(get_fm_handle(dev));
  5860. +
  5861. + if (mac_dev->ptp_disable)
  5862. + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
  5863. +#endif
  5864. +
  5865. + priv->ts_tx_en = false;
  5866. +}
  5867. +
  5868. +static void dpa_ts_rx_enable(struct net_device *dev)
  5869. +{
  5870. + struct dpa_priv_s *priv = netdev_priv(dev);
  5871. + struct mac_device *mac_dev = priv->mac_dev;
  5872. +
  5873. + if (mac_dev->fm_rtc_enable)
  5874. + mac_dev->fm_rtc_enable(get_fm_handle(dev));
  5875. + if (mac_dev->ptp_enable)
  5876. + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
  5877. +
  5878. + priv->ts_rx_en = true;
  5879. +}
  5880. +
  5881. +static void dpa_ts_rx_disable(struct net_device *dev)
  5882. +{
  5883. + struct dpa_priv_s *priv = netdev_priv(dev);
  5884. +
  5885. +#if 0
  5886. +/* the RTC might be needed by the Tx Ts, cannot disable here
  5887. + * no separate ptp_disable API for Rx/Tx, cannot disable here
  5888. + */
  5889. + struct mac_device *mac_dev = priv->mac_dev;
  5890. +
  5891. + if (mac_dev->fm_rtc_disable)
  5892. + mac_dev->fm_rtc_disable(get_fm_handle(dev));
  5893. +
  5894. + if (mac_dev->ptp_disable)
  5895. + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
  5896. +#endif
  5897. +
  5898. + priv->ts_rx_en = false;
  5899. +}
  5900. +
  5901. +static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  5902. +{
  5903. + struct hwtstamp_config config;
  5904. +
  5905. + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
  5906. + return -EFAULT;
  5907. +
  5908. + switch (config.tx_type) {
  5909. + case HWTSTAMP_TX_OFF:
  5910. + dpa_ts_tx_disable(dev);
  5911. + break;
  5912. + case HWTSTAMP_TX_ON:
  5913. + dpa_ts_tx_enable(dev);
  5914. + break;
  5915. + default:
  5916. + return -ERANGE;
  5917. + }
  5918. +
  5919. + if (config.rx_filter == HWTSTAMP_FILTER_NONE)
  5920. + dpa_ts_rx_disable(dev);
  5921. + else {
  5922. + dpa_ts_rx_enable(dev);
  5923. + /* TS is set for all frame types, not only those requested */
  5924. + config.rx_filter = HWTSTAMP_FILTER_ALL;
  5925. + }
  5926. +
  5927. + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
  5928. + -EFAULT : 0;
  5929. +}
  5930. +#endif /* CONFIG_FSL_DPAA_TS */
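+
+ /* Userspace counterpart (illustrative): hardware timestamping is driven
+ * through the standard SIOCSHWTSTAMP request, e.g.:
+ *
+ * struct hwtstamp_config cfg = {
+ * .tx_type = HWTSTAMP_TX_ON,
+ * .rx_filter = HWTSTAMP_FILTER_ALL,
+ * };
+ * struct ifreq ifr;
+ * strncpy(ifr.ifr_name, "fm1-mac1", IFNAMSIZ);
+ * ifr.ifr_data = (void *)&cfg;
+ * ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ */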
  5931. +
  5932. +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  5933. +{
  5934. +#ifdef CONFIG_FSL_DPAA_1588
  5935. + struct dpa_priv_s *priv = netdev_priv(dev);
  5936. +#endif
  5937. + int ret = 0;
  5938. +
  5939. + /* at least one timestamping feature must be enabled */
  5940. +#ifdef CONFIG_FSL_DPAA_TS
  5941. + if (!netif_running(dev))
  5942. +#endif
  5943. + return -EINVAL;
  5944. +
  5945. +#ifdef CONFIG_FSL_DPAA_TS
  5946. + if (cmd == SIOCSHWTSTAMP)
  5947. + return dpa_ts_ioctl(dev, rq, cmd);
  5948. +#endif /* CONFIG_FSL_DPAA_TS */
  5949. +
  5950. +#ifdef CONFIG_FSL_DPAA_1588
  5951. + if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
  5952. + if (priv->tsu && priv->tsu->valid)
  5953. + ret = dpa_ioctl_1588(dev, rq, cmd);
  5954. + else
  5955. + ret = -ENODEV;
  5956. + }
  5957. +#endif
  5958. +
  5959. + return ret;
  5960. +}
  5961. +EXPORT_SYMBOL(dpa_ioctl);
  5962. +
  5963. +int __cold dpa_remove(struct platform_device *of_dev)
  5964. +{
  5965. + int err;
  5966. + struct device *dev;
  5967. + struct net_device *net_dev;
  5968. + struct dpa_priv_s *priv;
  5969. +
  5970. + dev = &of_dev->dev;
  5971. + net_dev = dev_get_drvdata(dev);
  5972. +
  5973. + priv = netdev_priv(net_dev);
  5974. +
  5975. + dpaa_eth_sysfs_remove(dev);
  5976. +
  5977. + dev_set_drvdata(dev, NULL);
  5978. + unregister_netdev(net_dev);
  5979. +
  5980. + err = dpa_fq_free(dev, &priv->dpa_fq_list);
  5981. +
  5982. + qman_delete_cgr_safe(&priv->ingress_cgr);
  5983. + qman_release_cgrid(priv->ingress_cgr.cgrid);
  5984. + qman_delete_cgr_safe(&priv->cgr_data.cgr);
  5985. + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
  5986. +
  5987. + dpa_private_napi_del(net_dev);
  5988. +
  5989. + dpa_bp_free(priv);
  5990. +
  5991. + if (priv->buf_layout)
  5992. + devm_kfree(dev, priv->buf_layout);
  5993. +
  5994. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  5995. + /* remove debugfs entry for this net_device */
  5996. + dpa_netdev_debugfs_remove(net_dev);
  5997. +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
  5998. +
  5999. +#ifdef CONFIG_FSL_DPAA_1588
  6000. + if (priv->tsu && priv->tsu->valid)
  6001. + dpa_ptp_cleanup(priv);
  6002. +#endif
  6003. +
  6004. + free_netdev(net_dev);
  6005. +
  6006. + return err;
  6007. +}
  6008. +EXPORT_SYMBOL(dpa_remove);
  6009. +
  6010. +struct mac_device * __cold __must_check
  6011. +__attribute__((nonnull))
  6012. +dpa_mac_probe(struct platform_device *_of_dev)
  6013. +{
  6014. + struct device *dpa_dev, *dev;
  6015. + struct device_node *mac_node;
  6016. + struct platform_device *of_dev;
  6017. + struct mac_device *mac_dev;
  6018. +#ifdef CONFIG_FSL_DPAA_1588
  6019. + int lenp;
  6020. + const phandle *phandle_prop;
  6021. + struct net_device *net_dev = NULL;
  6022. + struct dpa_priv_s *priv = NULL;
  6023. + struct device_node *timer_node;
  6024. +#endif
  6025. + dpa_dev = &_of_dev->dev;
  6026. +
  6027. + mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
  6028. + if (unlikely(mac_node == NULL)) {
  6029. + dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
  6030. + return ERR_PTR(-EFAULT);
  6031. + }
  6032. +
  6033. + of_dev = of_find_device_by_node(mac_node);
  6034. + if (unlikely(of_dev == NULL)) {
  6035. + dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
  6036. + mac_node->full_name);
  6037. + of_node_put(mac_node);
  6038. + return ERR_PTR(-EINVAL);
  6039. + }
  6040. + of_node_put(mac_node);
  6041. +
  6042. + dev = &of_dev->dev;
  6043. +
  6044. + mac_dev = dev_get_drvdata(dev);
  6045. + if (unlikely(mac_dev == NULL)) {
  6046. + dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
  6047. + dev_name(dev));
  6048. + return ERR_PTR(-EINVAL);
  6049. + }
  6050. +
  6051. +#ifdef CONFIG_FSL_DPAA_1588
  6052. + phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
  6053. + if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
  6054. + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
  6055. + (mac_dev->speed == SPEED_1000)))) {
  6056. + timer_node = of_find_node_by_phandle(*phandle_prop);
  6057. + if (timer_node)
  6058. + net_dev = dev_get_drvdata(dpa_dev);
  6059. + if (timer_node && net_dev) {
  6060. + priv = netdev_priv(net_dev);
  6061. + if (!dpa_ptp_init(priv))
  6062. + dev_info(dev, "%s: ptp 1588 is initialized.\n",
  6063. + mac_node->full_name);
  6064. + }
  6065. + }
  6066. +#endif
  6067. +
  6068. +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
  6069. + if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
  6070. + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
  6071. + (mac_dev->speed == SPEED_1000))) {
  6072. + ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
  6073. + if (ptp_priv.node) {
  6074. + ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
  6075. + if (unlikely(ptp_priv.of_dev == NULL)) {
  6076. + dev_err(dpa_dev,
  6077. + "Cannot find device represented by timer_node\n");
  6078. + of_node_put(ptp_priv.node);
  6079. + return ERR_PTR(-EINVAL);
  6080. + }
  6081. + ptp_priv.mac_dev = mac_dev;
  6082. + }
  6083. + }
  6084. +#endif
  6085. + return mac_dev;
  6086. +}
  6087. +EXPORT_SYMBOL(dpa_mac_probe);
  6088. +
  6089. +int dpa_set_mac_address(struct net_device *net_dev, void *addr)
  6090. +{
  6091. + const struct dpa_priv_s *priv;
  6092. + int _errno;
  6093. + struct mac_device *mac_dev;
  6094. +
  6095. + priv = netdev_priv(net_dev);
  6096. +
  6097. + _errno = eth_mac_addr(net_dev, addr);
  6098. + if (_errno < 0) {
  6099. + if (netif_msg_drv(priv))
  6100. + netdev_err(net_dev,
  6101. + "eth_mac_addr() = %d\n",
  6102. + _errno);
  6103. + return _errno;
  6104. + }
  6105. +
  6106. + mac_dev = priv->mac_dev;
  6107. +
  6108. + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
  6109. + net_dev->dev_addr);
  6110. + if (_errno < 0) {
  6111. + if (netif_msg_drv(priv))
  6112. + netdev_err(net_dev,
  6113. + "mac_dev->change_addr() = %d\n",
  6114. + _errno);
  6115. + return _errno;
  6116. + }
  6117. +
  6118. + return 0;
  6119. +}
  6120. +EXPORT_SYMBOL(dpa_set_mac_address);
  6121. +
  6122. +void dpa_set_rx_mode(struct net_device *net_dev)
  6123. +{
  6124. + int _errno;
  6125. + const struct dpa_priv_s *priv;
  6126. +
  6127. + priv = netdev_priv(net_dev);
  6128. +
  6129. + if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
  6130. + priv->mac_dev->promisc = !priv->mac_dev->promisc;
  6131. + _errno = priv->mac_dev->set_promisc(
  6132. + priv->mac_dev->get_mac_handle(priv->mac_dev),
  6133. + priv->mac_dev->promisc);
  6134. + if (unlikely(_errno < 0) && netif_msg_drv(priv))
  6135. + netdev_err(net_dev,
  6136. + "mac_dev->set_promisc() = %d\n",
  6137. + _errno);
  6138. + }
  6139. +
  6140. + _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
  6141. + if (unlikely(_errno < 0) && netif_msg_drv(priv))
  6142. + netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
  6143. +}
  6144. +EXPORT_SYMBOL(dpa_set_rx_mode);
  6145. +
  6146. +void dpa_set_buffers_layout(struct mac_device *mac_dev,
  6147. + struct dpa_buffer_layout_s *layout)
  6148. +{
  6149. + struct fm_port_params params;
  6150. +
  6151. + /* Rx */
  6152. + layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
  6153. + layout[RX].parse_results = true;
  6154. + layout[RX].hash_results = true;
  6155. +#ifdef CONFIG_FSL_DPAA_TS
  6156. + layout[RX].time_stamp = true;
  6157. +#endif
  6158. + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
  6159. + layout[RX].manip_extra_space = params.manip_extra_space;
  6160. + /* a value of zero for data alignment means "don't care", so align to
  6161. + * a non-zero value to prevent FMD from using its own default
  6162. + */
  6163. + layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
  6164. +
  6165. + /* Tx */
  6166. + layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
  6167. + layout[TX].parse_results = true;
  6168. + layout[TX].hash_results = true;
  6169. +#ifdef CONFIG_FSL_DPAA_TS
  6170. + layout[TX].time_stamp = true;
  6171. +#endif
  6172. + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
  6173. + layout[TX].manip_extra_space = params.manip_extra_space;
  6174. + layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
  6175. +}
  6176. +EXPORT_SYMBOL(dpa_set_buffers_layout);
  6177. +
  6178. +int __attribute__((nonnull))
  6179. +dpa_bp_alloc(struct dpa_bp *dpa_bp)
  6180. +{
  6181. + int err;
  6182. + struct bman_pool_params bp_params;
  6183. + struct platform_device *pdev;
  6184. +
  6185. + if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
  6186. + pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
  6187. + return -EINVAL;
  6188. + }
  6189. +
  6190. + memset(&bp_params, 0, sizeof(struct bman_pool_params));
  6191. +#ifdef CONFIG_FMAN_PFC
  6192. + bp_params.flags = BMAN_POOL_FLAG_THRESH;
  6193. + bp_params.thresholds[0] = bp_params.thresholds[2] =
  6194. + CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
  6195. + bp_params.thresholds[1] = bp_params.thresholds[3] =
  6196. + CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
  6197. +#endif
  6198. +
  6199. + /* If the pool is already specified, we only create one per bpid */
  6200. + if (dpa_bpid2pool_use(dpa_bp->bpid))
  6201. + return 0;
  6202. +
  6203. + if (dpa_bp->bpid == 0)
  6204. + bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
  6205. + else
  6206. + bp_params.bpid = dpa_bp->bpid;
  6207. +
  6208. + dpa_bp->pool = bman_new_pool(&bp_params);
  6209. + if (unlikely(dpa_bp->pool == NULL)) {
  6210. + pr_err("bman_new_pool() failed\n");
  6211. + return -ENODEV;
  6212. + }
  6213. +
  6214. + dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
  6215. +
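+ /* Register a dummy platform device for the pool, so buffer DMA mappings
+ * have a struct device (with the 40-bit DMA mask set below) to be
+ * performed against.
+ */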
  6216. + pdev = platform_device_register_simple("dpaa_eth_bpool",
  6217. + dpa_bp->bpid, NULL, 0);
  6218. + if (IS_ERR(pdev)) {
  6219. + err = PTR_ERR(pdev);
  6220. + goto pdev_register_failed;
  6221. + }
  6222. +
  6223. + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
  6224. + if (err)
  6225. + goto pdev_mask_failed;
  6226. + if (!pdev->dev.dma_mask)
  6227. + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
  6228. + else {
  6229. + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
  6230. + if (err)
  6231. + goto pdev_mask_failed;
  6232. + }
  6233. +
  6234. +#ifdef CONFIG_FMAN_ARM
  6235. + /* force coherency */
  6236. + pdev->dev.archdata.dma_coherent = true;
  6237. + arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
  6238. +#endif
  6239. +
  6240. + dpa_bp->dev = &pdev->dev;
  6241. +
  6242. + if (dpa_bp->seed_cb) {
  6243. + err = dpa_bp->seed_cb(dpa_bp);
  6244. + if (err)
  6245. + goto pool_seed_failed;
  6246. + }
  6247. +
  6248. + dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
  6249. +
  6250. + return 0;
  6251. +
  6252. +pool_seed_failed:
  6253. +pdev_mask_failed:
  6254. + platform_device_unregister(pdev);
  6255. +pdev_register_failed:
  6256. + bman_free_pool(dpa_bp->pool);
  6257. +
  6258. + return err;
  6259. +}
  6260. +EXPORT_SYMBOL(dpa_bp_alloc);
  6261. +
  6262. +void dpa_bp_drain(struct dpa_bp *bp)
  6263. +{
  6264. + int ret, num = 8;
  6265. +
  6266. + do {
  6267. + struct bm_buffer bmb[8];
  6268. + int i;
  6269. +
  6270. + ret = bman_acquire(bp->pool, bmb, num, 0);
  6271. + if (ret < 0) {
  6272. + if (num == 8) {
  6273. + /* we have less than 8 buffers left;
  6274. + * drain them one by one
  6275. + */
  6276. + num = 1;
  6277. + ret = 1;
  6278. + continue;
  6279. + } else {
  6280. + /* Pool is fully drained */
  6281. + break;
  6282. + }
  6283. + }
  6284. +
  6285. + for (i = 0; i < num; i++) {
  6286. + dma_addr_t addr = bm_buf_addr(&bmb[i]);
  6287. +
  6288. + dma_unmap_single(bp->dev, addr, bp->size,
  6289. + DMA_BIDIRECTIONAL);
  6290. +
  6291. + bp->free_buf_cb(phys_to_virt(addr));
  6292. + }
  6293. + } while (ret > 0);
  6294. +}
  6295. +EXPORT_SYMBOL(dpa_bp_drain);
  6296. +
  6297. +static void __cold __attribute__((nonnull))
  6298. +_dpa_bp_free(struct dpa_bp *dpa_bp)
  6299. +{
  6300. + struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
  6301. +
  6302. + /* the mapping between bpid and dpa_bp is done very late in the
  6303. + * allocation procedure; if something failed before the mapping, the bp
  6304. + * was not configured, so there is nothing to tear down here
  6305. + */
  6306. + if (!bp)
  6307. + return;
  6308. +
  6309. + if (!atomic_dec_and_test(&bp->refs))
  6310. + return;
  6311. +
  6312. + if (bp->free_buf_cb)
  6313. + dpa_bp_drain(bp);
  6314. +
  6315. + dpa_bp_array[bp->bpid] = NULL;
  6316. + bman_free_pool(bp->pool);
  6317. +
  6318. + if (bp->dev)
  6319. + platform_device_unregister(to_platform_device(bp->dev));
  6320. +}
  6321. +
  6322. +void __cold __attribute__((nonnull))
  6323. +dpa_bp_free(struct dpa_priv_s *priv)
  6324. +{
  6325. + int i;
  6326. +
  6327. + for (i = 0; i < priv->bp_count; i++)
  6328. + _dpa_bp_free(&priv->dpa_bp[i]);
  6329. +}
  6330. +EXPORT_SYMBOL(dpa_bp_free);
  6331. +
  6332. +struct dpa_bp *dpa_bpid2pool(int bpid)
  6333. +{
  6334. + return dpa_bp_array[bpid];
  6335. +}
  6336. +EXPORT_SYMBOL(dpa_bpid2pool);
  6337. +
  6338. +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
  6339. +{
  6340. + dpa_bp_array[bpid] = dpa_bp;
  6341. + atomic_set(&dpa_bp->refs, 1);
  6342. +}
  6343. +
  6344. +bool dpa_bpid2pool_use(int bpid)
  6345. +{
  6346. + if (dpa_bpid2pool(bpid)) {
  6347. + atomic_inc(&dpa_bp_array[bpid]->refs);
  6348. + return true;
  6349. + }
  6350. +
  6351. + return false;
  6352. +}
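+
+ /* Pool refcounting, in short: dpa_bpid2pool_map() publishes a freshly
+ * allocated pool with a refcount of 1, dpa_bpid2pool_use() takes an
+ * extra reference for each additional user of the same bpid, and
+ * _dpa_bp_free() drops one, tearing the pool down when the last
+ * reference goes away.
+ */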
  6353. +
  6354. +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  6355. +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
  6356. + void *accel_priv, select_queue_fallback_t fallback)
  6357. +{
  6358. + return dpa_get_queue_mapping(skb);
  6359. +}
  6360. +EXPORT_SYMBOL(dpa_select_queue);
  6361. +#endif
  6362. +
  6363. +struct dpa_fq *dpa_fq_alloc(struct device *dev,
  6364. + u32 fq_start,
  6365. + u32 fq_count,
  6366. + struct list_head *list,
  6367. + enum dpa_fq_type fq_type)
  6368. +{
  6369. + int i;
  6370. + struct dpa_fq *dpa_fq;
  6371. +
  6372. + dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
  6373. + if (dpa_fq == NULL)
  6374. + return NULL;
  6375. +
  6376. + for (i = 0; i < fq_count; i++) {
  6377. + dpa_fq[i].fq_type = fq_type;
  6378. + if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
  6379. + dpa_fq[i].fqid = fq_start ?
  6380. + DPAA_ETH_FQ_DELTA + fq_start + i : 0;
  6381. + else
  6382. + dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
  6383. +
  6384. + list_add_tail(&dpa_fq[i].list, list);
  6385. + }
  6386. +
  6387. +#ifdef CONFIG_FMAN_PFC
  6388. + if (fq_type == FQ_TYPE_TX)
  6389. + for (i = 0; i < fq_count; i++)
  6390. + dpa_fq[i].wq = i / dpa_num_cpus;
  6391. + else
  6392. +#endif
  6393. + for (i = 0; i < fq_count; i++)
  6394. + _dpa_assign_wq(dpa_fq + i);
  6395. +
  6396. + return dpa_fq;
  6397. +}
  6398. +EXPORT_SYMBOL(dpa_fq_alloc);
  6399. +
  6400. +/* Probing of FQs for MACful ports */
  6401. +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
  6402. + struct fm_port_fqs *port_fqs,
  6403. + bool alloc_tx_conf_fqs,
  6404. + enum port_type ptype)
  6405. +{
  6406. + struct fqid_cell *fqids = NULL;
  6407. + const void *fqids_off = NULL;
  6408. + struct dpa_fq *dpa_fq = NULL;
  6409. + struct device_node *np = dev->of_node;
  6410. + int num_ranges;
  6411. + int i, lenp;
  6412. +
  6413. + if (ptype == TX && alloc_tx_conf_fqs) {
  6414. + if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
  6415. + tx_confirm_fqids->count, list,
  6416. + FQ_TYPE_TX_CONF_MQ))
  6417. + goto fq_alloc_failed;
  6418. + }
  6419. +
  6420. + fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
  6421. + if (fqids_off == NULL) {
  6422. + /* No dts definition, so use the defaults. */
  6423. + fqids = default_fqids[ptype];
  6424. + num_ranges = 3;
  6425. + } else {
  6426. + num_ranges = lenp / sizeof(*fqids);
  6427. +
  6428. + fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
  6429. + GFP_KERNEL);
  6430. + if (fqids == NULL)
  6431. + goto fqids_alloc_failed;
  6432. +
  6433. + /* convert to CPU endianness */
  6434. + for (i = 0; i < num_ranges; i++) {
  6435. + fqids[i].start = be32_to_cpup(fqids_off +
  6436. + i * sizeof(*fqids));
  6437. + fqids[i].count = be32_to_cpup(fqids_off +
  6438. + i * sizeof(*fqids) + sizeof(__be32));
  6439. + }
  6440. + }
  6441. +
  6442. + for (i = 0; i < num_ranges; i++) {
  6443. + switch (i) {
  6444. + case 0:
  6445. + /* The first queue is the error queue */
  6446. + if (fqids[i].count != 1)
  6447. + goto invalid_error_queue;
  6448. +
  6449. + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
  6450. + fqids[i].count, list,
  6451. + ptype == RX ?
  6452. + FQ_TYPE_RX_ERROR :
  6453. + FQ_TYPE_TX_ERROR);
  6454. + if (dpa_fq == NULL)
  6455. + goto fq_alloc_failed;
  6456. +
  6457. + if (ptype == RX)
  6458. + port_fqs->rx_errq = &dpa_fq[0];
  6459. + else
  6460. + port_fqs->tx_errq = &dpa_fq[0];
  6461. + break;
  6462. + case 1:
  6463. + /* the second queue is the default queue */
  6464. + if (fqids[i].count != 1)
  6465. + goto invalid_default_queue;
  6466. +
  6467. + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
  6468. + fqids[i].count, list,
  6469. + ptype == RX ?
  6470. + FQ_TYPE_RX_DEFAULT :
  6471. + FQ_TYPE_TX_CONFIRM);
  6472. + if (dpa_fq == NULL)
  6473. + goto fq_alloc_failed;
  6474. +
  6475. + if (ptype == RX)
  6476. + port_fqs->rx_defq = &dpa_fq[0];
  6477. + else
  6478. + port_fqs->tx_defq = &dpa_fq[0];
  6479. + break;
  6480. + default:
  6481. + /* all subsequent queues are either Rx PCD (normal or high priority) or Tx */
  6482. + if (ptype == RX) {
  6483. + if (!dpa_fq_alloc(dev, fqids[i].start,
  6484. + fqids[i].count, list,
  6485. + FQ_TYPE_RX_PCD) ||
  6486. + !dpa_fq_alloc(dev, fqids[i].start,
  6487. + fqids[i].count, list,
  6488. + FQ_TYPE_RX_PCD_HI_PRIO))
  6489. + goto fq_alloc_failed;
  6490. + } else {
  6491. + if (!dpa_fq_alloc(dev, fqids[i].start,
  6492. + fqids[i].count, list,
  6493. + FQ_TYPE_TX))
  6494. + goto fq_alloc_failed;
  6495. + }
  6496. + break;
  6497. + }
  6498. + }
  6499. +
  6500. + return 0;
  6501. +
  6502. +fq_alloc_failed:
  6503. +fqids_alloc_failed:
  6504. + dev_err(dev, "Cannot allocate memory for frame queues\n");
  6505. + return -ENOMEM;
  6506. +
  6507. +invalid_default_queue:
  6508. +invalid_error_queue:
  6509. + dev_err(dev, "Too many default or error queues\n");
  6510. + return -EINVAL;
  6511. +}
  6512. +EXPORT_SYMBOL(dpa_fq_probe_mac);
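+
+ /* Device tree sketch (illustrative values): the property parsed above is
+ * a list of <start count> pairs - one error FQ, one default FQ, then the
+ * PCD (Rx) or Tx ranges. An Rx port with 32 PCD queues might carry:
+ *
+ * fsl,qman-frame-queues-rx = <0x100 1 0x101 1 0x180 32>;
+ */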
  6513. +
  6514. +static u32 rx_pool_channel;
  6515. +static DEFINE_SPINLOCK(rx_pool_channel_init);
  6516. +
  6517. +int dpa_get_channel(void)
  6518. +{
  6519. + spin_lock(&rx_pool_channel_init);
  6520. + if (!rx_pool_channel) {
  6521. + u32 pool;
  6522. + int ret = qman_alloc_pool(&pool);
  6523. + if (!ret)
  6524. + rx_pool_channel = pool;
  6525. + }
  6526. + spin_unlock(&rx_pool_channel_init);
  6527. + if (!rx_pool_channel)
  6528. + return -ENOMEM;
  6529. + return rx_pool_channel;
  6530. +}
  6531. +EXPORT_SYMBOL(dpa_get_channel);
  6532. +
  6533. +void dpa_release_channel(void)
  6534. +{
  6535. + qman_release_pool(rx_pool_channel);
  6536. +}
  6537. +EXPORT_SYMBOL(dpa_release_channel);
  6538. +
  6539. +int dpaa_eth_add_channel(void *__arg)
  6540. +{
  6541. + const cpumask_t *cpus = qman_affine_cpus();
  6542. + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
  6543. + int cpu;
  6544. + struct qman_portal *portal;
  6545. +
  6546. + for_each_cpu(cpu, cpus) {
  6547. + portal = (struct qman_portal *)qman_get_affine_portal(cpu);
  6548. + qman_p_static_dequeue_add(portal, pool);
  6549. + }
  6550. + return 0;
  6551. +}
  6552. +EXPORT_SYMBOL(dpaa_eth_add_channel);
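+
+ /* Usage sketch: the opaque argument carries the pool channel id, e.g.
+ * (hypothetically) dpaa_eth_add_channel((void *)(unsigned long)channel),
+ * after which every affine portal also dequeues from that channel.
+ */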
  6553. +
  6554. +/**
  6555. + * Congestion group state change notification callback.
  6556. + * Stops the device's egress queues while they are congested and
  6557. + * wakes them upon exiting congested state.
  6558. + * Also updates some CGR-related stats.
  6559. + */
  6560. +static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
  6561. +
  6562. + int congested)
  6563. +{
  6564. + struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
  6565. + struct dpa_priv_s, cgr_data.cgr);
  6566. +
  6567. + if (congested) {
  6568. + priv->cgr_data.congestion_start_jiffies = jiffies;
  6569. + netif_tx_stop_all_queues(priv->net_dev);
  6570. + priv->cgr_data.cgr_congested_count++;
  6571. + } else {
  6572. + priv->cgr_data.congested_jiffies +=
  6573. + (jiffies - priv->cgr_data.congestion_start_jiffies);
  6574. + netif_tx_wake_all_queues(priv->net_dev);
  6575. + }
  6576. +}
  6577. +
  6578. +int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
  6579. +{
  6580. + struct qm_mcc_initcgr initcgr;
  6581. + u32 cs_th;
  6582. + int err;
  6583. +
  6584. + err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
  6585. + if (err < 0) {
  6586. + pr_err("Error %d allocating CGR ID\n", err);
  6587. + goto out_error;
  6588. + }
  6589. + priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
  6590. +
  6591. + /* Enable Congestion State Change Notifications and CS taildrop */
  6592. + initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
  6593. + initcgr.cgr.cscn_en = QM_CGR_EN;
  6594. +
  6595. + /* Set different thresholds based on the MAC speed.
  6596. + * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
  6597. + * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
  6598. + * In such cases, we ought to reconfigure the threshold, too.
  6599. + */
  6600. + if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
  6601. + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
  6602. + else
  6603. + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
  6604. + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
  6605. +
  6606. + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
  6607. + initcgr.cgr.cstd_en = QM_CGR_EN;
  6608. +
  6609. + err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
  6610. + &initcgr);
  6611. + if (err < 0) {
  6612. + pr_err("Error %d creating CGR with ID %d\n", err,
  6613. + priv->cgr_data.cgr.cgrid);
  6614. + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
  6615. + goto out_error;
  6616. + }
  6617. + pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
  6618. + priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
  6619. + priv->cgr_data.cgr.chan);
  6620. +
  6621. +out_error:
  6622. + return err;
  6623. +}
  6624. +EXPORT_SYMBOL(dpaa_eth_cgr_init);
  6625. +
  6626. +static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
  6627. + struct dpa_fq *fq,
  6628. + const struct qman_fq *template)
  6629. +{
  6630. + fq->fq_base = *template;
  6631. + fq->net_dev = priv->net_dev;
  6632. +
  6633. + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
  6634. + fq->channel = priv->channel;
  6635. +}
  6636. +
  6637. +static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
  6638. + struct dpa_fq *fq,
  6639. + struct fm_port *port,
  6640. + const struct qman_fq *template)
  6641. +{
  6642. + fq->fq_base = *template;
  6643. + fq->net_dev = priv->net_dev;
  6644. +
  6645. + if (port) {
  6646. + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
  6647. + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
  6648. + } else {
  6649. + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
  6650. + }
  6651. +}
  6652. +
  6653. +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
  6654. + struct fm_port *tx_port)
  6655. +{
  6656. + struct dpa_fq *fq;
  6657. + uint16_t portals[NR_CPUS];
  6658. + int cpu, portal_cnt = 0, num_portals = 0;
  6659. + uint32_t pcd_fqid, pcd_fqid_hi_prio;
  6660. + const cpumask_t *affine_cpus = qman_affine_cpus();
  6661. + int egress_cnt = 0, conf_cnt = 0;
  6662. +
  6663. + /* Prepare for PCD FQs init */
  6664. + for_each_cpu(cpu, affine_cpus)
  6665. + portals[num_portals++] = qman_affine_channel(cpu);
  6666. + if (num_portals == 0)
  6667. + dev_err(priv->net_dev->dev.parent,
  6668. + "No Qman software (affine) channels found");
  6669. +
  6670. + pcd_fqid = (priv->mac_dev) ?
  6671. + DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
  6672. + pcd_fqid_hi_prio = (priv->mac_dev) ?
  6673. + DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
  6674. +
  6675. + /* Initialize each FQ in the list */
  6676. + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
  6677. + switch (fq->fq_type) {
  6678. + case FQ_TYPE_RX_DEFAULT:
  6679. + BUG_ON(!priv->mac_dev);
  6680. + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
  6681. + break;
  6682. + case FQ_TYPE_RX_ERROR:
  6683. + BUG_ON(!priv->mac_dev);
  6684. + dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
  6685. + break;
  6686. + case FQ_TYPE_RX_PCD:
  6687. + /* For MACless we can't have dynamic Rx queues */
  6688. + BUG_ON(!priv->mac_dev && !fq->fqid);
  6689. + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
  6690. + if (!fq->fqid)
  6691. + fq->fqid = pcd_fqid++;
  6692. + fq->channel = portals[portal_cnt];
  6693. + portal_cnt = (portal_cnt + 1) % num_portals;
  6694. + break;
  6695. + case FQ_TYPE_RX_PCD_HI_PRIO:
  6696. + /* For MACless we can't have dynamic Hi Pri Rx queues */
  6697. + BUG_ON(!priv->mac_dev && !fq->fqid);
  6698. + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
  6699. + if (!fq->fqid)
  6700. + fq->fqid = pcd_fqid_hi_prio++;
  6701. + fq->channel = portals[portal_cnt];
  6702. + portal_cnt = (portal_cnt + 1) % num_portals;
  6703. + break;
  6704. + case FQ_TYPE_TX:
  6705. + dpa_setup_egress(priv, fq, tx_port,
  6706. + &fq_cbs->egress_ern);
  6707. + /* If we have more Tx queues than the number of cores,
  6708. + * just ignore the extra ones.
  6709. + */
  6710. + if (egress_cnt < DPAA_ETH_TX_QUEUES)
  6711. + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
  6712. + break;
  6713. + case FQ_TYPE_TX_CONFIRM:
  6714. + BUG_ON(!priv->mac_dev);
  6715. + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
  6716. + break;
  6717. + case FQ_TYPE_TX_CONF_MQ:
  6718. + BUG_ON(!priv->mac_dev);
  6719. + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
  6720. + priv->conf_fqs[conf_cnt++] = &fq->fq_base;
  6721. + break;
  6722. + case FQ_TYPE_TX_ERROR:
  6723. + BUG_ON(!priv->mac_dev);
  6724. + dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
  6725. + break;
  6726. + default:
  6727. + dev_warn(priv->net_dev->dev.parent,
  6728. + "Unknown FQ type detected!\n");
  6729. + break;
  6730. + }
  6731. + }
  6732. +
  6733. + /* The number of Tx queues may be smaller than the number of cores, if
  6734. + * the Tx queue range is specified in the device tree instead of being
  6735. + * dynamically allocated.
  6736. + * Make sure all CPUs receive a corresponding Tx queue.
  6737. + */
  6738. + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
  6739. + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
  6740. + if (fq->fq_type != FQ_TYPE_TX)
  6741. + continue;
  6742. + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
  6743. + if (egress_cnt == DPAA_ETH_TX_QUEUES)
  6744. + break;
  6745. + }
  6746. + }
  6747. +}
  6748. +EXPORT_SYMBOL(dpa_fq_setup);
  6749. +
  6750. +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
  6751. +{
  6752. + int _errno;
  6753. + const struct dpa_priv_s *priv;
  6754. + struct device *dev;
  6755. + struct qman_fq *fq;
  6756. + struct qm_mcc_initfq initfq;
  6757. + struct qman_fq *confq;
  6758. + int queue_id;
  6759. +
  6760. + priv = netdev_priv(dpa_fq->net_dev);
  6761. + dev = dpa_fq->net_dev->dev.parent;
  6762. +
  6763. + if (dpa_fq->fqid == 0)
  6764. + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
  6765. +
  6766. + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
  6767. +
  6768. + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
  6769. + if (_errno) {
  6770. + dev_err(dev, "qman_create_fq() failed\n");
  6771. + return _errno;
  6772. + }
  6773. + fq = &dpa_fq->fq_base;
  6774. +
  6775. + if (dpa_fq->init) {
  6776. + memset(&initfq, 0, sizeof(initfq));
  6777. +
  6778. + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
  6779. + /* FIXME: why would we want to keep an empty FQ in cache? */
  6780. + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
  6781. +
  6782. + /* Try to reduce the number of portal interrupts for
  6783. + * Tx Confirmation FQs.
  6784. + */
  6785. + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
  6786. + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
  6787. +
  6788. + /* FQ placement */
  6789. + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
  6790. +
  6791. + initfq.fqd.dest.channel = dpa_fq->channel;
  6792. + initfq.fqd.dest.wq = dpa_fq->wq;
  6793. +
  6794. + /* Put all egress queues in a congestion group of their own.
  6795. + * Sensu stricto, the Tx confirmation queues are Rx FQs,
  6796. + * rather than Tx - but they nonetheless account for the
  6797. + * memory footprint on behalf of egress traffic. We therefore
  6798. + * place them in the netdev's CGR, along with the Tx FQs.
  6799. + */
  6800. + if (dpa_fq->fq_type == FQ_TYPE_TX ||
  6801. + dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
  6802. + dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
  6803. + initfq.we_mask |= QM_INITFQ_WE_CGID;
  6804. + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
  6805. + initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
  6806. + /* Set a fixed overhead accounting, in an attempt to
  6807. + * reduce the impact of fixed-size skb shells and the
  6808. + * driver's needed headroom on system memory. This is
  6809. + * especially the case when the egress traffic is
  6810. + * composed of small datagrams.
  6811. + * Unfortunately, QMan's OAL value is capped to an
  6812. + * insufficient value, but even that is better than
  6813. + * no overhead accounting at all.
  6814. + */
  6815. + initfq.we_mask |= QM_INITFQ_WE_OAC;
  6816. + initfq.fqd.oac_init.oac = QM_OAC_CG;
  6817. + initfq.fqd.oac_init.oal =
  6818. + (signed char)(min(sizeof(struct sk_buff) +
  6819. + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
  6820. + }
  6821. +
  6822. + if (td_enable) {
  6823. + initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
  6824. + qm_fqd_taildrop_set(&initfq.fqd.td,
  6825. + DPA_FQ_TD, 1);
  6826. + initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
  6827. + }
  6828. +
  6829. + /* Configure the Tx confirmation queue, now that we know
  6830. + * which Tx queue it pairs with.
  6831. + */
  6832. + if (dpa_fq->fq_type == FQ_TYPE_TX) {
  6833. + queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
  6834. + if (queue_id >= 0) {
  6835. + confq = priv->conf_fqs[queue_id];
  6836. + if (confq) {
  6837. + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
  6838. + /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
  6839. + * A2V=1 (contextA A2 field is valid)
  6840. + * A0V=1 (contextA A0 field is valid)
  6841. + * B0V=1 (contextB field is valid)
  6842. + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
  6843. + * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
  6844. + */
  6845. + initfq.fqd.context_a.hi = 0x1e000000;
  6846. + initfq.fqd.context_a.lo = 0x80000000;
  6847. + }
  6848. + }
  6849. + }
  6850. +
  6851. + /* Put all *private* ingress queues in our "ingress CGR". */
  6852. + if (priv->use_ingress_cgr &&
  6853. + (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
  6854. + dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
  6855. + dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
  6856. + dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
  6857. + initfq.we_mask |= QM_INITFQ_WE_CGID;
  6858. + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
  6859. + initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
  6860. + /* Set a fixed overhead accounting, just like for the
  6861. + * egress CGR.
  6862. + */
  6863. + initfq.we_mask |= QM_INITFQ_WE_OAC;
  6864. + initfq.fqd.oac_init.oac = QM_OAC_CG;
  6865. + initfq.fqd.oac_init.oal =
  6866. + (signed char)(min(sizeof(struct sk_buff) +
  6867. + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
  6868. + }
  6869. +
  6870. + /* Initialization common to all ingress queues */
  6871. + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
  6872. + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
  6873. + initfq.fqd.fq_ctrl |=
  6874. + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
  6875. + initfq.fqd.context_a.stashing.exclusive =
  6876. + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
  6877. + QM_STASHING_EXCL_ANNOTATION;
  6878. + initfq.fqd.context_a.stashing.data_cl = 2;
  6879. + initfq.fqd.context_a.stashing.annotation_cl = 1;
  6880. + initfq.fqd.context_a.stashing.context_cl =
  6881. + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
  6882. + }
  6883. +
  6884. + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
  6885. + if (_errno < 0) {
  6886. + dev_err(dev, "qman_init_fq(%u) = %d\n",
  6887. + qman_fq_fqid(fq), _errno);
  6888. + qman_destroy_fq(fq, 0);
  6889. + return _errno;
  6890. + }
  6891. + }
  6892. +
  6893. + dpa_fq->fqid = qman_fq_fqid(fq);
  6894. +
  6895. + return 0;
  6896. +}
  6897. +EXPORT_SYMBOL(dpa_fq_init);
  6898. +
  6899. +int __cold __attribute__((nonnull))
  6900. +_dpa_fq_free(struct device *dev, struct qman_fq *fq)
  6901. +{
  6902. + int _errno, __errno;
  6903. + struct dpa_fq *dpa_fq;
  6904. + const struct dpa_priv_s *priv;
  6905. +
  6906. + _errno = 0;
  6907. +
  6908. + dpa_fq = container_of(fq, struct dpa_fq, fq_base);
  6909. + priv = netdev_priv(dpa_fq->net_dev);
  6910. +
  6911. + if (dpa_fq->init) {
  6912. + _errno = qman_retire_fq(fq, NULL);
  6913. + if (unlikely(_errno < 0) && netif_msg_drv(priv))
  6914. + dev_err(dev, "qman_retire_fq(%u) = %d\n",
  6915. + qman_fq_fqid(fq), _errno);
  6916. +
  6917. + __errno = qman_oos_fq(fq);
  6918. + if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
  6919. + dev_err(dev, "qman_oos_fq(%u) = %d\n",
  6920. + qman_fq_fqid(fq), __errno);
  6921. + if (_errno >= 0)
  6922. + _errno = __errno;
  6923. + }
  6924. + }
  6925. +
  6926. + qman_destroy_fq(fq, 0);
  6927. + list_del(&dpa_fq->list);
  6928. +
  6929. + return _errno;
  6930. +}
  6931. +EXPORT_SYMBOL(_dpa_fq_free);
  6932. +
  6933. +int __cold __attribute__((nonnull))
  6934. +dpa_fq_free(struct device *dev, struct list_head *list)
  6935. +{
  6936. + int _errno, __errno;
  6937. + struct dpa_fq *dpa_fq, *tmp;
  6938. +
  6939. + _errno = 0;
  6940. + list_for_each_entry_safe(dpa_fq, tmp, list, list) {
  6941. + __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
  6942. + if (unlikely(__errno < 0) && _errno >= 0)
  6943. + _errno = __errno;
  6944. + }
  6945. +
  6946. + return _errno;
  6947. +}
  6948. +EXPORT_SYMBOL(dpa_fq_free);
  6949. +
  6950. +static void
  6951. +dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
  6952. + struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
  6953. +{
  6954. + struct fm_port_params tx_port_param;
  6955. + bool frag_enabled = false;
  6956. +
  6957. + memset(&tx_port_param, 0, sizeof(tx_port_param));
  6958. + dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
  6959. + buf_layout, frag_enabled);
  6960. +}
  6961. +
  6962. +static void
  6963. +dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
  6964. + struct dpa_fq *errq, struct dpa_fq *defq,
  6965. + struct dpa_buffer_layout_s *buf_layout)
  6966. +{
  6967. + struct fm_port_params rx_port_param;
  6968. + int i;
  6969. + bool frag_enabled = false;
  6970. +
  6971. + memset(&rx_port_param, 0, sizeof(rx_port_param));
  6972. + count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
  6973. + rx_port_param.num_pools = (uint8_t)count;
  6974. + for (i = 0; i < count; i++) {
  6977. + rx_port_param.pool_param[i].id = bp[i].bpid;
  6978. + rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
  6979. + }
  6980. +
  6981. + dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
  6982. + buf_layout, frag_enabled);
  6983. +}
  6984. +
  6985. +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
  6986. +/* Defined as weak, to be implemented by fman pcd tester. */
  6987. +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
  6988. +__attribute__((weak));
  6989. +
  6990. +int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
  6991. +#else
  6992. +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
  6993. +
  6994. +int dpa_free_pcd_fqids(struct device *, uint32_t);
  6995. +
  6996. +#endif /* CONFIG_FSL_SDK_FMAN_TEST */
  6997. +
  6999. +int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
  7000. + uint8_t alignment, uint32_t *base_fqid)
  7001. +{
  7002. + dev_crit(dev, "callback not implemented!\n");
  7003. +
  7004. + return 0;
  7005. +}
  7006. +
  7007. +int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
  7008. +{
  7010. + dev_crit(dev, "callback not implemented!\n");
  7011. +
  7012. + return 0;
  7013. +}
  7014. +
  7015. +void dpaa_eth_init_ports(struct mac_device *mac_dev,
  7016. + struct dpa_bp *bp, size_t count,
  7017. + struct fm_port_fqs *port_fqs,
  7018. + struct dpa_buffer_layout_s *buf_layout,
  7019. + struct device *dev)
  7020. +{
  7021. + struct fm_port_pcd_param rx_port_pcd_param;
  7022. + struct fm_port *rxport = mac_dev->port_dev[RX];
  7023. + struct fm_port *txport = mac_dev->port_dev[TX];
  7024. +
  7025. + dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
  7026. + port_fqs->tx_defq, &buf_layout[TX]);
  7027. + dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
  7028. + port_fqs->rx_defq, &buf_layout[RX]);
  7029. +
  7030. + rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
  7031. + rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
  7032. + rx_port_pcd_param.dev = dev;
  7033. + fm_port_pcd_bind(rxport, &rx_port_pcd_param);
  7034. +}
  7035. +EXPORT_SYMBOL(dpaa_eth_init_ports);
  7036. +
  7037. +void dpa_release_sgt(struct qm_sg_entry *sgt)
  7038. +{
  7039. + struct dpa_bp *dpa_bp;
  7040. + struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
  7041. + uint8_t i = 0, j;
  7042. +
  7043. + memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
  7044. +
  7045. + do {
  7046. + dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
  7047. + DPA_BUG_ON(!dpa_bp);
  7048. +
  7049. + j = 0;
  7050. + do {
  7051. + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
  7052. + bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
  7053. +
  7054. + j++; i++;
  7055. + } while (j < ARRAY_SIZE(bmb) &&
  7056. + !qm_sg_entry_get_final(&sgt[i-1]) &&
  7057. + qm_sg_entry_get_bpid(&sgt[i-1]) ==
  7058. + qm_sg_entry_get_bpid(&sgt[i]));
  7059. +
  7060. + while (bman_release(dpa_bp->pool, bmb, j, 0))
  7061. + cpu_relax();
  7062. + } while (!qm_sg_entry_get_final(&sgt[i-1]));
  7063. +}
  7064. +EXPORT_SYMBOL(dpa_release_sgt);
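
Note the release batching above: the inner loop gathers consecutive SGT entries only while they stay in the same buffer pool, so every bman_release() call targets a single bpid, and each batch is capped at DPA_BUFF_RELEASE_MAX (8), the largest number of buffers BMan accepts in one release command.
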
  7065. +
  7066. +void __attribute__((nonnull))
  7067. +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
  7068. +{
  7069. + struct qm_sg_entry *sgt;
  7070. + struct dpa_bp *dpa_bp;
  7071. + struct bm_buffer bmb;
  7072. + dma_addr_t addr;
  7073. + void *vaddr;
  7074. +
  7075. + memset(&bmb, 0, sizeof(struct bm_buffer));
  7076. + bm_buffer_set64(&bmb, fd->addr);
  7077. +
  7078. + dpa_bp = dpa_bpid2pool(fd->bpid);
  7079. + DPA_BUG_ON(!dpa_bp);
  7080. +
  7081. + if (fd->format == qm_fd_sg) {
  7082. + vaddr = phys_to_virt(fd->addr);
  7083. + sgt = vaddr + dpa_fd_offset(fd);
  7084. +
  7085. + dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
  7086. + DMA_BIDIRECTIONAL);
  7087. +
  7088. + dpa_release_sgt(sgt);
  7089. + addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
  7090. + DMA_BIDIRECTIONAL);
  7091. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  7092. + dev_err(dpa_bp->dev, "DMA mapping failed");
  7093. + return;
  7094. + }
  7095. + bm_buffer_set64(&bmb, addr);
  7096. + }
  7097. +
  7098. + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
  7099. + cpu_relax();
  7100. +}
  7101. +EXPORT_SYMBOL(dpa_fd_release);
  7102. +
  7103. +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
  7104. + const struct qm_mr_entry *msg)
  7105. +{
  7106. + switch (msg->ern.rc & QM_MR_RC_MASK) {
  7107. + case QM_MR_RC_CGR_TAILDROP:
  7108. + percpu_priv->ern_cnt.cg_tdrop++;
  7109. + break;
  7110. + case QM_MR_RC_WRED:
  7111. + percpu_priv->ern_cnt.wred++;
  7112. + break;
  7113. + case QM_MR_RC_ERROR:
  7114. + percpu_priv->ern_cnt.err_cond++;
  7115. + break;
  7116. + case QM_MR_RC_ORPWINDOW_EARLY:
  7117. + percpu_priv->ern_cnt.early_window++;
  7118. + break;
  7119. + case QM_MR_RC_ORPWINDOW_LATE:
  7120. + percpu_priv->ern_cnt.late_window++;
  7121. + break;
  7122. + case QM_MR_RC_FQ_TAILDROP:
  7123. + percpu_priv->ern_cnt.fq_tdrop++;
  7124. + break;
  7125. + case QM_MR_RC_ORPWINDOW_RETIRED:
  7126. + percpu_priv->ern_cnt.fq_retired++;
  7127. + break;
  7128. + case QM_MR_RC_ORP_ZERO:
  7129. + percpu_priv->ern_cnt.orp_zero++;
  7130. + break;
  7131. + }
  7132. +}
  7133. +EXPORT_SYMBOL(count_ern);
  7134. +
  7135. +/**
  7136. + * Turn on HW checksum computation for this outgoing frame.
  7137. + * If the current protocol is not something we support in this regard
  7138. + * (or if the stack has already computed the SW checksum), we do nothing.
  7139. + *
  7140. + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
  7141. + * otherwise.
  7142. + *
  7143. + * Note that this function may modify the fd->cmd field and the skb data buffer
  7144. + * (the Parse Results area).
  7145. + */
  7146. +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
  7147. + struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
  7148. +{
  7149. + fm_prs_result_t *parse_result;
  7150. + struct iphdr *iph;
  7151. + struct ipv6hdr *ipv6h = NULL;
  7152. + u8 l4_proto;
  7153. + u16 ethertype = ntohs(skb->protocol);
  7154. + int retval = 0;
  7155. +
  7156. + if (skb->ip_summed != CHECKSUM_PARTIAL)
  7157. + return 0;
  7158. +
  7159. + /* Note: L3 csum seems to be already computed in sw, but we can't choose
  7160. + * L4 alone from the FM configuration anyway.
  7161. + */
  7162. +
  7163. + /* Fill in some fields of the Parse Results array, so the FMan
  7164. + * can find them as if they came from the FMan Parser.
  7165. + */
  7166. + parse_result = (fm_prs_result_t *)parse_results;
  7167. +
  7168. + /* If we're dealing with VLAN, get the real Ethernet type */
  7169. + if (ethertype == ETH_P_8021Q) {
  7170. + /* We can't always assume the MAC header is set correctly
  7171. + * by the stack, so reset to beginning of skb->data
  7172. + */
  7173. + skb_reset_mac_header(skb);
  7174. + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
  7175. + }
  7176. +
  7177. + /* Fill in the relevant L3 parse result fields
  7178. + * and read the L4 protocol type
  7179. + */
  7180. + switch (ethertype) {
  7181. + case ETH_P_IP:
  7182. + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
  7183. + iph = ip_hdr(skb);
  7184. + DPA_BUG_ON(iph == NULL);
  7185. + l4_proto = iph->protocol;
  7186. + break;
  7187. + case ETH_P_IPV6:
  7188. + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
  7189. + ipv6h = ipv6_hdr(skb);
  7190. + DPA_BUG_ON(ipv6h == NULL);
  7191. + l4_proto = ipv6h->nexthdr;
  7192. + break;
  7193. + default:
  7194. + /* We shouldn't even be here */
  7195. + if (netif_msg_tx_err(priv) && net_ratelimit())
  7196. + netdev_alert(priv->net_dev,
  7197. + "Can't compute HW csum for L3 proto 0x%x\n",
  7198. + ntohs(skb->protocol));
  7199. + retval = -EIO;
  7200. + goto return_error;
  7201. + }
  7202. +
  7203. + /* Fill in the relevant L4 parse result fields */
  7204. + switch (l4_proto) {
  7205. + case IPPROTO_UDP:
  7206. + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
  7207. + break;
  7208. + case IPPROTO_TCP:
  7209. + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
  7210. + break;
  7211. + default:
  7212. + /* This could just as well be a BUG() */
  7213. + if (netif_msg_tx_err(priv) && net_ratelimit())
  7214. + netdev_alert(priv->net_dev,
  7215. + "Can't compute HW csum for L4 proto 0x%x\n",
  7216. + l4_proto);
  7217. + retval = -EIO;
  7218. + goto return_error;
  7219. + }
  7220. +
  7221. + /* At index 0 is IPOffset_1 as defined in the Parse Results */
  7222. + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
  7223. + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
  7224. +
  7225. + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
  7226. + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
  7227. +
  7228. + /* On P1023 and similar platforms, fd->cmd interpretation could
  7229. + * be disabled by setting the CONTEXT_A ICMD bit. That bit is
  7230. + * currently never set, so no check is needed here; if/when
  7231. + * context_a is used in the future, this bit must be checked.
  7232. + */
  7233. +
  7234. +return_error:
  7235. + return retval;
  7236. +}
  7237. +EXPORT_SYMBOL(dpa_enable_tx_csum);
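
A typical Tx-path caller reserves its Tx headroom, points this helper at the Parse Results area inside that headroom, and only DMA-maps the frame afterwards, since the helper may write into the buffer. This mirrors the pattern dpa_generic_tx() uses with its dpa_generic_tx_csum() twin later in this patch:

	skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
	err = dpa_enable_tx_csum(priv, skb, &fd,
				 ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0))
		goto csum_failed;
	/* only now is it safe to dma_map_single() the buffer */
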
  7238. +
  7239. +#ifdef CONFIG_FSL_DPAA_CEETM
  7240. +void dpa_enable_ceetm(struct net_device *dev)
  7241. +{
  7242. + struct dpa_priv_s *priv = netdev_priv(dev);
  7243. + priv->ceetm_en = true;
  7244. +}
  7245. +EXPORT_SYMBOL(dpa_enable_ceetm);
  7246. +
  7247. +void dpa_disable_ceetm(struct net_device *dev)
  7248. +{
  7249. + struct dpa_priv_s *priv = netdev_priv(dev);
  7250. + priv->ceetm_en = false;
  7251. +}
  7252. +EXPORT_SYMBOL(dpa_disable_ceetm);
  7253. +#endif
  7254. --- /dev/null
  7255. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
  7256. @@ -0,0 +1,227 @@
  7257. +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
  7258. + *
  7259. + * Redistribution and use in source and binary forms, with or without
  7260. + * modification, are permitted provided that the following conditions are met:
  7261. + * * Redistributions of source code must retain the above copyright
  7262. + * notice, this list of conditions and the following disclaimer.
  7263. + * * Redistributions in binary form must reproduce the above copyright
  7264. + * notice, this list of conditions and the following disclaimer in the
  7265. + * documentation and/or other materials provided with the distribution.
  7266. + * * Neither the name of Freescale Semiconductor nor the
  7267. + * names of its contributors may be used to endorse or promote products
  7268. + * derived from this software without specific prior written permission.
  7269. + *
  7270. + *
  7271. + * ALTERNATIVELY, this software may be distributed under the terms of the
  7272. + * GNU General Public License ("GPL") as published by the Free Software
  7273. + * Foundation, either version 2 of that License or (at your option) any
  7274. + * later version.
  7275. + *
  7276. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  7277. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  7278. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  7279. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  7280. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  7281. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  7282. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  7283. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7284. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  7285. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7286. + */
  7287. +
  7288. +#ifndef __DPAA_ETH_COMMON_H
  7289. +#define __DPAA_ETH_COMMON_H
  7290. +
  7291. +#include <linux/etherdevice.h> /* struct net_device */
  7292. +#include <linux/fsl_bman.h> /* struct bm_buffer */
  7293. +#include <linux/of_platform.h> /* struct platform_device */
  7294. +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
  7295. +
  7296. +#include "dpaa_eth.h"
  7297. +#include "lnxwrp_fsl_fman.h"
  7298. +
  7299. +#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
  7300. + frag_enabled) \
  7301. +{ \
  7302. + param.errq = errq_id; \
  7303. + param.defq = defq_id; \
  7304. + param.priv_data_size = buf_layout->priv_data_size; \
  7305. + param.parse_results = buf_layout->parse_results; \
  7306. + param.hash_results = buf_layout->hash_results; \
  7307. + param.frag_enable = frag_enabled; \
  7308. + param.time_stamp = buf_layout->time_stamp; \
  7309. + param.manip_extra_space = buf_layout->manip_extra_space; \
  7310. + param.data_align = buf_layout->data_align; \
  7311. + fm_set_##type##_port_params(port, &param); \
  7312. +}
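
The type argument is token-pasted into the FMan wrapper name, so the Rx invocation in dpaa_eth_init_rx_port() above expands (roughly) to:

	rx_port_param.errq = errq->fqid;
	rx_port_param.defq = defq->fqid;
	/* ... remaining buffer layout fields ... */
	fm_set_rx_port_params(port, &rx_port_param);

and the Tx invocation likewise ends in fm_set_tx_port_params().
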
  7313. +
  7314. +#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
  7315. +
  7316. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  7317. +/* each S/G entry may be split into two S/G entries (4K boundary workaround) */
  7318. +#define DPA_SGT_ENTRIES_THRESHOLD 7
  7319. +#else
  7320. +#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
  7321. +#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
  7322. +
  7324. +#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
  7325. +
  7326. +/* return codes for the dpaa-eth hooks */
  7327. +enum dpaa_eth_hook_result {
  7328. + /* fd/skb was retained by the hook.
  7329. + *
  7330. + * On the Rx path, this means the Ethernet driver will _not_
  7331. + * deliver the skb to the stack. Instead, the hook implementation
  7332. + * is expected to properly dispose of the skb.
  7333. + *
  7334. + * On the Tx path, the Ethernet driver's dpa_tx() function will
  7335. + * immediately return NETDEV_TX_OK. The hook implementation is expected
  7336. + * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
  7337. + * unless you know exactly what you're doing!
  7338. + *
  7339. + * On the confirmation/error paths, the Ethernet driver will _not_
  7340. + * perform any fd cleanup, nor update the interface statistics.
  7341. + */
  7342. + DPAA_ETH_STOLEN,
  7343. + /* fd/skb was returned to the Ethernet driver for regular processing.
  7344. + * The hook is not allowed to, for instance, reallocate the skb (as if
  7345. + * by linearizing, copying, cloning or reallocating the headroom).
  7346. + */
  7347. + DPAA_ETH_CONTINUE
  7348. +};
  7349. +
  7350. +typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
  7351. + struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
  7352. +typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
  7353. + struct sk_buff *skb, struct net_device *net_dev);
  7354. +typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
  7355. + struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
  7356. +
  7357. +/* used in napi related functions */
  7358. +extern u16 qman_portal_max;
  7359. +
  7360. +/* from dpa_ethtool.c */
  7361. +extern const struct ethtool_ops dpa_ethtool_ops;
  7362. +
  7363. +#ifdef CONFIG_FSL_DPAA_HOOKS
  7364. +/* Various hooks used for unit-testing and/or fastpath optimizations.
  7365. + * Currently only one set of such hooks is supported.
  7366. + */
  7367. +struct dpaa_eth_hooks_s {
  7368. + /* Invoked on the Tx private path, immediately after receiving the skb
  7369. + * from the stack.
  7370. + */
  7371. + dpaa_eth_egress_hook_t tx;
  7372. +
  7373. + /* Invoked on the Rx private path, right before passing the skb
  7374. + * up the stack. At that point, the packet's protocol id has already
  7375. + * been set. The skb's data pointer is now at the L3 header, and
  7376. + * skb->mac_header points to the L2 header. skb->len has been adjusted
  7377. + * to be the length of L3+payload (i.e., the length of the
  7378. + * original frame minus the L2 header len).
  7379. + * For more details on what the skb looks like, see eth_type_trans().
  7380. + */
  7381. + dpaa_eth_ingress_hook_t rx_default;
  7382. +
  7383. + /* Driver hook for the Rx error private path. */
  7384. + dpaa_eth_confirm_hook_t rx_error;
  7385. + /* Driver hook for the Tx confirmation private path. */
  7386. + dpaa_eth_confirm_hook_t tx_confirm;
  7387. + /* Driver hook for the Tx error private path. */
  7388. + dpaa_eth_confirm_hook_t tx_error;
  7389. +};
  7390. +
  7391. +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
  7392. +
  7393. +extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
  7394. +#endif
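
When CONFIG_FSL_DPAA_HOOKS is set, a consumer (e.g. a unit-test module) fills in a struct dpaa_eth_hooks_s and registers it once at init time. A minimal sketch follows; the hook signature comes from the typedefs above, while the FQID value and the function names are made up for illustration:

	static enum dpaa_eth_hook_result test_rx_hook(struct sk_buff *skb,
						      struct net_device *net_dev,
						      u32 fqid)
	{
		/* steal frames arriving on one (hypothetical) PCD FQ ... */
		if (fqid == 0x100) {
			dev_kfree_skb(skb);	/* the hook now owns the skb */
			return DPAA_ETH_STOLEN;
		}
		/* ... and let everything else go up the stack as usual */
		return DPAA_ETH_CONTINUE;
	}

	static struct dpaa_eth_hooks_s test_hooks = {
		.rx_default = test_rx_hook,
	};

	/* from the consumer's module_init(): */
	fsl_dpaa_eth_set_hooks(&test_hooks);
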
  7395. +
  7396. +int dpa_netdev_init(struct net_device *net_dev,
  7397. + const uint8_t *mac_addr,
  7398. + uint16_t tx_timeout);
  7399. +int __cold dpa_start(struct net_device *net_dev);
  7400. +int __cold dpa_stop(struct net_device *net_dev);
  7401. +void __cold dpa_timeout(struct net_device *net_dev);
  7402. +struct rtnl_link_stats64 * __cold
  7403. +dpa_get_stats64(struct net_device *net_dev,
  7404. + struct rtnl_link_stats64 *stats);
  7405. +int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
  7406. +int dpa_ndo_init(struct net_device *net_dev);
  7407. +int dpa_set_features(struct net_device *dev, netdev_features_t features);
  7408. +netdev_features_t dpa_fix_features(struct net_device *dev,
  7409. + netdev_features_t features);
  7410. +#ifdef CONFIG_FSL_DPAA_TS
  7411. +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
  7412. + enum port_type rx_tx, const void *data);
  7413. +/* Updates the skb shared hw timestamp from the hardware timestamp */
  7414. +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
  7415. + struct skb_shared_hwtstamps *shhwtstamps, const void *data);
  7416. +#endif /* CONFIG_FSL_DPAA_TS */
  7417. +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
  7418. +int __cold dpa_remove(struct platform_device *of_dev);
  7419. +struct mac_device * __cold __must_check
  7420. +__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
  7421. +int dpa_set_mac_address(struct net_device *net_dev, void *addr);
  7422. +void dpa_set_rx_mode(struct net_device *net_dev);
  7423. +void dpa_set_buffers_layout(struct mac_device *mac_dev,
  7424. + struct dpa_buffer_layout_s *layout);
  7425. +int __attribute__((nonnull))
  7426. +dpa_bp_alloc(struct dpa_bp *dpa_bp);
  7427. +void __cold __attribute__((nonnull))
  7428. +dpa_bp_free(struct dpa_priv_s *priv);
  7429. +struct dpa_bp *dpa_bpid2pool(int bpid);
  7430. +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
  7431. +bool dpa_bpid2pool_use(int bpid);
  7432. +void dpa_bp_drain(struct dpa_bp *bp);
  7433. +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  7434. +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
  7435. + void *accel_priv, select_queue_fallback_t fallback);
  7436. +#endif
  7437. +struct dpa_fq *dpa_fq_alloc(struct device *dev,
  7438. + u32 fq_start,
  7439. + u32 fq_count,
  7440. + struct list_head *list,
  7441. + enum dpa_fq_type fq_type);
  7442. +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
  7443. + struct fm_port_fqs *port_fqs,
  7444. + bool tx_conf_fqs_per_core,
  7445. + enum port_type ptype);
  7446. +int dpa_get_channel(void);
  7447. +void dpa_release_channel(void);
  7448. +int dpaa_eth_add_channel(void *__arg);
  7449. +int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
  7450. +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
  7451. + struct fm_port *tx_port);
  7452. +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
  7453. +int __cold __attribute__((nonnull))
  7454. +dpa_fq_free(struct device *dev, struct list_head *list);
  7455. +void dpaa_eth_init_ports(struct mac_device *mac_dev,
  7456. + struct dpa_bp *bp, size_t count,
  7457. + struct fm_port_fqs *port_fqs,
  7458. + struct dpa_buffer_layout_s *buf_layout,
  7459. + struct device *dev);
  7460. +void dpa_release_sgt(struct qm_sg_entry *sgt);
  7461. +void __attribute__((nonnull))
  7462. +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
  7463. +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
  7464. + const struct qm_mr_entry *msg);
  7465. +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
  7466. + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
  7467. +#ifdef CONFIG_FSL_DPAA_CEETM
  7468. +void dpa_enable_ceetm(struct net_device *dev);
  7469. +void dpa_disable_ceetm(struct net_device *dev);
  7470. +#endif
  7471. +struct proxy_device {
  7472. + struct mac_device *mac_dev;
  7473. +};
  7474. +
  7475. +/* MAC device control functions exposed by the proxy interface */
  7476. +int dpa_proxy_start(struct net_device *net_dev);
  7477. +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
  7478. +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
  7479. + struct net_device *net_dev);
  7480. +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
  7481. + struct net_device *net_dev);
  7482. +
  7483. +#endif /* __DPAA_ETH_COMMON_H */
  7484. --- /dev/null
  7485. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
  7486. @@ -0,0 +1,1735 @@
  7487. +/* Copyright 2013-2015 Freescale Semiconductor Inc.
  7488. + *
  7489. + * Redistribution and use in source and binary forms, with or without
  7490. + * modification, are permitted provided that the following conditions are met:
  7491. + * * Redistributions of source code must retain the above copyright
  7492. + * notice, this list of conditions and the following disclaimer.
  7493. + * * Redistributions in binary form must reproduce the above copyright
  7494. + * notice, this list of conditions and the following disclaimer in the
  7495. + * documentation and/or other materials provided with the distribution.
  7496. + * * Neither the name of Freescale Semiconductor nor the
  7497. + * names of its contributors may be used to endorse or promote products
  7498. + * derived from this software without specific prior written permission.
  7499. + *
  7500. + *
  7501. + * ALTERNATIVELY, this software may be distributed under the terms of the
  7502. + * GNU General Public License ("GPL") as published by the Free Software
  7503. + * Foundation, either version 2 of that License or (at your option) any
  7504. + * later version.
  7505. + *
  7506. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  7507. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  7508. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  7509. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  7510. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  7511. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  7512. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  7513. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7514. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  7515. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7516. + */
  7517. +
  7518. +#include <linux/init.h>
  7519. +#include <linux/module.h>
  7520. +#include <linux/kthread.h>
  7521. +#include <linux/of_net.h>
  7522. +#include <linux/if_vlan.h>
  7523. +#include <linux/ip.h>
  7524. +#include <linux/ipv6.h>
  7525. +#include <linux/percpu.h>
  7526. +
  7527. +#include "dpaa_eth.h"
  7528. +#include "dpaa_eth_common.h"
  7529. +#include "dpaa_eth_base.h"
  7530. +#include "dpaa_eth_generic.h"
  7531. +
  7532. +#define DPA_DEFAULT_TX_HEADROOM 64
  7533. +#define DPA_GENERIC_SKB_COPY_MAX_SIZE 256
  7534. +#define DPA_GENERIC_NAPI_WEIGHT 64
  7535. +#define DPA_GENERIC_DESCRIPTION "FSL DPAA Generic Ethernet driver"
  7536. +#define DPA_GENERIC_BUFFER_QUOTA 4
  7537. +
  7538. +MODULE_LICENSE("Dual BSD/GPL");
  7539. +MODULE_DESCRIPTION(DPA_GENERIC_DESCRIPTION);
  7540. +
  7541. +static uint8_t generic_debug = -1;
  7542. +module_param(generic_debug, byte, S_IRUGO);
  7543. +MODULE_PARM_DESC(generic_debug, "Module/Driver verbosity level");
  7544. +
  7545. +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
  7546. +static uint16_t tx_timeout = 1000;
  7547. +module_param(tx_timeout, ushort, S_IRUGO);
  7548. +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
  7549. +
  7550. +struct rtnl_link_stats64 *__cold
  7551. +dpa_generic_get_stats64(struct net_device *netdev,
  7552. + struct rtnl_link_stats64 *stats);
  7553. +static int dpa_generic_set_mac_address(struct net_device *net_dev,
  7554. + void *addr);
  7555. +static int __cold dpa_generic_start(struct net_device *netdev);
  7556. +static int __cold dpa_generic_stop(struct net_device *netdev);
  7557. +static int dpa_generic_eth_probe(struct platform_device *_of_dev);
  7558. +static int dpa_generic_remove(struct platform_device *of_dev);
  7559. +static void dpa_generic_ern(struct qman_portal *portal,
  7560. + struct qman_fq *fq,
  7561. + const struct qm_mr_entry *msg);
  7562. +static int __hot dpa_generic_tx(struct sk_buff *skb,
  7563. + struct net_device *netdev);
  7564. +static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf);
  7565. +static void dpa_generic_drain_sg_bp(struct dpa_bp *sg_bp, u8 nbuf);
  7566. +
  7567. +static const struct net_device_ops dpa_generic_ops = {
  7568. + .ndo_open = dpa_generic_start,
  7569. + .ndo_start_xmit = dpa_generic_tx,
  7570. + .ndo_stop = dpa_generic_stop,
  7571. + .ndo_set_mac_address = dpa_generic_set_mac_address,
  7572. + .ndo_tx_timeout = dpa_timeout,
  7573. + .ndo_get_stats64 = dpa_generic_get_stats64,
  7574. + .ndo_init = dpa_ndo_init,
  7575. + .ndo_set_features = dpa_set_features,
  7576. + .ndo_fix_features = dpa_fix_features,
  7577. + .ndo_change_mtu = dpa_change_mtu,
  7578. +};
  7579. +
  7580. +static void dpa_generic_draining_timer(unsigned long arg)
  7581. +{
  7582. + struct dpa_generic_priv_s *priv = (struct dpa_generic_priv_s *)arg;
  7583. +
  7584. + dpa_generic_drain_bp(priv->draining_tx_bp, DPA_GENERIC_BUFFER_QUOTA);
  7585. + dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp,
  7586. + DPA_GENERIC_BUFFER_QUOTA);
  7587. +
  7588. + if (priv->net_dev->flags & IFF_UP)
  7589. + mod_timer(&(priv->timer), jiffies + 1);
  7590. +}
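
The timer is armed from dpa_generic_start() below (jiffies + 100) and re-arms itself every jiffy while the interface is up. The one-time setup the probe path is expected to perform, assuming the classic pre-4.15 timer API that this handler's (unsigned long arg) signature implies:

	setup_timer(&priv->timer, dpa_generic_draining_timer,
		    (unsigned long)priv);
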
  7591. +
  7592. +struct rtnl_link_stats64 *__cold
  7593. +dpa_generic_get_stats64(struct net_device *netdev,
  7594. + struct rtnl_link_stats64 *stats)
  7595. +{
  7596. + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
  7597. + u64 *cpustats;
  7598. + u64 *netstats = (u64 *)stats;
  7599. + int i, j;
  7600. + struct dpa_percpu_priv_s *percpu_priv;
  7601. + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
  7602. +
  7603. + for_each_online_cpu(i) {
  7604. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  7605. +
  7606. + cpustats = (u64 *)&percpu_priv->stats;
  7607. +
  7608. + for (j = 0; j < numstats; j++)
  7609. + netstats[j] += cpustats[j];
  7610. + }
  7611. +
  7612. + return stats;
  7613. +}
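
The summation above treats struct rtnl_link_stats64 as a flat u64 array, which is safe because every field of that structure is a __u64. A build-time guard one could add inside the function (illustrative, not part of this patch):

	BUILD_BUG_ON(sizeof(struct rtnl_link_stats64) % sizeof(u64) != 0);
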
  7614. +
  7615. +static int dpa_generic_set_mac_address(struct net_device *net_dev,
  7616. + void *addr)
  7617. +{
  7618. + const struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
  7619. + int _errno;
  7620. +
  7621. + _errno = eth_mac_addr(net_dev, addr);
  7622. + if (_errno < 0) {
  7623. + if (netif_msg_drv(priv))
  7624. + netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
  7625. + return _errno;
  7626. + }
  7627. +
  7628. + return 0;
  7629. +}
  7630. +
  7631. +static const struct of_device_id dpa_generic_match[] = {
  7632. + {
  7633. + .compatible = "fsl,dpa-ethernet-generic"
  7634. + },
  7635. + {}
  7636. +};
  7637. +
  7638. +MODULE_DEVICE_TABLE(of, dpa_generic_match);
  7639. +
  7640. +static struct platform_driver dpa_generic_driver = {
  7641. + .driver = {
  7642. + .name = KBUILD_MODNAME,
  7643. + .of_match_table = dpa_generic_match,
  7644. + .owner = THIS_MODULE,
  7645. + },
  7646. + .probe = dpa_generic_eth_probe,
  7647. + .remove = dpa_generic_remove
  7648. +};
  7649. +
  7650. +static int get_port_ref(struct device_node *dev_node,
  7651. + struct fm_port **port)
  7652. +{
  7653. + struct platform_device *port_of_dev = NULL;
  7654. + struct device *op_dev = NULL;
  7655. + struct device_node *port_node = NULL;
  7656. +
  7657. + port_node = of_parse_phandle(dev_node, "fsl,fman-oh-port", 0);
  7658. + if (port_node == NULL)
  7659. + return -EINVAL;
  7660. +
  7661. + port_of_dev = of_find_device_by_node(port_node);
  7662. + of_node_put(port_node);
  7663. +
  7664. + if (port_of_dev == NULL)
  7665. + return -EINVAL;
  7666. +
  7667. + /* get the reference to oh port from FMD */
  7668. + op_dev = &port_of_dev->dev;
  7669. + *port = fm_port_bind(op_dev);
  7670. +
  7671. + if (*port == NULL)
  7672. + return -EINVAL;
  7673. +
  7674. + return 0;
  7675. +}
  7676. +
  7677. +static void dpaa_generic_napi_enable(struct dpa_generic_priv_s *priv)
  7678. +{
  7679. + struct dpa_percpu_priv_s *percpu_priv;
  7680. + int i, j;
  7681. +
  7682. + for_each_possible_cpu(i) {
  7683. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  7684. +
  7685. + for (j = 0; j < qman_portal_max; j++)
  7686. + napi_enable(&percpu_priv->np[j].napi);
  7687. + }
  7688. +}
  7689. +
  7690. +static void dpaa_generic_napi_disable(struct dpa_generic_priv_s *priv)
  7691. +{
  7692. + struct dpa_percpu_priv_s *percpu_priv;
  7693. + int i, j;
  7694. +
  7695. + for_each_possible_cpu(i) {
  7696. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  7697. +
  7698. + for (j = 0; j < qman_portal_max; j++)
  7699. + napi_disable(&percpu_priv->np[j].napi);
  7700. + }
  7701. +}
  7702. +
  7703. +static struct device_node *get_rx_op_port_node(struct platform_device *_of_dev)
  7704. +{
  7705. + struct device *dev = &_of_dev->dev;
  7706. + struct device_node *port_node = NULL;
  7707. + struct device_node *onic_node = NULL;
  7708. + int num_ports = 0;
  7709. +
  7710. + onic_node = dev->of_node;
  7711. +
  7712. + num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
  7713. + if (num_ports != 2) {
  7714. + dev_err(dev, "There should be two O/H port handles in the device tree\n");
  7715. + return ERR_PTR(-EINVAL);
  7716. + }
  7717. +
  7718. + port_node = of_parse_phandle(onic_node, "fsl,oh-ports", 0);
  7719. + if (port_node == NULL) {
  7720. + dev_err(dev, "Cannot find O/H port node in the device tree\n");
  7721. + return ERR_PTR(-EFAULT);
  7722. + }
  7723. +
  7724. + return port_node;
  7725. +}
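
For orientation, a hypothetical oNIC node satisfying this lookup carries two O/H port handles, the first of which is treated as the Rx port. The property names are the ones parsed in this file; the labels and values are made up:

	/* ethernet@0 {
	 *	compatible = "fsl,dpa-ethernet-generic";
	 *	fsl,oh-ports = <&fman0_oh2 &fman0_oh3>;	// <rx tx>
	 *	fsl,qman-frame-queues-tx = <0x1000 8>;
	 * };
	 */
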
  7726. +
  7727. +static int __cold dpa_generic_start(struct net_device *netdev)
  7728. +{
  7729. + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
  7730. +
  7731. + /* seed default buffer pool */
  7732. + dpa_bp_priv_seed(priv->rx_bp);
  7733. +
  7734. + dpaa_generic_napi_enable(priv);
  7735. + netif_tx_start_all_queues(netdev);
  7736. +
  7737. + mod_timer(&priv->timer, jiffies + 100);
  7738. +
  7739. + return 0;
  7740. +}
  7741. +
  7742. +static int __cold dpa_generic_stop(struct net_device *netdev)
  7743. +{
  7744. + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
  7745. +
  7746. + netif_tx_stop_all_queues(netdev);
  7747. + dpaa_generic_napi_disable(priv);
  7748. +
  7749. + return 0;
  7750. +}
  7751. +
  7752. +static enum qman_cb_dqrr_result __hot
  7753. +dpa_generic_rx_err_dqrr(struct qman_portal *portal,
  7754. + struct qman_fq *fq,
  7755. + const struct qm_dqrr_entry *dq)
  7756. +{
  7757. + struct net_device *netdev;
  7758. + struct dpa_generic_priv_s *priv;
  7759. + struct dpa_percpu_priv_s *percpu_priv;
  7760. + const struct qm_fd *fd;
  7761. + int *countptr;
  7762. +
  7763. + netdev = ((struct dpa_fq *)fq)->net_dev;
  7764. + priv = netdev_priv(netdev);
  7765. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  7766. + countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
  7767. + fd = &dq->fd;
  7768. +
  7769. + /* TODO: extract bpid from the fd; when multiple bps are supported
  7770. + * there won't be a default bp
  7771. + */
  7772. +
  7773. + if (dpaa_eth_napi_schedule(percpu_priv, portal))
  7774. + return qman_cb_dqrr_stop;
  7775. +
  7776. + if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
  7777. + /* Unable to refill the buffer pool due to insufficient
  7778. + * system memory. Just release the frame back into the pool,
  7779. + * otherwise we'll soon end up with an empty buffer pool.
  7780. + */
  7781. + dpa_fd_release(netdev, fd);
  7782. + goto qman_consume;
  7783. + }
  7784. +
  7785. + /* limit common, possibly innocuous Rx FIFO Overflow errors'
  7786. + * interference with zero-loss convergence benchmark results.
  7787. + */
  7788. + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
  7789. + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
  7790. + else if (netif_msg_hw(priv) && net_ratelimit())
  7792. + netdev_err(netdev, "Err FD status 2 = 0x%08x\n",
  7793. + fd->status & FM_FD_STAT_RX_ERRORS);
  7794. +
  7796. + percpu_priv->stats.rx_errors++;
  7797. +
  7798. + if (fd->status & FM_PORT_FRM_ERR_DMA)
  7799. + percpu_priv->rx_errors.dme++;
  7800. + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
  7801. + percpu_priv->rx_errors.fpe++;
  7802. + if (fd->status & FM_PORT_FRM_ERR_SIZE)
  7803. + percpu_priv->rx_errors.fse++;
  7804. + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
  7805. + percpu_priv->rx_errors.phe++;
  7806. +
  7807. + /* TODO dpa_csum_validation */
  7808. +
  7809. + dpa_fd_release(netdev, fd);
  7810. +
  7811. +qman_consume:
  7812. + return qman_cb_dqrr_consume;
  7813. +}
  7814. +
  7815. +
  7816. +static enum qman_cb_dqrr_result __hot
  7817. +dpa_generic_rx_dqrr(struct qman_portal *portal,
  7818. + struct qman_fq *fq,
  7819. + const struct qm_dqrr_entry *dq)
  7820. +{
  7821. + struct net_device *netdev;
  7822. + struct dpa_generic_priv_s *priv;
  7823. + struct dpa_bp *bp;
  7824. + struct dpa_percpu_priv_s *percpu_priv;
  7825. + struct sk_buff **skbh;
  7826. + struct sk_buff *skb;
  7827. + const struct qm_fd *fd = &dq->fd;
  7828. + unsigned int skb_len;
  7829. + u32 fd_status = fd->status;
  7830. + u64 pad;
  7831. + dma_addr_t addr = qm_fd_addr(fd);
  7832. + unsigned int data_start;
  7833. + unsigned long skb_addr;
  7834. + int *countptr;
  7835. +
  7836. + netdev = ((struct dpa_fq *)fq)->net_dev;
  7837. + priv = netdev_priv(netdev);
  7838. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  7839. + countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
  7840. +
  7841. + /* This is needed for TCP traffic, as draining only on the Tx
  7842. + * path is not enough
  7843. + */
  7844. + dpa_generic_drain_bp(priv->draining_tx_bp, 1);
  7845. + dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp, 1);
  7846. +
  7847. + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
  7848. + return qman_cb_dqrr_stop;
  7849. +
  7850. + if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
  7851. + /* Unable to refill the buffer pool due to insufficient
  7852. + * system memory. Just release the frame back into the pool,
  7853. + * otherwise we'll soon end up with an empty buffer pool.
  7854. + */
  7855. + dpa_fd_release(netdev, fd);
  7856. + goto qman_consume;
  7857. + }
  7858. +
  7859. + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), -1);
  7860. +
  7861. + if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
  7862. + if (netif_msg_hw(priv) && net_ratelimit())
  7863. + netdev_warn(netdev, "FD status = 0x%08x\n",
  7864. + fd_status & FM_FD_STAT_RX_ERRORS);
  7865. +
  7866. + percpu_priv->stats.rx_errors++;
  7867. + dpa_fd_release(netdev, fd);
  7868. + goto qman_consume;
  7869. + }
  7870. + if (unlikely(fd->format != qm_fd_contig)) {
  7871. + percpu_priv->stats.rx_dropped++;
  7872. + if (netif_msg_rx_status(priv) && net_ratelimit())
  7873. + netdev_warn(netdev, "Dropping a SG frame\n");
  7874. + dpa_fd_release(netdev, fd);
  7875. + goto qman_consume;
  7876. + }
  7877. +
  7878. + bp = dpa_bpid2pool(fd->bpid);
  7879. +
  7880. + /* find out the pad */
  7881. + skb_addr = virt_to_phys(skb->head);
  7882. + pad = addr - skb_addr;
  7883. +
  7884. + dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
  7885. +
  7886. + countptr = raw_cpu_ptr(bp->percpu_count);
  7887. + (*countptr)--;
  7888. +
  7889. + /* The skb is currently pointed at head + headroom. The packet
  7890. + * starts at skb->head + pad + fd offset.
  7891. + */
  7892. + data_start = (unsigned int)(pad + dpa_fd_offset(fd) -
  7893. + skb_headroom(skb));
  7894. + skb_put(skb, dpa_fd_length(fd) + data_start);
  7895. + skb_pull(skb, data_start);
  7896. + skb->protocol = eth_type_trans(skb, netdev);
  7897. + if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) {
  7898. + percpu_priv->stats.rx_dropped++;
  7899. + dev_kfree_skb(skb);
  7900. + goto qman_consume;
  7901. + }
  7902. +
  7903. + skb_len = skb->len;
  7904. +
  7905. + if (fd->status & FM_FD_STAT_L4CV)
  7906. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  7907. + else
  7908. + skb->ip_summed = CHECKSUM_NONE;
  7909. +
  7910. + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
  7911. + goto qman_consume;
  7912. +
  7913. + percpu_priv->stats.rx_packets++;
  7914. + percpu_priv->stats.rx_bytes += skb_len;
  7915. +
  7916. +qman_consume:
  7917. + return qman_cb_dqrr_consume;
  7918. +}
  7919. +
  7920. +static void dpa_generic_drain_sg_bp(struct dpa_bp *sgbp, u8 nbuf)
  7921. +{
  7922. + int ret;
  7923. + struct bm_buffer bmb[8];
  7924. +
  7925. + do {
  7926. + ret = bman_acquire(sgbp->pool, bmb, nbuf, 0);
  7927. + } while (ret >= 0);
  7928. +}
  7929. +
  7930. +inline void dpa_release_sg(struct sk_buff *skb, dma_addr_t addr,
  7931. + struct dpa_bp *bp)
  7932. +{
  7933. + struct qm_sg_entry *sgt = phys_to_virt(addr + DPA_DEFAULT_TX_HEADROOM);
  7934. + int nr_frags = skb_shinfo(skb)->nr_frags;
  7935. + dma_addr_t sg_addr;
  7936. + int j;
  7937. +
  7938. + dma_unmap_single(bp->dev, addr, DPA_DEFAULT_TX_HEADROOM +
  7939. + sizeof(struct qm_sg_entry) * (1 + nr_frags),
  7940. + DMA_BIDIRECTIONAL);
  7941. +
  7942. + for (j = 0; j <= nr_frags; j++) {
  7943. + DPA_BUG_ON(sgt[j].extension);
  7944. + sg_addr = qm_sg_addr(&sgt[j]);
  7945. + dma_unmap_page(bp->dev, sg_addr,
  7946. + sgt[j].length, DMA_BIDIRECTIONAL);
  7947. + }
  7948. +
  7949. + dev_kfree_skb_any(skb);
  7950. +}
  7951. +
  7952. +inline void dpa_release_contig(struct sk_buff *skb, dma_addr_t addr,
  7953. + struct dpa_bp *bp)
  7954. +{
  7955. + dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
  7956. + dev_kfree_skb_any(skb);
  7957. +}
  7958. +
  7959. +static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf)
  7960. +{
  7961. + int ret, i;
  7962. + struct bm_buffer bmb[8];
  7963. + dma_addr_t addr;
  7964. + int *countptr = raw_cpu_ptr(bp->percpu_count);
  7965. + int count = *countptr;
  7966. + struct sk_buff **skbh;
  7967. +
  7968. + do {
  7969. + /* bman_acquire will fail if nbuf > 8 */
  7970. + ret = bman_acquire(bp->pool, bmb, nbuf, 0);
  7971. + if (ret > 0) {
  7972. + for (i = 0; i < nbuf; i++) {
  7973. + addr = bm_buf_addr(&bmb[i]);
  7974. + skbh = (struct sk_buff **)phys_to_virt(addr);
  7975. + dma_unmap_single(bp->dev, addr, bp->size,
  7976. + DMA_TO_DEVICE);
  7977. +
  7978. + if (skb_is_nonlinear(*skbh))
  7979. + dpa_release_sg(*skbh, addr, bp);
  7980. + else
  7981. + dpa_release_contig(*skbh, addr, bp);
  7982. + }
  7983. + count -= i;
  7984. + }
  7985. + } while (ret > 0);
  7986. +
  7987. + *countptr = count;
  7988. +}
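
dpa_generic_drain_bp() depends on the Tx-path convention, established in dpa_generic_tx() below, that the first word of each buffer seeded into the draining pool is a back-pointer to the owning skb; that is what the phys_to_virt() + *skbh dereference above recovers before handing the skb to dpa_release_sg() or dpa_release_contig().
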
  7989. +
  7990. +/**
  7991. + * Turn on HW checksum computation for this outgoing frame.
  7992. + * If the current protocol is not something we support in this regard
  7993. + * (or if the stack has already computed the SW checksum), we do nothing.
  7994. + *
  7995. + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
  7996. + * otherwise.
  7997. + *
  7998. + * Note that this function may modify the fd->cmd field and the skb data buffer
  7999. + * (the Parse Results area).
  8000. + */
  8001. +static int dpa_generic_tx_csum(struct dpa_generic_priv_s *priv,
  8002. + struct sk_buff *skb,
  8003. + struct qm_fd *fd,
  8004. + char *parse_results)
  8005. +{
  8006. + fm_prs_result_t *parse_result;
  8007. + struct iphdr *iph;
  8008. + struct ipv6hdr *ipv6h = NULL;
  8009. + int l4_proto;
  8010. + int ethertype = ntohs(skb->protocol);
  8011. + int retval = 0;
  8012. +
  8013. + if (skb->ip_summed != CHECKSUM_PARTIAL)
  8014. + return 0;
  8015. +
  8016. + /* Note: L3 csum seems to be already computed in sw, but we can't choose
  8017. + * L4 alone from the FM configuration anyway.
  8018. + */
  8019. +
  8020. + /* Fill in some fields of the Parse Results array, so the FMan
  8021. + * can find them as if they came from the FMan Parser.
  8022. + */
  8023. + parse_result = (fm_prs_result_t *)parse_results;
  8024. +
  8025. + /* If we're dealing with VLAN, get the real Ethernet type */
  8026. + if (ethertype == ETH_P_8021Q) {
  8027. + /* We can't always assume the MAC header is set correctly
  8028. + * by the stack, so reset to beginning of skb->data
  8029. + */
  8030. + skb_reset_mac_header(skb);
  8031. + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
  8032. + }
  8033. +
  8034. + /* Fill in the relevant L3 parse result fields
  8035. + * and read the L4 protocol type
  8036. + */
  8037. + switch (ethertype) {
  8038. + case ETH_P_IP:
  8039. + parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
  8040. + iph = ip_hdr(skb);
  8041. + BUG_ON(iph == NULL);
  8042. + l4_proto = iph->protocol;
  8043. + break;
  8044. + case ETH_P_IPV6:
  8045. + parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
  8046. + ipv6h = ipv6_hdr(skb);
  8047. + BUG_ON(ipv6h == NULL);
  8048. + l4_proto = ipv6h->nexthdr;
  8049. + break;
  8050. + default:
  8051. + /* We shouldn't even be here */
  8052. + if (netif_msg_tx_err(priv) && net_ratelimit())
  8053. + netdev_alert(priv->net_dev,
  8054. + "Can't compute HW csum for L3 proto 0x%x\n",
  8055. + ntohs(skb->protocol));
  8056. + retval = -EIO;
  8057. + goto return_error;
  8058. + }
  8059. +
  8060. + /* Fill in the relevant L4 parse result fields */
  8061. + switch (l4_proto) {
  8062. + case IPPROTO_UDP:
  8063. + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
  8064. + break;
  8065. + case IPPROTO_TCP:
  8066. + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
  8067. + break;
  8068. + default:
  8069. + /* This could just as well be a BUG() */
  8070. + if (netif_msg_tx_err(priv) && net_ratelimit())
  8071. + netdev_alert(priv->net_dev,
  8072. + "Can't compute HW csum for L4 proto 0x%x\n",
  8073. + l4_proto);
  8074. + retval = -EIO;
  8075. + goto return_error;
  8076. + }
  8077. +
  8078. + /* At index 0 is IPOffset_1 as defined in the Parse Results */
  8079. + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
  8080. + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
  8081. +
  8082. + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
  8083. + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
  8084. +
  8085. + /* On P1023 and similar platforms, fd->cmd interpretation could
  8086. + * be disabled by setting the CONTEXT_A ICMD bit. That bit is
  8087. + * currently never set, so no check is needed here; if/when
  8088. + * context_a is used in the future, this bit must be checked.
  8089. + */
  8090. +
  8091. +return_error:
  8092. + return retval;
  8093. +}
  8094. +
  8095. +static inline int generic_skb_to_sg_fd(struct dpa_generic_priv_s *priv,
  8096. + struct sk_buff *skb, struct qm_fd *fd)
  8097. +{
  8098. + struct dpa_bp *dpa_bp = priv->draining_tx_bp;
  8099. + struct dpa_bp *dpa_sg_bp = priv->draining_tx_sg_bp;
  8100. + dma_addr_t addr;
  8101. + struct sk_buff **skbh;
  8102. + struct net_device *net_dev = priv->net_dev;
  8103. + int err;
  8104. +
  8105. + struct qm_sg_entry *sgt;
  8106. + void *sgt_buf;
  8107. + void *buffer_start;
  8108. + skb_frag_t *frag;
  8109. + int i, j;
  8110. + const enum dma_data_direction dma_dir = DMA_BIDIRECTIONAL;
  8111. + const int nr_frags = skb_shinfo(skb)->nr_frags;
  8112. +
  8113. + memset(fd, 0, sizeof(*fd));
  8114. + fd->format = qm_fd_sg;
  8115. +
  8116. + /* get a page frag to store the SG table */
  8117. + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
  8118. + sizeof(struct qm_sg_entry) * (1 + nr_frags));
  8119. + if (unlikely(!sgt_buf)) {
  8120. + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
  8121. + return -ENOMEM;
  8122. + }
  8123. +
  8124. + memset(sgt_buf, 0, priv->tx_headroom +
  8125. + sizeof(struct qm_sg_entry) * (1 + nr_frags));
  8126. +
  8127. + /* do this before dma_map_single(DMA_TO_DEVICE), because we may need to
  8128. + * write into the skb.
  8129. + */
  8130. + err = dpa_generic_tx_csum(priv, skb, fd,
  8131. + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
  8132. + if (unlikely(err < 0)) {
  8133. + if (netif_msg_tx_err(priv) && net_ratelimit())
  8134. + netdev_err(net_dev, "HW csum error: %d\n", err);
  8135. + goto csum_failed;
  8136. + }
  8137. +
  8138. + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
  8139. + sgt[0].bpid = dpa_sg_bp->bpid;
  8140. + sgt[0].offset = 0;
  8141. + sgt[0].length = skb_headlen(skb);
  8142. + sgt[0].extension = 0;
  8143. + sgt[0].final = 0;
  8144. +
  8145. + addr = dma_map_single(dpa_sg_bp->dev, skb->data, sgt[0].length,
  8146. + dma_dir);
  8147. + if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
  8148. + dev_err(dpa_sg_bp->dev, "DMA mapping failed");
  8149. + err = -EINVAL;
  8150. + goto sg0_map_failed;
  8151. + }
  8152. +
  8153. + sgt[0].addr_hi = (uint8_t)upper_32_bits(addr);
  8154. + sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
  8155. +
  8156. + /* populate the rest of SGT entries */
  8157. + for (i = 1; i <= nr_frags; i++) {
  8158. + frag = &skb_shinfo(skb)->frags[i - 1];
  8159. + sgt[i].bpid = dpa_sg_bp->bpid;
  8160. + sgt[i].offset = 0;
  8161. + sgt[i].length = frag->size;
  8162. + sgt[i].extension = 0;
  8163. + sgt[i].final = 0;
  8164. +
  8165. + DPA_BUG_ON(!skb_frag_page(frag));
  8166. + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
  8167. + dma_dir);
  8168. + if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
  8169. + dev_err(dpa_sg_bp->dev, "DMA mapping failed");
  8170. + err = -EINVAL;
  8171. + goto sg_map_failed;
  8172. + }
  8173. +
  8174. + /* keep the offset in the address */
  8175. + sgt[i].addr_hi = (uint8_t)upper_32_bits(addr);
  8176. + sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
  8177. + }
  8178. + sgt[i - 1].final = 1;
  8179. +
  8180. + fd->length20 = skb->len;
  8181. + fd->offset = priv->tx_headroom;
  8182. +
  8183. + /* DMA map the SGT page */
  8184. + buffer_start = (void *)sgt - dpa_fd_offset(fd);
  8185. + /* Can't write at "negative" offset in buffer_start, because this skb
  8186. + * may not have been allocated by us.
  8187. + */
  8188. + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
  8189. +
  8190. + addr = dma_map_single(dpa_bp->dev, buffer_start,
  8191. + priv->tx_headroom + sizeof(struct qm_sg_entry) * (1 + nr_frags),
  8192. + dma_dir);
  8193. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  8194. + dev_err(dpa_bp->dev, "DMA mapping failed");
  8195. + err = -EINVAL;
  8196. + goto sgt_map_failed;
  8197. + }
  8198. +
  8199. + fd->bpid = dpa_bp->bpid;
  8200. + fd->addr_hi = (uint8_t)upper_32_bits(addr);
  8201. + fd->addr_lo = lower_32_bits(addr);
  8202. +
  8203. + return 0;
  8204. +
  8205. +sgt_map_failed:
  8206. +sg_map_failed:
  8207. + for (j = 0; j < i; j++)
  8208. + dma_unmap_page(dpa_sg_bp->dev, qm_sg_addr(&sgt[j]),
  8209. + be32_to_cpu(sgt[j].length), dma_dir);
  8210. +sg0_map_failed:
  8211. +csum_failed:
  8212. + put_page(virt_to_head_page(sgt_buf));
  8213. +
  8214. + return err;
  8215. +}
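
The frame descriptor built above points at a compound buffer with the following layout (a sketch; the offsets follow this function's own arithmetic):

	fd.addr ->	+--------------------------------------+
			| tx_headroom:                         |
			|   skb back-pointer (offset 0)        |
			|   Parse Results                      |
			|     (at DPA_TX_PRIV_DATA_SIZE)       |
	fd.addr +	+--------------------------------------+
	fd.offset ->	| qm_sg_entry[0]  - linear skb data    |
			| qm_sg_entry[1..nr_frags] - frags     |
			|   (last entry has .final = 1)        |
			+--------------------------------------+
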
  8216. +
  8217. +static int __hot dpa_generic_tx(struct sk_buff *skb, struct net_device *netdev)
  8218. +{
  8219. + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
  8220. + struct dpa_percpu_priv_s *percpu_priv =
  8221. + raw_cpu_ptr(priv->percpu_priv);
  8222. + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
  8223. + struct dpa_bp *bp = priv->draining_tx_bp;
  8224. + struct dpa_bp *sg_bp = priv->draining_tx_sg_bp;
  8225. + struct sk_buff **skbh = NULL;
  8226. + dma_addr_t addr;
  8227. + struct qm_fd fd;
  8228. + int queue_mapping;
  8229. + struct qman_fq *egress_fq;
  8230. + const bool nonlinear = skb_is_nonlinear(skb);
  8231. + int i = 0, err = 0;
  8232. + int *countptr;
  8233. +
  8234. + if (nonlinear && skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES) {
  8235. + err = generic_skb_to_sg_fd(priv, skb, &fd);
  8236. + if (unlikely(err < 0))
  8237. + goto sg_failed;
  8238. + percpu_priv->tx_frag_skbuffs++;
  8239. + addr = qm_fd_addr(&fd);
  8240. + } else {
  8241. + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
  8242. + struct sk_buff *skb_new;
  8243. +
  8244. + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
  8245. + if (unlikely(!skb_new)) {
  8246. + percpu_stats->tx_errors++;
  8247. + kfree_skb(skb);
  8248. + goto done;
  8249. + }
  8250. +
  8251. + kfree_skb(skb);
  8252. + skb = skb_new;
  8253. + }
  8254. +
  8255. + clear_fd(&fd);
  8256. +
  8257. + /* store skb backpointer to release the skb later */
  8258. + skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
  8259. + *skbh = skb;
  8260. +
  8261. + /* do this before dma_map_single(), because we may need to write
  8262. + * into the skb.
  8263. + */
  8264. + err = dpa_generic_tx_csum(priv, skb, &fd,
  8265. + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
  8266. + if (unlikely(err < 0)) {
  8267. + if (netif_msg_tx_err(priv) && net_ratelimit())
  8268. + netdev_err(netdev, "HW csum error: %d\n", err);
  8269. + return err;
  8270. + }
  8271. +
  8272. + addr = dma_map_single(bp->dev, skbh,
  8273. + skb->len + priv->tx_headroom, DMA_TO_DEVICE);
  8274. + if (unlikely(dma_mapping_error(bp->dev, addr))) {
  8275. + if (netif_msg_tx_err(priv) && net_ratelimit())
  8276. + netdev_err(netdev, "dma_map_single() failed\n");
  8277. + goto dma_mapping_failed;
  8278. + }
  8279. +
  8280. + fd.format = qm_fd_contig;
  8281. + fd.length20 = skb->len;
  8282. + fd.offset = priv->tx_headroom;
  8283. + fd.addr_hi = (uint8_t)upper_32_bits(addr);
  8284. + fd.addr_lo = lower_32_bits(addr);
  8285. + /* fd.cmd |= FM_FD_CMD_FCO; */
  8286. + fd.bpid = bp->bpid;
  8287. + }
  8288. +
  8289. + dpa_generic_drain_bp(bp, 1);
  8290. + dpa_generic_drain_sg_bp(sg_bp, 1);
  8291. +
  8292. + queue_mapping = dpa_get_queue_mapping(skb);
  8293. + egress_fq = priv->egress_fqs[queue_mapping];
  8294. +
  8295. + for (i = 0; i < 100000; i++) {
  8296. + err = qman_enqueue(egress_fq, &fd, 0);
  8297. + if (err != -EBUSY)
  8298. + break;
  8299. + }
  8300. +
  8301. + if (unlikely(err < 0)) {
  8302. + percpu_stats->tx_fifo_errors++;
  8303. + goto xmit_failed;
  8304. + }
  8305. +
  8306. + countptr = raw_cpu_ptr(bp->percpu_count);
  8307. + (*countptr)++;
  8308. +
  8309. + percpu_stats->tx_packets++;
  8310. + percpu_stats->tx_bytes += fd.length20;
  8311. + netdev->trans_start = jiffies;
  8312. +
  8313. + goto done;
  8314. +
  8315. +xmit_failed:
  8316. + dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE);
  8317. +sg_failed:
  8318. +dma_mapping_failed:
  8319. + percpu_stats->tx_errors++;
  8320. + dev_kfree_skb(skb);
  8321. +done:
  8322. + return NETDEV_TX_OK;
  8323. +}
  8324. +
  8325. +static int dpa_generic_napi_add(struct net_device *net_dev)
  8326. +{
  8327. + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
  8328. + struct dpa_percpu_priv_s *percpu_priv;
  8329. + int i, cpu;
  8330. +
  8331. + for_each_possible_cpu(cpu) {
  8332. + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
  8333. +
  8334. + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
  8335. + qman_portal_max * sizeof(struct dpa_napi_portal),
  8336. + GFP_KERNEL);
  8337. +
  8338. + if (unlikely(percpu_priv->np == NULL)) {
  8339. + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
  8340. + return -ENOMEM;
  8341. + }
  8342. +
  8343. + for (i = 0; i < qman_portal_max; i++)
  8344. + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
  8345. + dpaa_eth_poll, DPA_GENERIC_NAPI_WEIGHT);
  8346. + }
  8347. +
  8348. + return 0;
  8349. +}
  8350. +
  8351. +static void dpa_generic_napi_del(struct net_device *net_dev)
  8352. +{
  8353. + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
  8354. + struct dpa_percpu_priv_s *percpu_priv;
  8355. + int i, cpu;
  8356. +
  8357. + for_each_possible_cpu(cpu) {
  8358. + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
  8359. +
  8360. + if (percpu_priv->np) {
  8361. + for (i = 0; i < qman_portal_max; i++)
  8362. + netif_napi_del(&percpu_priv->np[i].napi);
  8363. +
  8364. + devm_kfree(net_dev->dev.parent, percpu_priv->np);
  8365. + }
  8366. + }
  8367. +}
  8368. +
  8370. +static int dpa_generic_netdev_init(struct device_node *dpa_node,
  8371. + struct net_device *netdev)
  8372. +{
  8373. + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
  8374. + struct device *dev = netdev->dev.parent;
  8375. + const uint8_t *mac_addr;
  8376. + int err;
  8377. +
  8378. + netdev->netdev_ops = &dpa_generic_ops;
  8379. +
  8380. + mac_addr = of_get_mac_address(dpa_node);
  8381. + if (mac_addr == NULL) {
  8382. + if (netif_msg_probe(priv))
  8383. + dev_err(dev, "No virtual MAC address found!\n");
  8384. + return -EINVAL;
  8385. + }
  8386. +
  8387. + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG;
  8388. + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
  8389. + netdev->features |= netdev->hw_features;
  8390. + netdev->vlan_features = netdev->features;
  8391. +
  8392. + memcpy(netdev->perm_addr, mac_addr, netdev->addr_len);
  8393. + memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
  8394. +
  8395. + netdev->ethtool_ops = &dpa_generic_ethtool_ops;
  8396. +
  8397. + netdev->needed_headroom = priv->tx_headroom;
  8398. + netdev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
  8399. +
  8400. + err = register_netdev(netdev);
  8401. + if (err < 0) {
  8402. + dev_err(dev, "register_netdev() = %d\n", err);
  8403. + return err;
  8404. + }
  8405. +
  8406. + return 0;
  8407. +}
  8408. +
  8409. +static struct dpa_fq_cbs_t generic_fq_cbs = {
  8410. + .rx_defq = { .cb = { .dqrr = dpa_generic_rx_dqrr } },
  8411. + .rx_errq = { .cb = { .dqrr = dpa_generic_rx_err_dqrr } },
  8412. + .egress_ern = { .cb = { .ern = dpa_generic_ern } }
  8413. +};
  8414. +
  8415. +static struct fqid_cell *__fq_alloc(struct device *dev,
  8416. + int num_ranges,
  8417. + const void *fqids_off)
  8418. +{
  8419. + struct fqid_cell *fqids;
  8420. + int i;
  8421. +
  8422. + fqids = kzalloc(sizeof(*fqids) * num_ranges, GFP_KERNEL);
  8423. + if (fqids == NULL)
  8424. + return NULL;
  8425. +
  8426. + /* convert to CPU endianness */
  8427. + for (i = 0; i < num_ranges; i++) {
  8428. + fqids[i].start = be32_to_cpup(fqids_off +
  8429. + i * sizeof(*fqids));
  8430. + fqids[i].count = be32_to_cpup(fqids_off +
  8431. + i * sizeof(*fqids) + sizeof(__be32));
  8432. + }
  8433. +
  8434. + return fqids;
  8435. +}
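
Each range in the property is a <start count> pair of big-endian cells, one pair per struct fqid_cell. With a hypothetical property fsl,qman-frame-queues-tx = <0x1000 8 0x2000 8>, the caller computes num_ranges == 2 and __fq_alloc() returns:

	fqids[0] = { .start = 0x1000, .count = 8 };
	fqids[1] = { .start = 0x2000, .count = 8 };
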
  8436. +
  8437. +static struct list_head *dpa_generic_fq_probe(struct platform_device *_of_dev,
  8438. + struct fm_port *tx_port)
  8439. +{
  8440. + struct device *dev = &_of_dev->dev;
  8441. + struct device_node *oh_node = NULL;
  8442. + struct device_node *onic_node = NULL;
  8443. + struct fqid_cell *fqids;
  8444. + const void *fqids_off;
  8445. + struct dpa_fq *fq, *tmp;
  8446. + struct list_head *list;
  8447. + int num_ranges;
  8448. + int i, lenp;
  8449. +
  8450. + onic_node = dev->of_node;
  8451. +
  8452. + list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
  8453. + if (!list) {
  8454. + dev_err(dev, "Cannot allocate space for frame queues list\n");
  8455. + return ERR_PTR(-ENOMEM);
  8456. + }
  8457. +
  8458. + INIT_LIST_HEAD(list);
  8459. +
  8460. + /* RX queues (RX error, RX default) are specified in Rx O/H port node */
  8461. + oh_node = get_rx_op_port_node(_of_dev);
  8462. + fqids_off = of_get_property(oh_node, "fsl,qman-frame-queues-oh", &lenp);
  8463. + if (fqids_off == NULL) {
  8464. + dev_err(dev, "Need Rx FQ definition in dts for generic devices\n");
  8465. + return ERR_PTR(-EINVAL);
  8466. + }
  8467. + of_node_put(oh_node);
  8468. +
  8469. + num_ranges = lenp / sizeof(*fqids);
  8470. + if (num_ranges != 2) {
  8471. + dev_err(dev, "Need 2 Rx FQ definitions in dts for generic devices\n");
  8472. + return ERR_PTR(-EINVAL);
  8473. + }
  8474. +
  8475. + fqids = __fq_alloc(dev, num_ranges, fqids_off);
  8476. + if (!dpa_fq_alloc(dev, fqids[0].start, fqids[0].count, list,
  8477. + FQ_TYPE_RX_ERROR) ||
  8478. + !dpa_fq_alloc(dev, fqids[1].start, fqids[1].count,
  8479. + list, FQ_TYPE_RX_DEFAULT)) {
  8480. + dev_err(dev, "Cannot allocate space for default frame queues\n");
  8481. + return ERR_PTR(-ENOMEM);
  8482. + }
  8483. + kfree(fqids);
  8484. +
  8485. + /* TX queues */
  8486. + fqids_off = of_get_property(onic_node, "fsl,qman-frame-queues-tx",
  8487. + &lenp);
  8488. + if (fqids_off == NULL) {
  8489. + dev_err(dev, "Need Tx FQ definition in dts for generic devices\n");
  8490. + return ERR_PTR(-EINVAL);
  8491. + }
  8492. +
  8493. + num_ranges = lenp / sizeof(*fqids);
  8494. + fqids = __fq_alloc(dev, num_ranges, fqids_off);
  8495. + for (i = 0; i < num_ranges; i++) {
  8496. + if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
  8497. + FQ_TYPE_TX)) {
  8498. + dev_err(dev, "_dpa_fq_alloc() failed\n");
  8499. + return ERR_PTR(-ENOMEM);
  8500. + }
  8501. + }
  8502. + kfree(fqids);
  8503. +
  8504. + /* optional RX PCD queues */
  8505. + lenp = 0;
  8506. + fqids_off = of_get_property(onic_node,
  8507. + "fsl,qman-frame-queues-rx", &lenp);
  8508. + num_ranges = lenp / sizeof(*fqids);
  8509. + fqids = __fq_alloc(dev, num_ranges, fqids_off);
  8510. + for (i = 0; i < num_ranges; i++) {
  8511. + if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
  8512. + FQ_TYPE_RX_PCD)) {
  8513. + dev_err(dev, "_dpa_fq_alloc() failed\n");
  8514. + return ERR_PTR(-ENOMEM);
  8515. + }
  8516. + }
  8517. + kfree(fqids);
  8518. +
  8519. + list_for_each_entry_safe(fq, tmp, list, list) {
  8520. + if (fq->fq_type == FQ_TYPE_TX)
  8521. + fq->channel = fm_get_tx_port_channel(tx_port);
  8522. + }
  8523. +
  8524. + return list;
  8525. +}
  8526. +
  8527. +static void dpa_generic_ern(struct qman_portal *portal,
  8528. + struct qman_fq *fq,
  8529. + const struct qm_mr_entry *msg)
  8530. +{
  8531. + struct net_device *netdev;
  8532. + const struct dpa_generic_priv_s *priv;
  8533. + struct dpa_percpu_priv_s *percpu_priv;
  8534. + struct qm_fd fd = msg->ern.fd;
  8535. +
  8536. + netdev = ((struct dpa_fq *)fq)->net_dev;
  8537. + priv = netdev_priv(netdev);
  8538. + /* Non-migratable context, safe to use raw_cpu_ptr */
  8539. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  8540. + percpu_priv->stats.tx_dropped++;
  8541. + percpu_priv->stats.tx_fifo_errors++;
  8542. + count_ern(percpu_priv, msg);
  8543. +
  8544. + /* release this buffer into the draining buffer pool */
  8545. + dpa_fd_release(netdev, &fd);
  8546. +}
  8547. +
  8548. +static int dpa_generic_rx_bp_probe(struct platform_device *_of_dev,
  8549. + struct fm_port *rx_port,
  8550. + int *rx_bp_count,
  8551. + struct dpa_bp **rx_bp,
  8552. + struct dpa_buffer_layout_s **rx_buf_layout)
  8553. +{
  8554. + struct device *dev = &_of_dev->dev;
  8555. + struct fm_port_params params;
  8556. + struct dpa_bp *bp = NULL;
  8557. + int bp_count = 0;
  8558. + int bpid;
  8559. + const __be32 *bpool_cfg = NULL;
  8560. + struct device_node *dev_node = NULL;
  8561. + struct device_node *oh_node = NULL;
  8562. + struct dpa_buffer_layout_s *buf_layout = NULL;
  8563. + int lenp = 0;
  8564. + int na = 0, ns = 0;
  8565. + int err = 0, i = 0;
  8566. +
  8567. + oh_node = get_rx_op_port_node(_of_dev);
  8568. +
  8569. + bp_count = of_count_phandle_with_args(oh_node,
  8570. + "fsl,bman-buffer-pools", NULL);
  8571. + if (bp_count <= 0) {
  8572. + dev_err(dev, "Missing buffer pool handles in oNIC device tree node\n");
+ of_node_put(oh_node);
  8573. + return -EINVAL;
  8574. + }
  8575. +
  8576. + bp = devm_kzalloc(dev, bp_count * sizeof(*bp), GFP_KERNEL);
  8577. + if (unlikely(bp == NULL)) {
  8578. + dev_err(dev, "devm_kzalloc() failed\n");
  8579. + err = -ENOMEM;
  8580. + goto _return_of_node_put;
  8581. + }
  8582. +
  8583. + dev_node = of_find_node_by_path("/");
  8584. + if (unlikely(dev_node == NULL)) {
  8585. + dev_err(dev, "of_find_node_by_path(/) failed\n");
  8586. + err = -EINVAL;
  8587. + goto _return_of_node_put;
  8588. + }
  8589. +
  8590. + na = of_n_addr_cells(dev_node);
  8591. + ns = of_n_size_cells(dev_node);
  8592. +
  8593. + of_node_put(dev_node);
  8594. +
  8595. + for (i = 0; i < bp_count; i++) {
  8596. + dev_node = of_parse_phandle(oh_node,
  8597. + "fsl,bman-buffer-pools", i);
  8598. + if (dev_node == NULL) {
  8599. + dev_err(dev, "Cannot find buffer pool node in the device tree\n");
  8600. + err = -EINVAL;
  8601. + goto _return_of_node_put;
  8602. + }
  8603. +
  8604. + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
  8605. + if (err) {
  8606. + dev_err(dev, "Cannot find buffer pool ID in the buffer pool node in the device tree\n");
  8607. + goto _return_of_node_put;
  8608. + }
  8609. +
  8610. + bp[i].bpid = (uint8_t)bpid;
  8611. +
  8612. + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
  8613. + &lenp);
  8614. + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
  8615. + bp[i].config_count = (int)of_read_number(bpool_cfg, ns);
  8616. + bp[i].size = of_read_number(bpool_cfg + ns, ns);
  8617. + bp[i].paddr = 0;
  8618. + bp[i].seed_pool = false;
  8619. + } else {
  8620. + dev_err(dev, "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
  8621. + dev_node->full_name);
  8622. + err = -EINVAL;
  8623. + goto _return_of_node_put;
  8624. + }
  8625. +
  8626. + bp[i].percpu_count = devm_alloc_percpu(dev,
  8627. + *bp[i].percpu_count);
  8628. + }
  8629. +
  8630. + of_node_put(oh_node);
  8631. +
  8632. + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
  8633. + if (!buf_layout) {
  8634. + dev_err(dev, "devm_kzalloc() failed\n");
  8635. + err = -ENOMEM;
  8636. + goto _return_of_node_put;
  8637. + }
  8638. +
  8639. + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
  8640. + buf_layout->parse_results = false;
  8641. + buf_layout->hash_results = false;
  8642. + buf_layout->time_stamp = false;
  8643. + fm_port_get_buff_layout_ext_params(rx_port, &params);
  8644. + buf_layout->manip_extra_space = params.manip_extra_space;
  8645. + /* a value of zero for data alignment means "don't care", so align to
  8646. + * a non-zero value to prevent FMD from using its own default
  8647. + */
  8648. + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
  8649. +
  8650. + *rx_buf_layout = buf_layout;
  8651. + *rx_bp = bp;
  8652. + *rx_bp_count = bp_count;
  8653. +
  8654. + return 0;
  8655. +
  8656. +_return_of_node_put:
  8657. + if (dev_node)
  8658. + of_node_put(dev_node);
  8659. +
  8660. + return err;
  8661. +}
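+
+/* The "fsl,bpool-ethernet-cfg" property parsed above is laid out as
+ * <count size base>, where count and size each take #size-cells and base
+ * takes #address-cells of the root node (hence the (2 * ns + na) length
+ * check). An illustrative value, assuming 2-cell addresses and sizes:
+ *
+ *	fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ *
+ * i.e. 2048 buffers of 1728 bytes each; base is not consumed here, since
+ * bp[i].paddr is forced to 0 above.
+ */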
  8662. +
  8663. +static int dpa_generic_tx_bp_probe(struct platform_device *_of_dev,
  8664. + struct fm_port *tx_port,
  8665. + struct dpa_bp **draining_tx_bp,
  8666. + struct dpa_bp **draining_tx_sg_bp,
  8667. + struct dpa_buffer_layout_s **tx_buf_layout)
  8668. +{
  8669. + struct device *dev = &_of_dev->dev;
  8670. + struct fm_port_params params;
  8671. + struct dpa_bp *bp = NULL;
  8672. + struct dpa_bp *bp_sg = NULL;
  8673. + struct dpa_buffer_layout_s *buf_layout = NULL;
  8674. +
  8675. + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
  8676. + if (!buf_layout) {
  8677. + dev_err(dev, "devm_kzalloc() failed\n");
  8678. + return -ENOMEM;
  8679. + }
  8680. +
  8681. + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
  8682. + buf_layout->parse_results = true;
  8683. + buf_layout->hash_results = true;
  8684. + buf_layout->time_stamp = false;
  8685. +
  8686. + fm_port_get_buff_layout_ext_params(tx_port, &params);
  8687. + buf_layout->manip_extra_space = params.manip_extra_space;
  8688. + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
  8689. +
  8690. + bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL);
  8691. + if (unlikely(bp == NULL)) {
  8692. + dev_err(dev, "devm_kzalloc() failed\n");
  8693. + return -ENOMEM;
  8694. + }
  8695. +
  8696. + bp->size = dpa_bp_size(buf_layout);
  8697. + bp->percpu_count = devm_alloc_percpu(dev, *bp->percpu_count);
  8698. + bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
  8699. +
  8700. + *draining_tx_bp = bp;
  8701. +
  8702. + bp_sg = devm_kzalloc(dev, sizeof(*bp_sg), GFP_KERNEL);
  8703. + if (unlikely(bp_sg == NULL)) {
  8704. + dev_err(dev, "devm_kzalloc() failed\n");
  8705. + return -ENOMEM;
  8706. + }
  8707. +
  8708. + bp_sg->size = dpa_bp_size(buf_layout);
  8709. + bp_sg->percpu_count = devm_alloc_percpu(dev, *bp_sg->percpu_count);
  8710. + bp_sg->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
  8711. +
  8712. + *draining_tx_sg_bp = bp_sg;
  8713. +
  8714. + *tx_buf_layout = buf_layout;
  8715. +
  8716. + return 0;
  8717. +}
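+
+/* Two notes on the helper above: "x ? : y" is the GCC shorthand for
+ * "x ? x : y", so data_align only falls back to DPA_FD_DATA_ALIGNMENT when
+ * the port reports the "don't care" value of zero. Also, the two Tx pools
+ * created here are draining pools: outgoing Tx buffers (linear and
+ * scatter-gather, respectively) are released into them, e.g. from the ERN
+ * path in dpa_generic_ern() above, and reclaimed later by the draining
+ * timer set up at probe time.
+ */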
  8718. +
  8719. +static int dpa_generic_buff_dealloc_probe(struct platform_device *_of_dev,
  8720. + int *disable_buff_dealloc)
  8721. +{
  8722. + struct device *dev = &_of_dev->dev;
  8723. + const phandle *disable_handle = NULL;
  8724. + int lenp = 0;
  8725. + int err = 0;
  8726. +
  8727. + disable_handle = of_get_property(dev->of_node,
  8728. + "fsl,disable_buff_dealloc", &lenp);
  8729. + if (disable_handle != NULL)
  8730. + *disable_buff_dealloc = 1;
  8731. +
  8732. + return err;
  8733. +}
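+
+/* "fsl,disable_buff_dealloc" acts as a boolean property: its mere presence
+ * sets the flag, no value is read. When set, dpa_generic_fq_init() below
+ * skips programming the EBD bit in ContextA, so FMan will not free Tx
+ * buffers and some other hardware module is expected to release them.
+ * Illustrative dts usage:
+ *
+ *	fsl,disable_buff_dealloc;
+ */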
  8734. +
  8735. +static int dpa_generic_port_probe(struct platform_device *_of_dev,
  8736. + struct fm_port **rx_port,
  8737. + struct fm_port **tx_port)
  8738. +{
  8739. + struct device *dev = &_of_dev->dev;
  8740. + struct device_node *dev_node = NULL;
  8741. + struct device_node *onic_node = NULL;
  8742. + int num_ports = 0;
  8743. + int err = 0;
  8744. +
  8745. + onic_node = dev->of_node;
  8746. +
  8747. + num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
  8748. + if (num_ports != 2) {
  8749. + dev_err(dev, "There should be two OH ports in device tree (one for RX, one for TX\n");
  8750. + return -EINVAL;
  8751. + }
  8752. +
  8753. + dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", RX);
  8754. + if (dev_node == NULL) {
  8755. + dev_err(dev, "Cannot find Rx OH port node in device tree\n");
  8756. + return -EFAULT;
  8757. + }
  8758. +
  8759. + err = get_port_ref(dev_node, rx_port);
  8760. + if (err) {
  8761. + dev_err(dev, "Cannot read Rx OH port node in device tree\n");
  8762. + return err;
  8763. + }
  8764. +
  8765. + dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", TX);
  8766. + if (dev_node == NULL) {
  8767. + dev_err(dev, "Cannot find Tx OH port node in device tree\n");
  8768. + return -EFAULT;
  8769. + }
  8770. +
  8771. + err = get_port_ref(dev_node, tx_port);
  8772. + if (err) {
  8773. + dev_err(dev, "Cannot read Tx OH port node in device tree\n");
  8774. + return err;
  8775. + }
  8776. +
  8777. + return 0;
  8778. +}
  8779. +
  8780. +static inline void dpa_generic_setup_ingress(
  8781. + const struct dpa_generic_priv_s *priv,
  8782. + struct dpa_fq *fq,
  8783. + const struct qman_fq *template)
  8784. +{
  8785. + fq->fq_base = *template;
  8786. + fq->net_dev = priv->net_dev;
  8787. +
  8788. + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
  8789. + fq->channel = priv->channel;
  8790. +}
  8791. +
  8792. +static inline void dpa_generic_setup_egress(
  8793. + const struct dpa_generic_priv_s *priv,
  8794. + struct dpa_fq *fq,
  8795. + struct fm_port *port,
  8796. + const struct qman_fq *template)
  8797. +{
  8798. + fq->fq_base = *template;
  8799. + fq->net_dev = priv->net_dev;
  8800. +
  8801. + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
  8802. + fq->channel = fm_get_tx_port_channel(port);
  8803. +}
  8804. +
  8805. +static void dpa_generic_fq_setup(struct dpa_generic_priv_s *priv,
  8806. + const struct dpa_fq_cbs_t *fq_cbs,
  8807. + struct fm_port *tx_port)
  8808. +{
  8809. + struct dpa_fq *fq;
  8810. + int egress_cnt = 0;
  8811. +
  8812. + /* Initialize each FQ in the list */
  8813. + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
  8814. + switch (fq->fq_type) {
  8815. + case FQ_TYPE_RX_DEFAULT:
  8816. + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
  8817. + break;
  8818. + case FQ_TYPE_RX_ERROR:
  8819. + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_errq);
  8820. + break;
  8821. + case FQ_TYPE_RX_PCD:
  8822. + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
  8823. + break;
  8824. + case FQ_TYPE_TX:
  8825. + dpa_generic_setup_egress(priv, fq,
  8826. + tx_port, &fq_cbs->egress_ern);
  8827. + /* If we have more Tx queues than the number of cores,
  8828. + * just ignore the extra ones.
  8829. + */
  8830. + if (egress_cnt < DPAA_ETH_TX_QUEUES)
  8831. + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
  8832. + break;
  8833. + default:
  8834. + dev_warn(priv->net_dev->dev.parent,
  8835. + "Unknown FQ type detected!\n");
  8836. + break;
  8837. + }
  8838. + }
  8839. +
  8840. + /* The number of Tx queues may be smaller than the number of cores, if
  8841. + * the Tx queue range is specified in the device tree instead of being
  8842. + * dynamically allocated.
  8843. + * Make sure all CPUs receive a corresponding Tx queue.
  8844. + */
  8845. + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
  8846. + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
  8847. + if (fq->fq_type != FQ_TYPE_TX)
  8848. + continue;
  8849. + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
  8850. + if (egress_cnt == DPAA_ETH_TX_QUEUES)
  8851. + break;
  8852. + }
  8853. + }
  8854. +}
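+
+/* Worked example for the Tx distribution above: with DPAA_ETH_TX_QUEUES == 8
+ * (illustrative) and only three Tx FQs A, B, C in the device tree, the first
+ * pass fills egress_fqs[0..2] = A, B, C and the while loop keeps wrapping
+ * over the list until all eight slots are filled: A B C A B C A B. Every CPU
+ * therefore gets an egress FQ, with some FQs shared between CPUs.
+ */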
  8855. +
  8856. +static int dpa_generic_fq_init(struct dpa_fq *dpa_fq, int disable_buff_dealloc)
  8857. +{
  8858. + int _errno;
  8859. + struct device *dev;
  8860. + struct qman_fq *fq;
  8861. + struct qm_mcc_initfq initfq;
  8862. +
  8863. + dev = dpa_fq->net_dev->dev.parent;
  8864. +
  8865. + if (dpa_fq->fqid == 0)
  8866. + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
  8867. +
  8868. + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
  8869. + if (_errno) {
  8870. + dev_err(dev, "qman_create_fq() failed\n");
  8871. + return _errno;
  8872. + }
  8873. + fq = &dpa_fq->fq_base;
  8874. +
  8875. + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
  8876. + /* FIXME: why would we want to keep an empty FQ in cache? */
  8877. + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
  8878. +
  8879. + /* FQ placement */
  8880. + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
  8881. +
  8882. + initfq.fqd.dest.channel = dpa_fq->channel;
  8883. + initfq.fqd.dest.wq = dpa_fq->wq;
  8884. +
  8885. + if (dpa_fq->fq_type == FQ_TYPE_TX && !disable_buff_dealloc) {
  8886. + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
  8887. + /* ContextA: A2V=1 (contextA A2 field is valid)
  8888. + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
  8889. + */
  8890. + initfq.fqd.context_a.hi = 0x10000000;
  8891. + initfq.fqd.context_a.lo = 0x80000000;
  8892. + }
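+ /* In the constants above, the high word carries the A2V validity
+ * flag and the low word is the A2 field itself, with EBD as its
+ * most significant bit; the driver uses these magic values
+ * directly rather than named macros.
+ */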
  8893. +
  8894. + /* Initialization common to all ingress queues */
  8895. + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
  8896. + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
  8897. + initfq.fqd.fq_ctrl |=
  8898. + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
  8899. + initfq.fqd.context_a.stashing.exclusive =
  8900. + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
  8901. + QM_STASHING_EXCL_ANNOTATION;
  8902. + initfq.fqd.context_a.stashing.data_cl = 2;
  8903. + initfq.fqd.context_a.stashing.annotation_cl = 1;
  8904. + initfq.fqd.context_a.stashing.context_cl =
  8905. + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
  8906. + }
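+
+ /* The stashing setup above asks the portal to prefetch, into the
+ * dequeueing CPU's cache, up to 2 cache lines (128 B) of frame data,
+ * 1 line of annotation and enough lines to cover struct qman_fq as
+ * FQ context; DIV_ROUND_UP converts the struct size to 64-byte
+ * cache lines.
+ */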
  8907. +
  8908. + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
  8909. + if (_errno < 0) {
  8910. + dev_err(dev, "qman_init_fq(%u) = %d\n",
  8911. + qman_fq_fqid(fq), _errno);
  8912. + qman_destroy_fq(fq, 0);
  8913. + return _errno;
  8914. + }
  8915. +
  8916. + dpa_fq->fqid = qman_fq_fqid(fq);
  8917. +
  8918. + return 0;
  8919. +}
  8920. +
  8921. +static int dpa_generic_fq_create(struct net_device *netdev,
  8922. + struct list_head *dpa_fq_list,
  8923. + struct fm_port *tx_port)
  8924. +{
  8925. + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
  8926. + struct dpa_fq *fqs = NULL, *tmp = NULL;
  8927. + struct task_struct *kth;
  8928. + int err = 0;
  8929. + int channel;
  8930. +
  8931. + INIT_LIST_HEAD(&priv->dpa_fq_list);
  8932. +
  8933. + list_replace_init(dpa_fq_list, &priv->dpa_fq_list);
  8934. +
  8935. + channel = dpa_get_channel();
  8936. + if (channel < 0)
  8937. + return channel;
  8938. + priv->channel = (uint16_t)channel;
  8939. +
  8940. + /* Start a thread that will walk the cpus with affine portals
  8941. + * and add this pool channel to each's dequeue mask.
  8942. + */
  8943. + kth = kthread_run(dpaa_eth_add_channel,
  8944. + (void *)(unsigned long)priv->channel,
  8945. + "dpaa_%p:%d", netdev, priv->channel);
  8946. + if (IS_ERR(kth))
  8947. + return -ENOMEM;
  8948. +
  8949. + dpa_generic_fq_setup(priv, &generic_fq_cbs, tx_port);
  8950. +
  8951. + /* Add the FQs to the interface, and make them active */
  8952. + list_for_each_entry_safe(fqs, tmp, &priv->dpa_fq_list, list) {
  8953. + err = dpa_generic_fq_init(fqs, priv->disable_buff_dealloc);
  8954. + if (err)
  8955. + return err;
  8956. + }
  8957. +
  8958. + return 0;
  8959. +}
  8960. +
  8961. +static int dpa_generic_bp_create(struct net_device *net_dev,
  8962. + int rx_bp_count,
  8963. + struct dpa_bp *rx_bp,
  8964. + struct dpa_buffer_layout_s *rx_buf_layout,
  8965. + struct dpa_bp *draining_tx_bp,
  8966. + struct dpa_bp *draining_tx_sg_bp,
  8967. + struct dpa_buffer_layout_s *tx_buf_layout)
  8968. +{
  8969. + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
  8970. + int err = 0;
  8971. +
  8972. + /* TODO: multiple Rx bps */
  8973. + priv->rx_bp_count = rx_bp_count;
  8974. + priv->rx_bp = rx_bp;
  8975. + priv->rx_buf_layout = rx_buf_layout;
  8976. + priv->draining_tx_bp = draining_tx_bp;
  8977. + priv->draining_tx_sg_bp = draining_tx_sg_bp;
  8978. + priv->tx_buf_layout = tx_buf_layout;
  8979. +
  8980. + err = dpa_bp_alloc(priv->rx_bp);
  8981. + if (err < 0) {
  8982. + priv->rx_bp = NULL;
  8983. + return err;
  8984. + }
  8985. +
  8986. + err = dpa_bp_alloc(priv->draining_tx_bp);
  8987. + if (err < 0) {
  8988. + priv->draining_tx_bp = NULL;
  8989. + return err;
  8990. + }
  8991. +
  8992. + err = dpa_bp_alloc(priv->draining_tx_sg_bp);
  8993. + if (err < 0) {
  8994. + priv->draining_tx_sg_bp = NULL;
  8995. + return err;
  8996. + }
  8997. +
  8998. + return 0;
  8999. +}
  9000. +
  9001. +static void dpa_generic_release_bp(struct dpa_bp *bp)
  9002. +{
  9003. + if (!bp)
  9004. + return;
  9005. +
  9006. + if (!atomic_dec_and_test(&bp->refs))
  9007. + return;
  9008. +
  9009. + if (bp->free_buf_cb)
  9010. + dpa_bp_drain(bp);
  9011. +
  9012. + bman_free_pool(bp->pool);
  9013. +
  9014. + if (bp->dev)
  9015. + platform_device_unregister(to_platform_device(bp->dev));
  9016. +}
  9017. +
  9018. +static void dpa_generic_bp_free(struct dpa_generic_priv_s *priv)
  9019. +{
  9020. + int i = 0;
  9021. +
  9022. + /* release the rx bpools */
  9023. + for (i = 0; i < priv->rx_bp_count; i++)
  9024. + dpa_generic_release_bp(&priv->rx_bp[i]);
  9025. +
  9026. + /* release the tx draining bpools */
  9027. + dpa_generic_release_bp(priv->draining_tx_bp);
  9028. + dpa_generic_release_bp(priv->draining_tx_sg_bp);
  9029. +}
  9030. +
  9031. +static int dpa_generic_remove(struct platform_device *of_dev)
  9032. +{
  9033. + int err;
  9034. + struct device *dev;
  9035. + struct net_device *net_dev;
  9036. + struct dpa_generic_priv_s *priv;
  9037. +
  9038. + dev = &of_dev->dev;
  9039. + net_dev = dev_get_drvdata(dev);
  9040. + priv = netdev_priv(net_dev);
  9041. +
  9042. + dpaa_eth_generic_sysfs_remove(dev);
  9043. +
  9044. + dev_set_drvdata(dev, NULL);
  9045. + unregister_netdev(net_dev);
  9046. +
  9047. + err = dpa_fq_free(dev, &priv->dpa_fq_list);
  9048. +
  9049. + dpa_generic_napi_del(net_dev);
  9050. +
  9051. + dpa_generic_bp_free(priv);
  9052. +
  9053. + free_netdev(net_dev);
  9054. +
  9055. + return err;
  9056. +}
  9057. +
  9058. +static int dpa_generic_eth_probe(struct platform_device *_of_dev)
  9059. +{
  9060. + struct device *dev = &_of_dev->dev;
  9061. + struct device_node *dpa_node = dev->of_node;
  9062. + struct net_device *netdev = NULL;
  9063. + struct dpa_generic_priv_s *priv;
  9064. + struct fm_port *rx_port = NULL;
  9065. + struct fm_port *tx_port = NULL;
  9066. + struct dpa_percpu_priv_s *percpu_priv;
  9067. + int rx_bp_count = 0;
  9068. + int disable_buff_dealloc = 0;
  9069. + struct dpa_bp *rx_bp = NULL, *draining_tx_bp = NULL;
  9070. + struct dpa_bp *draining_tx_sg_bp = NULL;
  9071. + struct dpa_buffer_layout_s *rx_buf_layout = NULL, *tx_buf_layout = NULL;
  9072. + struct list_head *dpa_fq_list;
  9073. + static u8 generic_idx;
  9074. + int err = 0;
  9075. + int i = 0;
  9076. +
  9077. + if (!of_device_is_available(dpa_node))
  9078. + return -ENODEV;
  9079. +
  9080. + err = dpa_generic_port_probe(_of_dev, &tx_port, &rx_port);
  9081. + if (err < 0)
  9082. + return err;
  9083. +
  9084. + err = dpa_generic_rx_bp_probe(_of_dev, rx_port, &rx_bp_count,
  9085. + &rx_bp, &rx_buf_layout);
  9086. + if (err < 0)
  9087. + return err;
  9088. +
  9089. + err = dpa_generic_tx_bp_probe(_of_dev, tx_port, &draining_tx_bp,
  9090. + &draining_tx_sg_bp, &tx_buf_layout);
  9091. + if (err < 0)
  9092. + return err;
  9093. +
  9094. + dpa_fq_list = dpa_generic_fq_probe(_of_dev, tx_port);
  9095. + if (IS_ERR(dpa_fq_list))
  9096. + return PTR_ERR(dpa_fq_list);
  9097. +
  9098. + err = dpa_generic_buff_dealloc_probe(_of_dev, &disable_buff_dealloc);
  9099. + if (err < 0)
  9100. + return err;
  9101. +
  9102. + /* just one queue for now */
  9103. + netdev = alloc_etherdev_mq(sizeof(*priv), 1);
  9104. + if (!netdev) {
  9105. + dev_err(dev, "alloc_etherdev_mq() failed\n");
  9106. + return -ENOMEM;
  9107. + }
  9108. +
  9109. + SET_NETDEV_DEV(netdev, dev);
  9110. + dev_set_drvdata(dev, netdev);
  9111. + priv = netdev_priv(netdev);
  9112. + priv->net_dev = netdev;
  9113. + sprintf(priv->if_type, "generic%d", generic_idx++);
  9114. + priv->msg_enable = netif_msg_init(generic_debug, -1);
  9115. + priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
  9116. +
  9117. + init_timer(&priv->timer);
  9118. + priv->timer.data = (unsigned long)priv;
  9119. + priv->timer.function = dpa_generic_draining_timer;
  9120. +
  9121. + err = dpa_generic_bp_create(netdev, rx_bp_count, rx_bp, rx_buf_layout,
  9122. + draining_tx_bp, draining_tx_sg_bp, tx_buf_layout);
  9123. + if (err < 0)
  9124. + goto bp_create_failed;
  9125. +
  9126. + priv->disable_buff_dealloc = disable_buff_dealloc;
  9127. +
  9128. + err = dpa_generic_fq_create(netdev, dpa_fq_list, rx_port);
  9129. + if (err < 0)
  9130. + goto fq_create_failed;
  9131. +
  9132. + priv->tx_headroom = dpa_get_headroom(tx_buf_layout);
  9133. + priv->rx_headroom = dpa_get_headroom(rx_buf_layout);
  9134. + priv->rx_port = rx_port;
  9135. + priv->tx_port = tx_port;
  9136. + priv->mac_dev = NULL;
  9137. +
  9139. + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
  9140. + if (priv->percpu_priv == NULL) {
  9141. + dev_err(dev, "devm_alloc_percpu() failed\n");
  9142. + err = -ENOMEM;
  9143. + goto alloc_percpu_failed;
  9144. + }
  9145. + for_each_online_cpu(i) {
  9146. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  9147. + memset(percpu_priv, 0, sizeof(*percpu_priv));
  9148. + }
  9149. +
  9150. + /* Initialize NAPI */
  9151. + err = dpa_generic_napi_add(netdev);
  9152. + if (err < 0)
  9153. + goto napi_add_failed;
  9154. +
  9155. + err = dpa_generic_netdev_init(dpa_node, netdev);
  9156. + if (err < 0)
  9157. + goto netdev_init_failed;
  9158. +
  9159. + dpaa_eth_generic_sysfs_init(&netdev->dev);
  9160. +
  9161. + pr_info("fsl_dpa_generic: Probed %s interface as %s\n",
  9162. + priv->if_type, netdev->name);
  9163. +
  9164. + return 0;
  9165. +
  9166. +netdev_init_failed:
  9167. +napi_add_failed:
  9168. + dpa_generic_napi_del(netdev);
  9169. +alloc_percpu_failed:
  9170. + if (netdev)
  9171. + dpa_fq_free(dev, &priv->dpa_fq_list);
  9172. +fq_create_failed:
  9173. +bp_create_failed:
  9174. + if (netdev)
  9175. + dpa_generic_bp_free(priv);
  9176. + dev_set_drvdata(dev, NULL);
  9177. + if (netdev)
  9178. + free_netdev(netdev);
  9179. +
  9180. + return err;
  9181. +}
  9182. +
  9183. +static int __init __cold dpa_generic_load(void)
  9184. +{
  9185. + int _errno;
  9186. +
  9187. + pr_info(KBUILD_MODNAME ": " DPA_GENERIC_DESCRIPTION "\n");
  9188. +
  9189. + /* Initialize dpaa_eth mirror values */
  9190. + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
  9191. + dpa_max_frm = fm_get_max_frm();
  9192. +
  9193. + _errno = platform_driver_register(&dpa_generic_driver);
  9194. + if (unlikely(_errno < 0)) {
  9195. + pr_err(KBUILD_MODNAME
  9196. + ": %s:%hu:%s(): platform_driver_register() = %d\n",
  9197. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  9198. + }
  9199. +
  9200. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  9201. + KBUILD_BASENAME".c", __func__);
  9202. +
  9203. + return _errno;
  9204. +}
  9205. +
  9206. +/* waiting for all referenced ports to be initialized
  9207. + * by other kernel modules (proxy ethernet, offline_port)
  9208. + */
  9209. +late_initcall(dpa_generic_load);
  9210. +
  9211. +static void __exit __cold dpa_generic_unload(void)
  9212. +{
  9213. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  9214. + KBUILD_BASENAME".c", __func__);
  9215. +
  9216. + platform_driver_unregister(&dpa_generic_driver);
  9217. +
  9218. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  9219. + KBUILD_BASENAME".c", __func__);
  9220. +}
  9221. +module_exit(dpa_generic_unload);
  9222. --- /dev/null
  9223. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
  9224. @@ -0,0 +1,90 @@
  9225. +/* Copyright 2013 Freescale Semiconductor Inc.
  9226. + *
  9227. + * Redistribution and use in source and binary forms, with or without
  9228. + * modification, are permitted provided that the following conditions are met:
  9229. + * * Redistributions of source code must retain the above copyright
  9230. + * notice, this list of conditions and the following disclaimer.
  9231. + * * Redistributions in binary form must reproduce the above copyright
  9232. + * notice, this list of conditions and the following disclaimer in the
  9233. + * documentation and/or other materials provided with the distribution.
  9234. + * * Neither the name of Freescale Semiconductor nor the
  9235. + * names of its contributors may be used to endorse or promote products
  9236. + * derived from this software without specific prior written permission.
  9237. + *
  9238. + *
  9239. + * ALTERNATIVELY, this software may be distributed under the terms of the
  9240. + * GNU General Public License ("GPL") as published by the Free Software
  9241. + * Foundation, either version 2 of that License or (at your option) any
  9242. + * later version.
  9243. + *
  9244. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  9245. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  9246. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  9247. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  9248. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  9249. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  9250. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  9251. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  9252. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  9253. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  9254. + */
  9255. +
  9256. +#ifndef __DPA_ETH_GENERIC_H
  9257. +#define __DPA_ETH_GENERIC_H
  9258. +
  9259. +#include "lnxwrp_fsl_fman.h"
  9260. +#include "dpaa_eth.h"
  9261. +
  9262. +struct dpa_generic_priv_s {
  9263. + struct net_device *net_dev;
  9264. + /* use the same percpu_priv as other DPAA Ethernet drivers */
  9265. + struct dpa_percpu_priv_s __percpu *percpu_priv;
  9266. +
  9267. + /* up to 4 bps supported for RX */
  9268. + int rx_bp_count;
  9269. + struct dpa_bp *rx_bp;
  9270. + struct dpa_buffer_layout_s *rx_buf_layout;
  9271. +
  9272. + struct dpa_bp *draining_tx_bp;
  9273. + struct dpa_bp *draining_tx_sg_bp;
  9274. + struct dpa_buffer_layout_s *tx_buf_layout;
  9275. +
  9276. + /* Store here the needed Tx headroom for convenience and speed
  9277. + * (even though it can be computed based on the fields of buf_layout)
  9278. + */
  9279. + uint16_t tx_headroom;
  9280. + uint16_t rx_headroom;
  9281. +
  9282. + /* In some scenarios, when VSP are not enabled on the Tx O/H port,
  9283. + * the buffers will be released by other hardware modules
  9284. + */
  9285. + int disable_buff_dealloc;
  9286. +
  9287. + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
  9288. +
  9289. + struct fm_port *rx_port;
  9290. + struct fm_port *tx_port;
  9291. +
  9292. + /* oNIC can have limited control capabilities over a MAC device */
  9293. + struct mac_device *mac_dev;
  9294. +
  9295. + uint16_t channel; /* "fsl,qman-channel-id" */
  9296. + struct list_head dpa_fq_list;
  9297. +
  9298. + uint32_t msg_enable; /* net_device message level */
  9299. +
  9300. + struct dpa_buffer_layout_s *buf_layout;
  9301. + char if_type[30];
  9302. +
  9303. + /* periodic drain */
  9304. + struct timer_list timer;
  9305. +};
  9306. +
  9307. +extern const struct ethtool_ops dpa_generic_ethtool_ops;
  9308. +
  9309. +void dpaa_eth_generic_sysfs_init(struct device *dev);
  9310. +void dpaa_eth_generic_sysfs_remove(struct device *dev);
  9311. +int __init dpa_generic_debugfs_module_init(void);
  9312. +void __exit dpa_generic_debugfs_module_exit(void);
  9313. +
  9314. +#endif /* __DPA_ETH_GENERIC_H */
  9315. --- /dev/null
  9316. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
  9317. @@ -0,0 +1,201 @@
  9318. +/* Copyright 2014 Freescale Semiconductor Inc.
  9319. + *
  9320. + * Redistribution and use in source and binary forms, with or without
  9321. + * modification, are permitted provided that the following conditions are met:
  9322. + * * Redistributions of source code must retain the above copyright
  9323. + * notice, this list of conditions and the following disclaimer.
  9324. + * * Redistributions in binary form must reproduce the above copyright
  9325. + * notice, this list of conditions and the following disclaimer in the
  9326. + * documentation and/or other materials provided with the distribution.
  9327. + * * Neither the name of Freescale Semiconductor nor the
  9328. + * names of its contributors may be used to endorse or promote products
  9329. + * derived from this software without specific prior written permission.
  9330. + *
  9331. + *
  9332. + * ALTERNATIVELY, this software may be distributed under the terms of the
  9333. + * GNU General Public License ("GPL") as published by the Free Software
  9334. + * Foundation, either version 2 of that License or (at your option) any
  9335. + * later version.
  9336. + *
  9337. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  9338. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  9339. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  9340. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  9341. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  9342. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  9343. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  9344. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  9345. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  9346. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  9347. + */
  9348. +
  9349. +#include <linux/init.h>
  9350. +#include <linux/module.h>
  9351. +#include <linux/kthread.h>
  9352. +#include <linux/io.h>
  9353. +#include <linux/of_net.h>
  9354. +
  9355. +#include "dpaa_eth_generic.h"
  9356. +#include "mac.h" /* struct mac_device */
  9357. +
  9358. +static ssize_t dpaa_eth_generic_show_addr(struct device *dev,
  9359. + struct device_attribute *attr, char *buf)
  9360. +{
  9361. + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
  9362. + struct mac_device *mac_dev = priv->mac_dev;
  9363. +
  9364. + if (mac_dev)
  9365. + return sprintf(buf, "%llx\n",
  9366. + (unsigned long long)mac_dev->res->start);
  9367. + else
  9368. + return sprintf(buf, "none\n");
  9369. +}
  9370. +
  9371. +static ssize_t dpaa_eth_generic_show_type(struct device *dev,
  9372. + struct device_attribute *attr, char *buf)
  9373. +{
  9374. + ssize_t res = 0;
  9375. + res = sprintf(buf, "generic\n");
  9376. +
  9377. + return res;
  9378. +}
  9379. +
  9380. +static ssize_t dpaa_eth_generic_show_fqids(struct device *dev,
  9381. + struct device_attribute *attr, char *buf)
  9382. +{
  9383. + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
  9384. + ssize_t bytes = 0;
  9385. + int i = 0;
  9386. + char *str;
  9387. + struct dpa_fq *fq;
  9388. + struct dpa_fq *tmp;
  9389. + struct dpa_fq *prev = NULL;
  9390. + u32 first_fqid = 0;
  9391. + u32 last_fqid = 0;
  9392. + char *prevstr = NULL;
  9393. +
  9394. + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
  9395. + switch (fq->fq_type) {
  9396. + case FQ_TYPE_RX_DEFAULT:
  9397. + str = "Rx default";
  9398. + break;
  9399. + case FQ_TYPE_RX_ERROR:
  9400. + str = "Rx error";
  9401. + break;
  9402. + case FQ_TYPE_RX_PCD:
  9403. + str = "Rx PCD";
  9404. + break;
  9405. + case FQ_TYPE_TX_CONFIRM:
  9406. + str = "Tx default confirmation";
  9407. + break;
  9408. + case FQ_TYPE_TX_CONF_MQ:
  9409. + str = "Tx confirmation (mq)";
  9410. + break;
  9411. + case FQ_TYPE_TX_ERROR:
  9412. + str = "Tx error";
  9413. + break;
  9414. + case FQ_TYPE_TX:
  9415. + str = "Tx";
  9416. + break;
  9417. + default:
  9418. + str = "Unknown";
  9419. + }
  9420. +
  9421. + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
  9422. + str != prevstr)) {
  9423. + if (last_fqid == first_fqid)
  9424. + bytes += sprintf(buf + bytes,
  9425. + "%s: %d\n", prevstr, prev->fqid);
  9426. + else
  9427. + bytes += sprintf(buf + bytes,
  9428. + "%s: %d - %d\n", prevstr,
  9429. + first_fqid, last_fqid);
  9430. + }
  9431. +
  9432. + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
  9433. + last_fqid = fq->fqid;
  9434. + else
  9435. + first_fqid = last_fqid = fq->fqid;
  9436. +
  9437. + prev = fq;
  9438. + prevstr = str;
  9439. + i++;
  9440. + }
  9441. +
  9442. + if (prev) {
  9443. + if (last_fqid == first_fqid)
  9444. + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
  9445. + prev->fqid);
  9446. + else
  9447. + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
  9448. + first_fqid, last_fqid);
  9449. + }
  9450. +
  9451. + return bytes;
  9452. +}
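+
+/* Example of the coalesced output produced above (FQIDs illustrative):
+ *
+ *	Rx error: 259
+ *	Rx default: 260
+ *	Rx PCD: 4096 - 4103
+ *	Tx: 4000 - 4007
+ *
+ * Consecutive FQIDs of the same type are folded into a single
+ * "first - last" range; isolated FQIDs are printed on their own.
+ */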
  9453. +
  9454. +static ssize_t dpaa_eth_generic_show_bpids(struct device *dev,
  9455. + struct device_attribute *attr, char *buf)
  9456. +{
  9457. + ssize_t bytes = 0;
  9458. + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
  9459. + struct dpa_bp *rx_bp = priv->rx_bp;
  9460. + struct dpa_bp *draining_tx_bp = priv->draining_tx_bp;
  9461. + int i = 0;
  9462. +
  9463. + bytes += snprintf(buf + bytes, PAGE_SIZE, "Rx buffer pools:\n");
  9464. + for (i = 0; i < priv->rx_bp_count; i++)
  9465. + bytes += snprintf(buf + bytes, PAGE_SIZE, "%u ",
  9466. + rx_bp[i].bpid);
  9467. +
  9468. + bytes += snprintf(buf + bytes, PAGE_SIZE, "\n");
  9469. + bytes += snprintf(buf + bytes, PAGE_SIZE, "Draining buffer pool:\n");
  9470. + bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n", draining_tx_bp->bpid);
  9471. +
  9472. + return bytes;
  9473. +}
  9474. +
  9475. +static ssize_t dpaa_eth_generic_show_mac_regs(struct device *dev,
  9476. + struct device_attribute *attr, char *buf)
  9477. +{
  9478. + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
  9479. + struct mac_device *mac_dev = priv->mac_dev;
  9480. + int n = 0;
  9481. +
  9482. + if (mac_dev)
  9483. + n = fm_mac_dump_regs(mac_dev, buf, n);
  9484. + else
  9485. + return sprintf(buf, "no mac control\n");
  9486. +
  9487. + return n;
  9488. +}
  9489. +
  9490. +static struct device_attribute dpaa_eth_generic_attrs[] = {
  9491. + __ATTR(device_addr, S_IRUGO, dpaa_eth_generic_show_addr, NULL),
  9492. + __ATTR(device_type, S_IRUGO, dpaa_eth_generic_show_type, NULL),
  9493. + __ATTR(fqids, S_IRUGO, dpaa_eth_generic_show_fqids, NULL),
  9494. + __ATTR(bpids, S_IRUGO, dpaa_eth_generic_show_bpids, NULL),
  9495. + __ATTR(mac_regs, S_IRUGO, dpaa_eth_generic_show_mac_regs, NULL),
  9496. +};
  9497. +
  9498. +void dpaa_eth_generic_sysfs_init(struct device *dev)
  9499. +{
  9500. + int i;
  9501. +
  9502. + for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
  9503. + if (device_create_file(dev, &dpaa_eth_generic_attrs[i])) {
  9504. + dev_err(dev, "Error creating sysfs file\n");
  9505. + while (i > 0)
  9506. + device_remove_file(dev,
  9507. + &dpaa_eth_generic_attrs[--i]);
  9508. + return;
  9509. + }
  9510. +}
  9511. +
  9512. +void dpaa_eth_generic_sysfs_remove(struct device *dev)
  9513. +{
  9514. + int i;
  9515. +
  9516. + for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
  9517. + device_remove_file(dev, &dpaa_eth_generic_attrs[i]);
  9518. +}
  9519. --- /dev/null
  9520. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
  9521. @@ -0,0 +1,499 @@
  9522. +/* Copyright 2008-2013 Freescale Semiconductor Inc.
  9523. + *
  9524. + * Redistribution and use in source and binary forms, with or without
  9525. + * modification, are permitted provided that the following conditions are met:
  9526. + * * Redistributions of source code must retain the above copyright
  9527. + * notice, this list of conditions and the following disclaimer.
  9528. + * * Redistributions in binary form must reproduce the above copyright
  9529. + * notice, this list of conditions and the following disclaimer in the
  9530. + * documentation and/or other materials provided with the distribution.
  9531. + * * Neither the name of Freescale Semiconductor nor the
  9532. + * names of its contributors may be used to endorse or promote products
  9533. + * derived from this software without specific prior written permission.
  9534. + *
  9535. + *
  9536. + * ALTERNATIVELY, this software may be distributed under the terms of the
  9537. + * GNU General Public License ("GPL") as published by the Free Software
  9538. + * Foundation, either version 2 of that License or (at your option) any
  9539. + * later version.
  9540. + *
  9541. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  9542. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  9543. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  9544. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  9545. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  9546. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  9547. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  9548. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  9549. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  9550. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  9551. + */
  9552. +
  9553. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  9554. +#define pr_fmt(fmt) \
  9555. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  9556. + KBUILD_BASENAME".c", __LINE__, __func__
  9557. +#else
  9558. +#define pr_fmt(fmt) \
  9559. + KBUILD_MODNAME ": " fmt
  9560. +#endif
  9561. +
  9562. +#include <linux/init.h>
  9563. +#include <linux/module.h>
  9564. +#include <linux/of_platform.h>
  9565. +#include <linux/of_net.h>
  9566. +#include <linux/etherdevice.h>
  9567. +#include <linux/kthread.h>
  9568. +#include <linux/percpu.h>
  9569. +#include <linux/highmem.h>
  9570. +#include <linux/fsl_qman.h>
  9571. +#include "dpaa_eth.h"
  9572. +#include "dpaa_eth_common.h"
  9573. +#include "dpaa_eth_base.h"
  9574. +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
  9575. +#include "mac.h"
  9576. +
  9577. +/* For MAC-based interfaces, we compute the tx needed headroom from the
  9578. + * associated Tx port's buffer layout settings.
  9579. + * For MACless interfaces just use a default value.
  9580. + */
  9581. +#define DPA_DEFAULT_TX_HEADROOM 64
  9582. +
  9583. +#define DPA_DESCRIPTION "FSL DPAA MACless Ethernet driver"
  9584. +
  9585. +MODULE_LICENSE("Dual BSD/GPL");
  9586. +
  9587. +MODULE_DESCRIPTION(DPA_DESCRIPTION);
  9588. +
  9589. +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
  9590. +static uint16_t macless_tx_timeout = 1000;
  9591. +module_param(macless_tx_timeout, ushort, S_IRUGO);
  9592. +MODULE_PARM_DESC(macless_tx_timeout, "The MACless Tx timeout in ms");
  9593. +
  9594. +/* forward declarations */
  9595. +static int __cold dpa_macless_start(struct net_device *net_dev);
  9596. +static int __cold dpa_macless_stop(struct net_device *net_dev);
  9597. +static int __cold dpa_macless_set_address(struct net_device *net_dev,
  9598. + void *addr);
  9599. +static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev);
  9600. +
  9601. +static int dpaa_eth_macless_probe(struct platform_device *_of_dev);
  9602. +static netdev_features_t
  9603. +dpa_macless_fix_features(struct net_device *dev, netdev_features_t features);
  9604. +
  9605. +static const struct net_device_ops dpa_macless_ops = {
  9606. + .ndo_open = dpa_macless_start,
  9607. + .ndo_start_xmit = dpa_shared_tx,
  9608. + .ndo_stop = dpa_macless_stop,
  9609. + .ndo_tx_timeout = dpa_timeout,
  9610. + .ndo_get_stats64 = dpa_get_stats64,
  9611. + .ndo_set_mac_address = dpa_macless_set_address,
  9612. + .ndo_set_rx_mode = dpa_macless_set_rx_mode,
  9613. + .ndo_validate_addr = eth_validate_addr,
  9614. +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  9615. + .ndo_select_queue = dpa_select_queue,
  9616. +#endif
  9617. + .ndo_change_mtu = dpa_change_mtu,
  9618. + .ndo_init = dpa_ndo_init,
  9619. + .ndo_set_features = dpa_set_features,
  9620. + .ndo_fix_features = dpa_macless_fix_features,
  9621. +};
  9622. +
  9623. +static const struct of_device_id dpa_macless_match[] = {
  9624. + {
  9625. + .compatible = "fsl,dpa-ethernet-macless"
  9626. + },
  9627. + {}
  9628. +};
  9629. +MODULE_DEVICE_TABLE(of, dpa_macless_match);
  9630. +
  9631. +static struct platform_driver dpa_macless_driver = {
  9632. + .driver = {
  9633. + .name = KBUILD_MODNAME "-macless",
  9634. + .of_match_table = dpa_macless_match,
  9635. + .owner = THIS_MODULE,
  9636. + },
  9637. + .probe = dpaa_eth_macless_probe,
  9638. + .remove = dpa_remove
  9639. +};
  9640. +
  9641. +static const char macless_frame_queues[][25] = {
  9642. + [RX] = "fsl,qman-frame-queues-rx",
  9643. + [TX] = "fsl,qman-frame-queues-tx"
  9644. +};
  9645. +
  9646. +static int __cold dpa_macless_start(struct net_device *net_dev)
  9647. +{
  9648. + const struct dpa_priv_s *priv = netdev_priv(net_dev);
  9649. + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
  9650. +
  9651. + netif_tx_start_all_queues(net_dev);
  9652. +
  9653. + if (proxy_dev)
  9654. + dpa_proxy_start(net_dev);
  9655. +
  9657. + return 0;
  9658. +}
  9659. +
  9660. +static int __cold dpa_macless_stop(struct net_device *net_dev)
  9661. +{
  9662. + const struct dpa_priv_s *priv = netdev_priv(net_dev);
  9663. + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
  9664. +
  9665. + netif_tx_stop_all_queues(net_dev);
  9666. +
  9667. + if (proxy_dev)
  9668. + dpa_proxy_stop(proxy_dev, net_dev);
  9669. +
  9670. + return 0;
  9671. +}
  9672. +
  9673. +static int dpa_macless_set_address(struct net_device *net_dev, void *addr)
  9674. +{
  9675. + const struct dpa_priv_s *priv = netdev_priv(net_dev);
  9676. + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
  9677. + int _errno;
  9678. +
  9679. + _errno = eth_mac_addr(net_dev, addr);
  9680. + if (_errno < 0) {
  9681. + if (netif_msg_drv(priv))
  9682. + netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
  9683. + return _errno;
  9684. + }
  9685. +
  9686. + if (proxy_dev) {
  9687. + _errno = dpa_proxy_set_mac_address(proxy_dev, net_dev);
  9688. + if (_errno < 0) {
  9689. + if (netif_msg_drv(priv))
  9690. + netdev_err(net_dev, "proxy_set_mac_address() = %d\n",
  9691. + _errno);
  9692. + return _errno;
  9693. + }
  9694. + }
  9695. +
  9696. + return 0;
  9697. +}
  9698. +
  9699. +static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev)
  9700. +{
  9701. + const struct dpa_priv_s *priv = netdev_priv(net_dev);
  9702. + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
  9703. +
  9704. + if (proxy_dev)
  9705. + dpa_proxy_set_rx_mode(proxy_dev, net_dev);
  9706. +}
  9707. +
  9708. +static netdev_features_t
  9709. +dpa_macless_fix_features(struct net_device *dev, netdev_features_t features)
  9710. +{
  9711. + netdev_features_t unsupported_features = 0;
  9712. +
  9713. + /* In theory we should never be requested to enable features that
  9714. + * we didn't set in netdev->features and netdev->hw_features at probe
  9715. + * time, but double check just to be on the safe side.
  9716. + */
  9717. + unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
  9718. + /* We don't support enabling Rx csum through ethtool yet */
  9719. + unsupported_features |= NETIF_F_RXCSUM;
  9720. +
  9721. + features &= ~unsupported_features;
  9722. +
  9723. + return features;
  9724. +}
  9725. +
  9726. +static int dpa_macless_netdev_init(struct device_node *dpa_node,
  9727. + struct net_device *net_dev)
  9728. +{
  9729. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  9730. + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
  9731. + struct device *dev = net_dev->dev.parent;
  9732. + const uint8_t *mac_addr;
  9733. +
  9734. + net_dev->netdev_ops = &dpa_macless_ops;
  9735. +
  9736. + if (proxy_dev) {
  9737. + struct mac_device *mac_dev = proxy_dev->mac_dev;
  9738. + net_dev->mem_start = mac_dev->res->start;
  9739. + net_dev->mem_end = mac_dev->res->end;
  9740. +
  9741. + return dpa_netdev_init(net_dev, mac_dev->addr,
  9742. + macless_tx_timeout);
  9743. + } else {
  9744. + /* Get the MAC address from device tree */
  9745. + mac_addr = of_get_mac_address(dpa_node);
  9746. +
  9747. + if (mac_addr == NULL) {
  9748. + if (netif_msg_probe(priv))
  9749. + dev_err(dev, "No MAC address found!\n");
  9750. + return -EINVAL;
  9751. + }
  9752. +
  9753. + return dpa_netdev_init(net_dev, mac_addr,
  9754. + macless_tx_timeout);
  9755. + }
  9756. +}
  9757. +
  9758. +/* Probing of FQs for MACless ports */
  9759. +static int dpa_fq_probe_macless(struct device *dev, struct list_head *list,
  9760. + enum port_type ptype)
  9761. +{
  9762. + struct device_node *np = dev->of_node;
  9763. + const struct fqid_cell *fqids;
  9764. + int num_ranges;
  9765. + int i, lenp;
  9766. +
  9767. + fqids = of_get_property(np, macless_frame_queues[ptype], &lenp);
  9768. + if (fqids == NULL) {
  9769. + dev_err(dev, "Need FQ definition in dts for MACless devices\n");
  9770. + return -EINVAL;
  9771. + }
  9772. +
  9773. + num_ranges = lenp / sizeof(*fqids);
  9774. +
  9775. + /* All ranges defined in the device tree are used as Rx/Tx queues */
  9776. + for (i = 0; i < num_ranges; i++) {
  9777. + if (!dpa_fq_alloc(dev, be32_to_cpu(fqids[i].start),
  9778. + be32_to_cpu(fqids[i].count), list,
  9779. + ptype == RX ? FQ_TYPE_RX_PCD : FQ_TYPE_TX)) {
  9780. + dev_err(dev, "_dpa_fq_alloc() failed\n");
  9781. + return -ENOMEM;
  9782. + }
  9783. + }
  9784. +
  9785. + return 0;
  9786. +}
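+
+/* For MACless interfaces both FQ properties live in the interface node
+ * itself; a sketch with illustrative ranges:
+ *
+ *	fsl,qman-frame-queues-rx = <4000 8>;	(<start count>)
+ *	fsl,qman-frame-queues-tx = <4008 8>;
+ *
+ * Note that the Rx ranges are registered as FQ_TYPE_RX_PCD: a MACless port
+ * has no Rx error/default queues of its own.
+ */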
  9787. +
  9788. +static struct proxy_device *
  9789. +dpa_macless_proxy_probe(struct platform_device *_of_dev)
  9790. +{
  9791. + struct device *dev;
  9792. + const phandle *proxy_prop;
  9793. + struct proxy_device *proxy_dev;
  9794. + struct device_node *proxy_node;
  9795. + struct platform_device *proxy_pdev;
  9796. + int lenp;
  9797. +
  9798. + dev = &_of_dev->dev;
  9799. +
  9800. + proxy_prop = of_get_property(dev->of_node, "proxy", &lenp);
  9801. + if (!proxy_prop)
  9802. + return NULL;
  9803. +
  9804. + proxy_node = of_find_node_by_phandle(*proxy_prop);
  9805. + if (!proxy_node) {
  9806. + dev_err(dev, "Cannot find proxy node\n");
  9807. + return NULL;
  9808. + }
  9809. +
  9810. + proxy_pdev = of_find_device_by_node(proxy_node);
  9811. + if (!proxy_pdev) {
  9812. + of_node_put(proxy_node);
  9813. + dev_err(dev, "Cannot find device represented by proxy node\n");
  9814. + return NULL;
  9815. + }
  9816. +
  9817. + proxy_dev = dev_get_drvdata(&proxy_pdev->dev);
  9818. +
  9819. + of_node_put(proxy_node);
  9820. +
  9821. + return proxy_dev;
  9822. +}
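+
+/* The "proxy" property is a single phandle to the proxy ethernet node whose
+ * driver owns the MAC; illustrative dts usage (the label is made up):
+ *
+ *	proxy = <&ethernet_proxy>;
+ *
+ * When the property is absent this returns NULL and the MACless interface
+ * simply runs without MAC control.
+ */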
  9823. +
  9824. +static int dpaa_eth_macless_probe(struct platform_device *_of_dev)
  9825. +{
  9826. + int err = 0, i, channel;
  9827. + struct device *dev;
  9828. + struct device_node *dpa_node;
  9829. + struct dpa_bp *dpa_bp;
  9830. + struct dpa_fq *dpa_fq, *tmp;
  9831. + size_t count;
  9832. + struct net_device *net_dev = NULL;
  9833. + struct dpa_priv_s *priv = NULL;
  9834. + struct dpa_percpu_priv_s *percpu_priv;
  9835. + static struct proxy_device *proxy_dev;
  9836. + struct task_struct *kth;
  9837. + static u8 macless_idx;
  9838. +
  9839. + dev = &_of_dev->dev;
  9840. +
  9841. + dpa_node = dev->of_node;
  9842. +
  9843. + if (!of_device_is_available(dpa_node))
  9844. + return -ENODEV;
  9845. +
  9846. + /* Get the buffer pools assigned to this interface */
  9847. + dpa_bp = dpa_bp_probe(_of_dev, &count);
  9848. + if (IS_ERR(dpa_bp))
  9849. + return PTR_ERR(dpa_bp);
  9850. +
  9851. + for (i = 0; i < count; i++)
  9852. + dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
  9853. +
  9854. + proxy_dev = dpa_macless_proxy_probe(_of_dev);
  9855. +
  9857. + /* Allocate this early, so we can store relevant information in
  9858. + * the private area (needed by 1588 code in dpa_mac_probe)
  9859. + */
  9860. + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
  9861. + if (!net_dev) {
  9862. + dev_err(dev, "alloc_etherdev_mq() failed\n");
  9863. + return -ENOMEM;
  9864. + }
  9865. +
  9866. + /* Do this here, so we can be verbose early */
  9867. + SET_NETDEV_DEV(net_dev, dev);
  9868. + dev_set_drvdata(dev, net_dev);
  9869. +
  9870. + priv = netdev_priv(net_dev);
  9871. + priv->net_dev = net_dev;
  9872. + sprintf(priv->if_type, "macless%d", macless_idx++);
  9873. +
  9874. + priv->msg_enable = netif_msg_init(advanced_debug, -1);
  9875. +
  9876. + priv->peer = NULL;
  9877. + priv->mac_dev = NULL;
  9878. + if (proxy_dev) {
  9879. + /* Temporary solution, needed to keep the main
  9880. + * driver upstreamable: adjust_link is a generic
  9881. + * function that should work for both the private
  9882. + * driver and a MACless driver with MAC device
  9883. + * control capabilities, even though the latter
  9884. + * will not be upstreamed.
  9885. + * TODO: find a cleaner solution (wrapper over
  9886. + * the main priv structure, etc.)
  9887. + */
  9888. + priv->mac_dev = proxy_dev->mac_dev;
  9889. +
  9890. + /* control over proxy's mac device */
  9891. + priv->peer = (void *)proxy_dev;
  9892. + }
  9893. +
  9894. + INIT_LIST_HEAD(&priv->dpa_fq_list);
  9895. +
  9896. + err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, RX);
  9897. + if (!err)
  9898. + err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list,
  9899. + TX);
  9900. + if (err < 0)
  9901. + goto fq_probe_failed;
  9902. +
  9903. + /* bp init */
  9904. + priv->bp_count = count;
  9905. + err = dpa_bp_create(net_dev, dpa_bp, count);
  9906. + if (err < 0)
  9907. + goto bp_create_failed;
  9908. +
  9909. + channel = dpa_get_channel();
  9910. +
  9911. + if (channel < 0) {
  9912. + err = channel;
  9913. + goto get_channel_failed;
  9914. + }
  9915. +
  9916. + priv->channel = (uint16_t)channel;
  9917. +
  9918. + /* Start a thread that will walk the cpus with affine portals
  9919. + * and add this pool channel to each's dequeue mask.
  9920. + */
  9921. + kth = kthread_run(dpaa_eth_add_channel,
  9922. + (void *)(unsigned long)priv->channel,
  9923. + "dpaa_%p:%d", net_dev, priv->channel);
  9924. + if (IS_ERR(kth)) {
  9925. + err = -ENOMEM;
  9926. + goto add_channel_failed;
  9927. + }
  9928. +
  9929. + dpa_fq_setup(priv, &shared_fq_cbs, NULL);
  9930. +
  9931. + /* Add the FQs to the interface, and make them active */
  9932. + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
  9933. + /* For MAC-less devices we only get here for RX frame queues
  9934. + * initialization, which are the TX queues of the other
  9935. + * partition.
  9936. + * It is safe to rely on one partition to set the FQ taildrop
  9937. + * threshold for the TX queues of the other partition
  9938. + * because the ERN notifications will be received by the
  9939. + * partition doing qman_enqueue.
  9940. + */
  9941. + err = dpa_fq_init(dpa_fq, true);
  9942. + if (err < 0)
  9943. + goto fq_alloc_failed;
  9944. + }
  9945. +
  9946. + priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
  9947. +
  9948. + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
  9949. +
  9950. + if (priv->percpu_priv == NULL) {
  9951. + dev_err(dev, "devm_alloc_percpu() failed\n");
  9952. + err = -ENOMEM;
  9953. + goto alloc_percpu_failed;
  9954. + }
  9955. + for_each_possible_cpu(i) {
  9956. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  9957. + memset(percpu_priv, 0, sizeof(*percpu_priv));
  9958. + }
  9959. +
  9960. + err = dpa_macless_netdev_init(dpa_node, net_dev);
  9961. + if (err < 0)
  9962. + goto netdev_init_failed;
  9963. +
  9964. + dpaa_eth_sysfs_init(&net_dev->dev);
  9965. +
  9966. + pr_info("fsl_dpa_macless: Probed %s interface as %s\n",
  9967. + priv->if_type, net_dev->name);
  9968. +
  9969. + return 0;
  9970. +
  9971. +netdev_init_failed:
  9972. +alloc_percpu_failed:
  9973. +fq_alloc_failed:
  9974. + if (net_dev)
  9975. + dpa_fq_free(dev, &priv->dpa_fq_list);
  9976. +add_channel_failed:
  9977. +get_channel_failed:
  9978. + if (net_dev)
  9979. + dpa_bp_free(priv);
  9980. +bp_create_failed:
  9981. +fq_probe_failed:
  9982. + dev_set_drvdata(dev, NULL);
  9983. + if (net_dev)
  9984. + free_netdev(net_dev);
  9985. +
  9986. + return err;
  9987. +}
  9988. +
  9989. +static int __init __cold dpa_macless_load(void)
  9990. +{
  9991. + int _errno;
  9992. +
  9993. + pr_info(DPA_DESCRIPTION "\n");
  9994. +
  9995. + /* Initialize dpaa_eth mirror values */
  9996. + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
  9997. + dpa_max_frm = fm_get_max_frm();
  9998. +
  9999. + _errno = platform_driver_register(&dpa_macless_driver);
  10000. + if (unlikely(_errno < 0)) {
  10001. + pr_err(KBUILD_MODNAME
  10002. + ": %s:%hu:%s(): platform_driver_register() = %d\n",
  10003. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  10004. + }
  10005. +
  10006. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  10007. + KBUILD_BASENAME".c", __func__);
  10008. +
  10009. + return _errno;
  10010. +}
  10011. +module_init(dpa_macless_load);
  10012. +
  10013. +static void __exit __cold dpa_macless_unload(void)
  10014. +{
  10015. + platform_driver_unregister(&dpa_macless_driver);
  10016. +
  10017. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  10018. + KBUILD_BASENAME".c", __func__);
  10019. +}
  10020. +module_exit(dpa_macless_unload);
  10021. --- /dev/null
  10022. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
  10023. @@ -0,0 +1,2156 @@
  10024. +/* Copyright 2015 Freescale Semiconductor Inc.
  10025. + *
  10026. + * Redistribution and use in source and binary forms, with or without
  10027. + * modification, are permitted provided that the following conditions are met:
  10028. + * * Redistributions of source code must retain the above copyright
  10029. + * notice, this list of conditions and the following disclaimer.
  10030. + * * Redistributions in binary form must reproduce the above copyright
  10031. + * notice, this list of conditions and the following disclaimer in the
  10032. + * documentation and/or other materials provided with the distribution.
  10033. + * * Neither the name of Freescale Semiconductor nor the
  10034. + * names of its contributors may be used to endorse or promote products
  10035. + * derived from this software without specific prior written permission.
  10036. + *
  10037. + *
  10038. + * ALTERNATIVELY, this software may be distributed under the terms of the
  10039. + * GNU General Public License ("GPL") as published by the Free Software
  10040. + * Foundation, either version 2 of that License or (at your option) any
  10041. + * later version.
  10042. + *
  10043. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  10044. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  10045. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  10046. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  10047. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  10048. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  10049. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  10050. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  10051. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  10052. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  10053. + */
  10054. +
  10055. +#include <linux/init.h>
  10056. +#include <linux/module.h>
  10057. +#include <linux/kernel.h>
  10058. +#include <linux/moduleparam.h>
  10059. +
  10060. +#include <net/sock.h>
  10061. +#include <linux/netlink.h>
  10062. +#include <linux/skbuff.h>
  10063. +
  10064. +#include "dpaa_eth_macsec.h"
  10065. +#include "dpaa_eth_common.h"
  10066. +
  10067. +#ifdef CONFIG_FSL_DPAA_1588
  10068. +#include "dpaa_1588.h"
  10069. +#endif
  10070. +
  10071. +static struct sock *nl_sk;
  10072. +static struct macsec_priv_s *macsec_priv[FM_MAX_NUM_OF_MACS];
  10073. +static char *macsec_ifs[FM_MAX_NUM_OF_MACS];
  10074. +static int macsec_ifs_cnt;
  10075. +
  10076. +static char ifs[MAX_LEN];
  10077. +const struct ethtool_ops *dpa_ethtool_ops_prev;
  10078. +static struct ethtool_ops dpa_macsec_ethtool_ops;
  10079. +
  10080. +module_param_string(ifs, ifs, MAX_LEN, 0000);
  10081. +MODULE_PARM_DESC(ifs, "Comma separated interface list");
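+/* Example usage (module and interface names are illustrative):
+ *   insmod dpaa_eth_macsec.ko ifs=eth0,eth1
+ */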
  10082. +
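+/* Per-interface private data is indexed by ifindex - 1; this relies on the
+ * DPAA net devices being registered first, so that ifindex - 1 stays below
+ * FM_MAX_NUM_OF_MACS.
+ */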
  10083. +struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev)
  10084. +{
  10085. + return macsec_priv[net_dev->ifindex - 1];
  10086. +}
  10087. +
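+/* All MACsec-enabled interfaces share the single static ops copy below, so
+ * the wrapped statistics callbacks apply to every interface handled here.
+ */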
  10088. +static void macsec_setup_ethtool_ops(struct net_device *net_dev)
  10089. +{
  10090. + /* remember private driver's ethtool ops just once */
  10091. + if (!dpa_ethtool_ops_prev) {
  10092. + dpa_ethtool_ops_prev = net_dev->ethtool_ops;
  10093. +
  10094. + memcpy(&dpa_macsec_ethtool_ops, net_dev->ethtool_ops,
  10095. + sizeof(struct ethtool_ops));
  10096. + dpa_macsec_ethtool_ops.get_sset_count =
  10097. + dpa_macsec_get_sset_count;
  10098. + dpa_macsec_ethtool_ops.get_ethtool_stats =
  10099. + dpa_macsec_get_ethtool_stats;
  10100. + dpa_macsec_ethtool_ops.get_strings =
  10101. + dpa_macsec_get_strings;
  10102. + }
  10103. +
  10104. + net_dev->ethtool_ops = &dpa_macsec_ethtool_ops;
  10105. +}
  10106. +
  10107. +static void macsec_restore_ethtool_ops(struct net_device *net_dev)
  10108. +{
  10109. + net_dev->ethtool_ops = dpa_ethtool_ops_prev;
  10110. +}
  10111. +
  10112. +
  10113. +static int ifname_to_id(char *ifname)
  10114. +{
  10115. + int i;
  10116. +
  10117. + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
  10118. + if (macsec_priv[i]->net_dev &&
  10119. + (strcmp(ifname, macsec_priv[i]->net_dev->name) == 0)) {
  10120. + return i;
  10121. + }
  10122. + }
  10123. +
  10124. + return -1;
  10125. +}
  10126. +
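+/* Tear down in the reverse order of configuration: Rx SAs and SCs first,
+ * then the Tx SA and the SecY itself, and finally the MACsec engine.
+ */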
  10127. +static void deinit_macsec(int macsec_id)
  10128. +{
  10129. + struct macsec_priv_s *selected_macsec_priv;
  10130. + int i;
  10131. +
  10132. + selected_macsec_priv = macsec_priv[macsec_id];
  10133. +
  10134. + if (selected_macsec_priv->en_state == SECY_ENABLED) {
  10135. + for (i = 0; i < NUM_OF_RX_SC; i++) {
  10136. + if (!selected_macsec_priv->rx_sc_dev[i])
  10137. + continue;
  10138. + fm_macsec_secy_rxsa_disable_receive(
  10139. + selected_macsec_priv->fm_ms_secy,
  10140. + selected_macsec_priv->rx_sc_dev[i],
  10141. + selected_macsec_priv->an);
  10142. + pr_debug("disable rx_sa done\n");
  10143. +
  10144. + fm_macsec_secy_delete_rx_sa(
  10145. + selected_macsec_priv->fm_ms_secy,
  10146. + selected_macsec_priv->rx_sc_dev[i],
  10147. + selected_macsec_priv->an);
  10148. + pr_debug("delete rx_sa done\n");
  10149. +
  10150. + fm_macsec_secy_delete_rxsc(
  10151. + selected_macsec_priv->fm_ms_secy,
  10152. + selected_macsec_priv->rx_sc_dev[i]);
  10153. + pr_debug("delete rx_sc done\n");
  10154. + }
  10155. +
  10156. + fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
  10157. + selected_macsec_priv->an);
  10158. + pr_debug("delete tx_sa done\n");
  10159. +
  10160. + fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
  10161. + selected_macsec_priv->fm_ms_secy = NULL;
  10162. + pr_debug("secy free done\n");
  10163. + }
  10164. +
  10165. + if (selected_macsec_priv->en_state != MACSEC_DISABLED) {
  10166. + fm_macsec_disable(selected_macsec_priv->fm_macsec);
  10167. + fm_macsec_free(selected_macsec_priv->fm_macsec);
  10168. + selected_macsec_priv->fm_macsec = NULL;
  10169. + pr_debug("macsec disable and free done\n");
  10170. + }
  10171. +}
  10172. +
  10173. +static void parse_ifs(void)
  10174. +{
  10175. + char *token, *strpos = ifs;
  10176. +
  10177. + while ((token = strsep(&strpos, ",")) != NULL) {
  + /* stop on an empty token or once the array is full */
  10178. + if (strlen(token) == 0 ||
  + macsec_ifs_cnt >= FM_MAX_NUM_OF_MACS)
  10179. + return;
  10181. + macsec_ifs[macsec_ifs_cnt++] = token;
  10183. + }
  10184. +}
  10185. +
  10186. +static void macsec_exception(handle_t _macsec_priv_s,
  10187. + fm_macsec_exception exception)
  10188. +{
  10189. + struct macsec_priv_s *priv;
  10190. + priv = (struct macsec_priv_s *)_macsec_priv_s;
  10191. +
  10192. + switch (exception) {
  10193. + case (SINGLE_BIT_ECC):
  10194. + dev_warn(priv->mac_dev->dev, "%s:%s SINGLE_BIT_ECC exception\n",
  10195. + KBUILD_BASENAME".c", __func__);
  10196. + break;
  10197. + case (MULTI_BIT_ECC):
  10198. + dev_warn(priv->mac_dev->dev, "%s:%s MULTI_BIT_ECC exception\n",
  10199. + KBUILD_BASENAME".c", __func__);
  10200. + break;
  10201. + default:
  10202. + dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
  10203. + KBUILD_BASENAME".c", __func__, exception);
  10204. + break;
  10205. + }
  10206. +}
  10207. +
  10208. +
  10209. +static void macsec_secy_exception(handle_t _macsec_priv_s,
  10210. + fm_macsec_secy_exception exception)
  10211. +{
  10212. + struct macsec_priv_s *priv;
  10213. + priv = (struct macsec_priv_s *)_macsec_priv_s;
  10214. +
  10215. + switch (exception) {
  10216. + case (SECY_EX_FRAME_DISCARDED):
  10217. + dev_warn(priv->mac_dev->dev,
  10218. + "%s:%s SECY_EX_FRAME_DISCARDED exception\n",
  10219. + KBUILD_BASENAME".c", __func__);
  10220. + break;
  10221. + default:
  10222. + dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
  10223. + KBUILD_BASENAME".c", __func__, exception);
  10224. + break;
  10225. + }
  10226. +}
  10227. +
  10228. +static void macsec_secy_events(handle_t _macsec_priv_s,
  10229. + fm_macsec_secy_event event)
  10230. +{
  10231. + struct macsec_priv_s *priv;
  10232. + priv = (struct macsec_priv_s *)_macsec_priv_s;
  10233. +
  10234. + switch (event) {
  10235. + case (SECY_EV_NEXT_PN):
  10236. + dev_dbg(priv->mac_dev->dev, "%s:%s SECY_EV_NEXT_PN event\n",
  10237. + KBUILD_BASENAME".c", __func__);
  10238. + break;
  10239. + default:
  10240. + dev_dbg(priv->mac_dev->dev, "%s:%s event %d\n",
  10241. + KBUILD_BASENAME".c", __func__, event);
  10242. + break;
  10243. + }
  10244. +}
  10245. +
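+/* Map an egress FQ to its Tx confirmation FQ; the two arrays are filled in
+ * lock-step by macsec_fq_setup(), so matching indices pair the queues.
+ */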
  10246. +static struct qman_fq *macsec_get_tx_conf_queue(
  10247. + const struct macsec_priv_s *macsec_priv,
  10248. + struct qman_fq *tx_fq)
  10249. +{
  10250. + int i;
  10251. +
  10252. + for (i = 0; i < MACSEC_ETH_TX_QUEUES; i++)
  10253. + if (macsec_priv->egress_fqs[i] == tx_fq)
  10254. + return macsec_priv->conf_fqs[i];
  10255. + return NULL;
  10256. +}
  10257. +
  10258. +/* Initialize qman fqs. Still need to set context_a, specifically the bits
  10259. + * that identify the secure channel.
  10260. + */
  10261. +static int macsec_fq_init(struct dpa_fq *dpa_fq)
  10262. +{
  10263. + struct qman_fq *fq;
  10264. + struct device *dev;
  10265. + struct qm_mcc_initfq initfq;
  10266. + uint32_t sc_phys_id;
  10267. + int _errno, macsec_id;
  10268. +
  10269. + dev = dpa_fq->net_dev->dev.parent;
  10270. + macsec_id = dpa_fq->net_dev->ifindex - 1;
  10271. +
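+ /* a zero FQID requests a dynamically allocated FQID from QMan */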
  10272. + if (dpa_fq->fqid == 0)
  10273. + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
  10274. +
  10275. + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
  10276. + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
  10277. +
  10278. + if (_errno) {
  10279. + dev_err(dev, "qman_create_fq() failed\n");
  10280. + return _errno;
  10281. + }
  10282. +
  10283. + fq = &dpa_fq->fq_base;
  10284. +
  10285. + if (dpa_fq->init) {
  10286. + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
  10287. + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
  10288. +
  10289. + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
  10290. + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
  10291. +
  10292. + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
  10293. +
  10294. + initfq.fqd.dest.channel = dpa_fq->channel;
  10295. + initfq.fqd.dest.wq = dpa_fq->wq;
  10296. +
  10297. + if (dpa_fq->fq_type == FQ_TYPE_TX) {
  10298. + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
  10299. +
  10300. + /* Obtain the TX scId from fman */
  10301. + _errno = fm_macsec_secy_get_txsc_phys_id(
  10302. + macsec_priv[macsec_id]->fm_ms_secy,
  10303. + &sc_phys_id);
  10304. + if (unlikely(_errno < 0)) {
  10305. + dev_err(dev, "fm_macsec_secy_get_txsc_phys_id = %d\n",
  10306. + _errno);
  10307. + return _errno;
  10308. + }
  10309. +
  10310. + /* Write the TX SC-ID in the context of the FQ.
  10311. + * A2V=1 (use the A2 field)
  10312. + * A0V=1 (use the A0 field)
  10313. + * OVOM=1
  10314. + * MCV=1 (MACsec controlled frames)
  10315. + * MACCMD=the TX scId
  10316. + */
  10317. + initfq.fqd.context_a.hi = 0x1a100000 |
  10318. + sc_phys_id << 16;
  10319. + initfq.fqd.context_a.lo = 0x80000000;
  10320. + }
  10321. +
  10322. + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
  10323. + if (_errno < 0) {
  10324. + dev_err(dev, "qman_init_fq(%u) = %d\n",
  10325. + qman_fq_fqid(fq), _errno);
  10326. + qman_destroy_fq(fq, 0);
  10327. + return _errno;
  10328. + }
  10329. + }
  10330. +
  10331. + dpa_fq->fqid = qman_fq_fqid(fq);
  10332. +
  10333. + return 0;
  10334. +}
  10335. +
  10336. +/* Configure and enable secy. */
  10337. +static int enable_secy(struct generic_msg *gen, int *macsec_id)
  10338. +{
  10339. + struct enable_secy *sec;
  10340. + int _errno;
  10341. + struct fm_macsec_secy_params secy_params;
  10342. + struct dpa_fq *dpa_fq, *tmp;
  10343. + struct macsec_priv_s *selected_macsec_priv;
  10344. +
  10345. + sec = &gen->payload.secy;
  10346. +
  10347. + if (sec->macsec_id < 0 || sec->macsec_id >= FM_MAX_NUM_OF_MACS) {
  10348. + _errno = -EINVAL;
  10349. + goto _return;
  10350. + }
  10351. + *macsec_id = sec->macsec_id;
  10352. + selected_macsec_priv = macsec_priv[sec->macsec_id];
  10353. +
  10354. + if (selected_macsec_priv->fm_ms_secy) {
  10355. + pr_err("Secy has already been enabled\n");
  10356. + return -EINVAL;
  10357. + }
  10358. +
  10359. + memset(&secy_params, 0, sizeof(secy_params));
  10360. + secy_params.fm_macsec_h = selected_macsec_priv->fm_macsec;
  10361. + secy_params.num_receive_channels = NUM_OF_RX_SC;
  10362. + secy_params.tx_sc_params.sci = sec->sci;
  10363. +
  10364. + /* Set encryption method */
  10365. + secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_128;
  10366. +#if (DPAA_VERSION >= 11)
  10367. + secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_256;
  10368. +#endif /* (DPAA_VERSION >= 11) */
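+ /* on DPAA 1.1 and later the 256-bit suite above replaces the 128-bit
+ * default at compile time
+ */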
  10369. + secy_params.exception_f = macsec_secy_exception;
  10370. + secy_params.event_f = macsec_secy_events;
  10371. + secy_params.app_h = selected_macsec_priv;
  10372. +
  10373. + selected_macsec_priv->fm_ms_secy =
  10374. + fm_macsec_secy_config(&secy_params);
  10375. +
  10376. + if (unlikely(selected_macsec_priv->fm_ms_secy == NULL)) {
  10377. + _errno = -EINVAL;
  10378. + goto _return;
  10379. + }
  10380. +
  10381. + /* Configure the insertion mode */
  10382. + if (sec->config_insertion_mode) {
  10383. + _errno = fm_macsec_secy_config_sci_insertion_mode(
  10384. + selected_macsec_priv->fm_ms_secy,
  10385. + sec->sci_insertion_mode);
  10386. + if (unlikely(_errno < 0))
  10387. + goto _return;
  10388. + }
  10389. +
  10390. + /* Configure the frame protection */
  10391. + if (sec->config_protect_frames) {
  10392. + _errno = fm_macsec_secy_config_protect_frames(
  10393. + selected_macsec_priv->fm_ms_secy,
  10394. + sec->protect_frames);
  10395. + if (unlikely(_errno < 0))
  10396. + goto _return;
  10397. + }
  10398. +
  10399. + /* Configure the replay window */
  10400. + if (sec->config_replay_window) {
  10401. + _errno = fm_macsec_secy_config_replay_window(
  10402. + selected_macsec_priv->fm_ms_secy,
  10403. + sec->replay_protect,
  10404. + sec->replay_window);
  10405. + if (unlikely(_errno < 0))
  10406. + goto _return;
  10407. + }
  10408. +
  10409. + /* Configure the validation mode */
  10410. + if (sec->config_validation_mode) {
  10411. + _errno = fm_macsec_secy_config_validation_mode(
  10412. + selected_macsec_priv->fm_ms_secy,
  10413. + sec->validate_frames);
  10414. + if (unlikely(_errno < 0))
  10415. + goto _return;
  10416. + }
  10417. +
  10418. + /* Select the exceptions that will be signaled */
  10419. + if (sec->config_exception) {
  10420. + _errno = fm_macsec_secy_config_exception(
  10421. + selected_macsec_priv->fm_ms_secy,
  10422. + sec->exception,
  10423. + sec->enable_exception);
  10424. + if (unlikely(_errno < 0))
  10425. + goto _return;
  10426. + }
  10427. +
  10428. + /* Select the events that will be signaled */
  10429. + if (sec->config_event) {
  10430. + _errno = fm_macsec_secy_config_event(
  10431. + selected_macsec_priv->fm_ms_secy,
  10432. + sec->event,
  10433. + sec->enable_event);
  10434. + if (unlikely(_errno < 0))
  10435. + goto _return;
  10436. + }
  10437. +
  10438. + /* Configure a point-to-point connection */
  10439. + if (sec->config_point_to_point) {
  10440. + _errno = fm_macsec_secy_config_point_to_point(
  10441. + selected_macsec_priv->fm_ms_secy);
  10442. + if (unlikely(_errno < 0))
  10443. + goto _return;
  10444. + }
  10445. +
  10446. + /* Configure the connection's confidentiality state */
  10447. + if (sec->config_confidentiality) {
  10448. + _errno = fm_macsec_secy_config_confidentiality(
  10449. + selected_macsec_priv->fm_ms_secy,
  10450. + sec->confidentiality_enable,
  10451. + sec->confidentiality_offset);
  10452. + if (unlikely(_errno < 0))
  10453. + goto _return;
  10454. + }
  10455. +
  10456. + _errno = fm_macsec_secy_init(selected_macsec_priv->fm_ms_secy);
  10457. + if (unlikely(_errno < 0))
  10458. + goto _return_fm_macsec_secy_free;
  10459. +
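+ /* the FQs can only be initialized now: macsec_fq_init() looks up the
+ * Tx SC physical id, which exists only once the SecY is initialized
+ */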
  10460. + list_for_each_entry_safe(dpa_fq,
  10461. + tmp,
  10462. + &selected_macsec_priv->dpa_fq_list,
  10463. + list) {
  10464. + _errno = macsec_fq_init(dpa_fq);
  10465. + if (_errno < 0)
  10466. + goto _return;
  10467. + }
  10468. +
  10469. + return 0;
  10470. +
  10471. +_return_fm_macsec_secy_free:
  10472. + fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
  10473. + selected_macsec_priv->fm_ms_secy = NULL;
  10474. +_return:
  10475. + return _errno;
  10476. +}
  10477. +
  10478. +static int set_macsec_exception(struct generic_msg *gen)
  10479. +{
  10480. + struct set_exception *set_ex;
  10481. + struct macsec_priv_s *selected_macsec_priv;
  10482. + int rv;
  10483. +
  10484. + set_ex = &(gen->payload.set_ex);
  10485. +
  10486. + selected_macsec_priv = macsec_priv[set_ex->macsec_id];
  10487. +
  10488. + rv = fm_macsec_set_exception(selected_macsec_priv->fm_macsec,
  10489. + set_ex->exception,
  10490. + set_ex->enable_exception);
  10491. + if (unlikely(rv < 0))
  10492. + pr_err("error when setting the macsec exception mask\n");
  10493. +
  10494. + return rv;
  10495. +}
  10496. +
  10497. +static int create_tx_sa(struct generic_msg *gen)
  10498. +{
  10499. + struct create_tx_sa *c_tx_sa;
  10500. + macsec_sa_key_t sa_key;
  10501. + int rv;
  10502. + struct macsec_priv_s *selected_macsec_priv;
  10503. +
  10504. + c_tx_sa = &(gen->payload.c_tx_sa);
  10505. +
  + /* c_tx_sa points into the caller's message buffer; nothing to free */
  10506. + if (c_tx_sa->macsec_id < 0 ||
  10507. + c_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
  10509. + return -EINVAL;
  10511. + selected_macsec_priv = macsec_priv[c_tx_sa->macsec_id];
  10512. +
  10513. + /* set macsec_priv field */
  10514. + selected_macsec_priv->an = c_tx_sa->an;
  10515. +
  10516. + /* the supported cipher suites use keys of at most 32 bytes */
  10517. + if (unlikely(c_tx_sa->sak_len > 32)) {
  10518. + pr_warn("size of secure key is greater than 32 bytes!\n");
  10520. + return -EINVAL;
  10521. + }
  10522. +
  10523. + rv = copy_from_user(&sa_key,
  10524. + c_tx_sa->sak,
  10525. + c_tx_sa->sak_len);
  10526. + if (unlikely(rv != 0)) {
  10527. + pr_err("copy_from_user could not copy %i bytes\n", rv);
  10528. + return -EFAULT;
  10529. + }
  10530. +
  10531. + rv = fm_macsec_secy_create_tx_sa(selected_macsec_priv->fm_ms_secy,
  10532. + c_tx_sa->an,
  10533. + sa_key);
  10534. + if (unlikely(rv < 0))
  10535. + pr_err("error when creating tx sa\n");
  10536. +
  10537. + return rv;
  10538. +}
  10539. +
  10540. +static int modify_tx_sa_key(struct generic_msg *gen)
  10541. +{
  10542. + struct modify_tx_sa_key *tx_sa_key;
  10543. + struct macsec_priv_s *selected_macsec_priv;
  10544. + macsec_sa_key_t sa_key;
  10545. + int rv;
  10546. +
  10547. + tx_sa_key = &(gen->payload.modify_tx_sa_key);
  10548. +
  10549. + if (tx_sa_key->macsec_id < 0 ||
  10550. + tx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
  10551. + return -EINVAL;
  10552. + selected_macsec_priv = macsec_priv[tx_sa_key->macsec_id];
  10553. +
  10554. + /* set macsec_priv field */
  10555. + selected_macsec_priv->an = tx_sa_key->an;
  10556. +
  10557. + if (unlikely(tx_sa_key->sak_len > 32)) {
  10558. + pr_warn("size of secure key is greater than 32 bytes!\n");
  10560. + return -EINVAL;
  10561. + }
  10562. +
  10563. + rv = copy_from_user(&sa_key,
  10564. + tx_sa_key->sak,
  10565. + tx_sa_key->sak_len);
  10566. + if (unlikely(rv != 0)) {
  10567. + pr_err("copy_from_user could not copy %i bytes\n", rv);
  10568. + return -EFAULT;
  10569. + }
  10570. +
  10571. + rv = fm_macsec_secy_txsa_modify_key(selected_macsec_priv->fm_ms_secy,
  10572. + tx_sa_key->an,
  10573. + sa_key);
  10574. + if (unlikely(rv < 0))
  10575. + pr_err("error while modifying the tx sa key\n");
  10576. +
  10577. + return rv;
  10578. +}
  10579. +
  10580. +static int activate_tx_sa(struct generic_msg *gen)
  10581. +{
  10582. + struct activate_tx_sa *a_tx_sa;
  10583. + struct macsec_priv_s *selected_macsec_priv;
  10584. + int rv;
  10585. +
  10586. + a_tx_sa = &(gen->payload.a_tx_sa);
  10587. +
  10588. + if (a_tx_sa->macsec_id < 0 ||
  10589. + a_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
  10591. + return -EINVAL;
  10593. + selected_macsec_priv = macsec_priv[a_tx_sa->macsec_id];
  10594. +
  10595. + rv = fm_macsec_secy_txsa_set_active(selected_macsec_priv->fm_ms_secy,
  10596. + a_tx_sa->an);
  10597. + if (unlikely(rv < 0))
  10598. + pr_err("error when creating tx sa\n");
  10599. +
  10600. + return rv;
  10601. +}
  10602. +
  10603. +static int get_tx_sa_an(struct generic_msg *gen, macsec_an_t *an)
  10604. +{
  10605. + struct macsec_priv_s *selected_macsec_priv;
  10606. +
  10607. + if (gen->payload.macsec_id < 0 ||
  10608. + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS)
  10609. + return -EINVAL;
  10610. +
  10611. + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
  10612. +
  10613. + fm_macsec_secy_txsa_get_active(selected_macsec_priv->fm_ms_secy, an);
  10614. +
  10615. + return 0;
  10616. +}
  10617. +
  10618. +static int create_rx_sc(struct generic_msg *gen)
  10619. +{
  10620. + struct fm_macsec_secy_sc_params params;
  10621. + struct macsec_priv_s *selected_macsec_priv;
  10622. + struct rx_sc_dev *rx_sc_dev;
  10623. + uint32_t sc_phys_id;
  10624. + int i;
  10625. +
  10626. + if (gen->payload.c_rx_sc.macsec_id < 0 ||
  10627. + gen->payload.c_rx_sc.macsec_id >= FM_MAX_NUM_OF_MACS)
  10628. + return -EINVAL;
  10629. + selected_macsec_priv = macsec_priv[gen->payload.c_rx_sc.macsec_id];
  10630. +
  10631. + for (i = 0; i < NUM_OF_RX_SC; i++)
  10632. + if (!selected_macsec_priv->rx_sc_dev[i])
  10633. + break;
  10634. + if (i == NUM_OF_RX_SC) {
  10635. + pr_err("number of maximum RX_SC's has been reached\n");
  10636. + return -EINVAL;
  10637. + }
  10638. +
  10639. + params.sci = gen->payload.c_rx_sc.sci;
  10640. + params.cipher_suite = SECY_GCM_AES_128;
  10641. +#if (DPAA_VERSION >= 11)
  10642. + params.cipher_suite = SECY_GCM_AES_256;
  10643. +#endif /* (DPAA_VERSION >= 11) */
  10644. +
  10645. + rx_sc_dev = fm_macsec_secy_create_rxsc(selected_macsec_priv->fm_ms_secy,
  10646. + &params);
  + if (unlikely(!rx_sc_dev)) {
  + pr_err("fm_macsec_secy_create_rxsc failed\n");
  + return -EINVAL;
  + }
  10647. +
  10648. + fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
  10649. + rx_sc_dev,
  10650. + &sc_phys_id);
  10651. +
  10652. + selected_macsec_priv->rx_sc_dev[sc_phys_id] = rx_sc_dev;
  10653. +
  10654. + return sc_phys_id;
  10655. +}
  10656. +
  10657. +static int create_rx_sa(struct generic_msg *gen)
  10658. +{
  10659. + struct create_rx_sa *c_rx_sa;
  10660. + struct macsec_priv_s *selected_macsec_priv;
  10661. + struct rx_sc_dev *selected_rx_sc_dev;
  10662. + macsec_sa_key_t sak;
  10663. + int rv;
  10664. +
  10665. + c_rx_sa = &(gen->payload.c_rx_sa);
  10666. +
  10667. + if (unlikely(c_rx_sa->sak_len > 32)) {
  10668. + pr_warn("size of secure key is greater than 32 bytes!\n");
  10669. + return -EINVAL;
  10670. + }
  10671. + rv = copy_from_user(&sak,
  10672. + c_rx_sa->sak,
  10673. + c_rx_sa->sak_len);
  10674. + if (unlikely(rv != 0)) {
  10675. + pr_err("copy_from_user could not copy %i bytes\n", rv);
  10676. + return -EFAULT;
  10677. + }
  10678. +
  10679. + if (c_rx_sa->macsec_id < 0 ||
  10680. + c_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
  10681. + return -EINVAL;
  10682. +
  10683. + selected_macsec_priv = macsec_priv[c_rx_sa->macsec_id];
  10684. +
  10685. + if (c_rx_sa->rx_sc_id < 0 || c_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
  10686. + return -EINVAL;
  10687. +
  10688. + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[c_rx_sa->rx_sc_id];
  10689. +
  10690. + rv = fm_macsec_secy_create_rx_sa(selected_macsec_priv->fm_ms_secy,
  10691. + selected_rx_sc_dev,
  10692. + c_rx_sa->an,
  10693. + c_rx_sa->lpn,
  10694. + sak);
  10695. + if (unlikely(rv < 0)) {
  10696. + pr_err("fm_macsec_secy_create_rx_sa failed\n");
  10697. + return -EBUSY;
  10698. + }
  10699. +
  10700. + return 0;
  10701. +}
  10702. +
  10703. +static int modify_rx_sa_key(struct generic_msg *gen)
  10704. +{
  10705. + struct modify_rx_sa_key *rx_sa_key;
  10706. + struct macsec_priv_s *selected_macsec_priv;
  10707. + struct rx_sc_dev *selected_rx_sc;
  10708. + macsec_sa_key_t sa_key;
  10709. + int rv;
  10710. +
  10711. + rx_sa_key = &(gen->payload.modify_rx_sa_key);
  10712. +
  10713. + if (rx_sa_key->macsec_id < 0 ||
  10714. + rx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
  10715. + return -EINVAL;
  10716. + selected_macsec_priv = macsec_priv[rx_sa_key->macsec_id];
  10717. +
  10718. + if (rx_sa_key->rx_sc_id < 0 || rx_sa_key->rx_sc_id >= NUM_OF_RX_SC)
  10719. + return -EINVAL;
  10720. + selected_rx_sc = selected_macsec_priv->rx_sc_dev[rx_sa_key->rx_sc_id];
  10721. +
  10722. + /* set macsec_priv field */
  10723. + selected_macsec_priv->an = rx_sa_key->an;
  10724. +
  10725. + if (unlikely(rx_sa_key->sak_len > 32)) {
  10726. + pr_warn("size of secure key is greater than 32 bytes!\n");
  10728. + return -EINVAL;
  10729. + }
  10730. +
  10731. + rv = copy_from_user(&sa_key,
  10732. + rx_sa_key->sak,
  10733. + rx_sa_key->sak_len);
  10734. + if (unlikely(rv != 0)) {
  10735. + pr_err("copy_from_user could not copy %i bytes\n", rv);
  10736. + return -EFAULT;
  10737. + }
  10738. +
  10739. + rv = fm_macsec_secy_rxsa_modify_key(selected_macsec_priv->fm_ms_secy,
  10740. + selected_rx_sc,
  10741. + rx_sa_key->an,
  10742. + sa_key);
  10743. + if (unlikely(rv < 0))
  10744. + pr_err("error while modifying the rx sa key\n");
  10745. +
  10746. + return rv;
  10747. +}
  10748. +
  10749. +static int update_npn(struct generic_msg *gen)
  10750. +{
  10751. + struct update_npn *update_npn;
  10752. + struct macsec_priv_s *selected_macsec_priv;
  10753. + struct rx_sc_dev *selected_rx_sc_dev;
  10754. + int err;
  10755. +
  10756. + update_npn = &(gen->payload.update_npn);
  10757. +
  10758. + if (update_npn->macsec_id < 0 ||
  10759. + update_npn->macsec_id >= FM_MAX_NUM_OF_MACS)
  10760. + return -EINVAL;
  10761. + selected_macsec_priv = macsec_priv[update_npn->macsec_id];
  10762. +
  10763. + if (update_npn->rx_sc_id < 0 || update_npn->rx_sc_id >= NUM_OF_RX_SC)
  10764. + return -EINVAL;
  10765. +
  10766. + selected_rx_sc_dev =
  10767. + selected_macsec_priv->rx_sc_dev[update_npn->rx_sc_id];
  10768. +
  10769. + err = fm_macsec_secy_rxsa_update_next_pn(
  10770. + selected_macsec_priv->fm_ms_secy,
  10771. + selected_rx_sc_dev,
  10772. + update_npn->an,
  10773. + update_npn->pn);
  10774. + if (unlikely(err < 0)) {
  10775. + pr_err("fm_macsec_secy_rxsa_update_next_pn failed\n");
  10776. + return -EBUSY;
  10777. + }
  10778. +
  10779. + return 0;
  10780. +}
  10781. +
  10782. +static int update_lpn(struct generic_msg *gen)
  10783. +{
  10784. + struct update_lpn *update_lpn;
  10785. + struct macsec_priv_s *selected_macsec_priv;
  10786. + struct rx_sc_dev *selected_rx_sc_dev;
  10787. + int err;
  10788. +
  10789. + update_lpn = &(gen->payload.update_lpn);
  10790. +
  10791. + if (update_lpn->macsec_id < 0 ||
  10792. + update_lpn->macsec_id >= FM_MAX_NUM_OF_MACS)
  10793. + return -EINVAL;
  10794. + selected_macsec_priv = macsec_priv[update_lpn->macsec_id];
  10795. +
  10796. + if (update_lpn->rx_sc_id < 0 || update_lpn->rx_sc_id >= NUM_OF_RX_SC)
  10797. + return -EINVAL;
  10798. + selected_rx_sc_dev =
  10799. + selected_macsec_priv->rx_sc_dev[update_lpn->rx_sc_id];
  10800. +
  10801. + err = fm_macsec_secy_rxsa_update_lowest_pn(
  10802. + selected_macsec_priv->fm_ms_secy,
  10803. + selected_rx_sc_dev,
  10804. + update_lpn->an,
  10805. + update_lpn->pn);
  10806. + if (unlikely(err < 0)) {
  10807. + pr_err("fm_macsec_secy_rxsa_update_lowest_pn failed\n");
  10808. + return -EBUSY;
  10809. + }
  10810. +
  10811. + return 0;
  10812. +}
  10813. +
  10814. +static int activate_rx_sa(struct generic_msg *gen)
  10815. +{
  10816. + struct activate_rx_sa *a_rx_sa;
  10817. + struct macsec_priv_s *selected_macsec_priv;
  10818. + struct rx_sc_dev *selected_rx_sc_dev;
  10819. + int err;
  10820. +
  10821. + a_rx_sa = &(gen->payload.a_rx_sa);
  10822. +
  10823. + if (a_rx_sa->macsec_id < 0 ||
  10824. + a_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
  10825. + return -EINVAL;
  10826. + }
  10827. + selected_macsec_priv = macsec_priv[a_rx_sa->macsec_id];
  10828. +
  10829. + if (a_rx_sa->rx_sc_id < 0 || a_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
  10830. + return -EINVAL;
  10831. + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[a_rx_sa->rx_sc_id];
  10832. +
  10833. + err = fm_macsec_secy_rxsa_enable_receive(
  10834. + selected_macsec_priv->fm_ms_secy,
  10835. + selected_rx_sc_dev,
  10836. + a_rx_sa->an);
  10837. + if (unlikely(err < 0)) {
  10838. + pr_err("fm_macsec_secy_rxsa_enable_receive failed\n");
  10839. + return -EBUSY;
  10840. + }
  10841. +
  10842. + return 0;
  10843. +}
  10844. +
  10845. +static int get_tx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
  10846. +{
  10847. + struct macsec_priv_s *selected_macsec_priv;
  10848. + int err;
  10849. +
  10850. + if (gen->payload.macsec_id < 0 ||
  10851. + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
  10852. + return -EINVAL;
  10853. + }
  10854. + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
  10855. +
  10856. + err = fm_macsec_secy_get_txsc_phys_id(selected_macsec_priv->fm_ms_secy,
  10857. + sc_id);
  10858. +
  10859. + if (unlikely(err < 0)) {
  10860. + pr_err("fm_macsec_secy_get_txsc_phys_id failed\n");
  10861. + return err;
  10862. + }
  10863. +
  10864. + return 0;
  10865. +}
  10866. +
  10867. +static int get_rx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
  10868. +{
  10869. + struct get_rx_sc_id *get_rx_sc_id;
  10870. + struct macsec_priv_s *selected_macsec_priv;
  10871. + struct rx_sc_dev *selected_rx_sc_dev;
  10872. + int err;
  10873. +
  10874. + get_rx_sc_id = &(gen->payload.get_rx_sc_id);
  10875. +
  10876. + if (get_rx_sc_id->macsec_id < 0 ||
  10877. + get_rx_sc_id->macsec_id >= FM_MAX_NUM_OF_MACS) {
  10878. + return -EINVAL;
  10879. + }
  10880. + selected_macsec_priv = macsec_priv[get_rx_sc_id->macsec_id];
  10881. +
  10882. + if (get_rx_sc_id->rx_sc_id < 0 ||
  10883. + get_rx_sc_id->rx_sc_id >= NUM_OF_RX_SC)
  10884. + return -EINVAL;
  10885. + selected_rx_sc_dev =
  10886. + selected_macsec_priv->rx_sc_dev[get_rx_sc_id->rx_sc_id];
  10887. +
  10888. + err = fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
  10889. + selected_rx_sc_dev,
  10890. + sc_id);
  10891. + if (unlikely(err < 0)) {
  10892. + pr_err("fm_macsec_secy_get_rxsc_phys_id failed\n");
  10893. + return err;
  10894. + }
  10895. +
  10896. + return 0;
  10897. +}
  10898. +
  10899. +static int get_macsec_revision(struct generic_msg *gen, int *macsec_revision)
  10900. +{
  10901. + struct macsec_priv_s *selected_macsec_priv;
  10902. + int err;
  10903. +
  10904. + if (gen->payload.macsec_id < 0 ||
  10905. + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
  10906. + return -EINVAL;
  10907. + }
  10908. + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
  10909. +
  10910. + err = fm_macsec_get_revision(selected_macsec_priv->fm_macsec,
  10911. + macsec_revision);
  10912. + if (unlikely(err < 0)) {
  10913. + pr_err("fm_macsec_get_revision failed\n");
  10914. + return err;
  10915. + }
  10916. +
  10917. + return 0;
  10918. +}
  10919. +
  10920. +static int rx_sa_disable(struct generic_msg *gen)
  10921. +{
  10922. + struct disable_rx_sa *disable_rx_sa;
  10923. + struct macsec_priv_s *selected_macsec_priv;
  10924. + struct rx_sc_dev *selected_rx_sc_dev;
  10925. + int err;
  10926. +
  10927. + disable_rx_sa = &(gen->payload.d_rx_sa);
  10928. +
  10929. + if (disable_rx_sa->macsec_id < 0 ||
  10930. + disable_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
  10931. + return -EINVAL;
  10932. + }
  10933. + selected_macsec_priv = macsec_priv[disable_rx_sa->macsec_id];
  10934. +
  10935. + if (disable_rx_sa->rx_sc_id < 0 ||
  10936. + disable_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
  10937. + return -EINVAL;
  10938. + selected_rx_sc_dev =
  10939. + selected_macsec_priv->rx_sc_dev[disable_rx_sa->rx_sc_id];
  10940. +
  10941. + err = fm_macsec_secy_rxsa_disable_receive(
  10942. + selected_macsec_priv->fm_ms_secy,
  10943. + selected_rx_sc_dev,
  10944. + selected_macsec_priv->an);
  10945. +
  10946. + if (unlikely(err < 0)) {
  10947. + pr_err("fm_macsec_secy_rxsa_disable_receive failed\n");
  10948. + return err;
  10949. + }
  10950. +
  10951. + return 0;
  10952. +}
  10953. +
  10954. +static int rx_sa_delete(struct generic_msg *gen)
  10955. +{
  10956. + struct delete_rx_sa *delete_rx_sa;
  10957. + struct macsec_priv_s *selected_macsec_priv;
  10958. + struct rx_sc_dev *selected_rx_sc_dev;
  10959. + int err;
  10960. +
  10961. + delete_rx_sa = &(gen->payload.del_rx_sa);
  10962. +
  10963. + if (delete_rx_sa->macsec_id < 0 ||
  10964. + delete_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
  10965. + return -EINVAL;
  10966. + }
  10967. + selected_macsec_priv = macsec_priv[delete_rx_sa->macsec_id];
  10968. +
  10969. + if (delete_rx_sa->rx_sc_id < 0 ||
  10970. + delete_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
  10971. + return -EINVAL;
  10972. + selected_rx_sc_dev =
  10973. + selected_macsec_priv->rx_sc_dev[delete_rx_sa->rx_sc_id];
  10974. +
  10975. + err = fm_macsec_secy_delete_rx_sa(selected_macsec_priv->fm_ms_secy,
  10976. + selected_rx_sc_dev,
  10977. + selected_macsec_priv->an);
  10978. +
  10979. + if (unlikely(err < 0)) {
  10980. + pr_err("fm_macsec_secy_delete_rx_sa failed\n");
  10981. + return err;
  10982. + }
  10983. +
  10984. + return 0;
  10985. +}
  10986. +
  10987. +static int rx_sc_delete(struct generic_msg *gen)
  10988. +{
  10989. + struct delete_rx_sc *delete_rx_sc;
  10990. + struct macsec_priv_s *selected_macsec_priv;
  10991. + struct rx_sc_dev *selected_rx_sc_dev;
  10992. + int err;
  10993. +
  10994. + delete_rx_sc = &(gen->payload.del_rx_sc);
  10995. +
  10996. + if (delete_rx_sc->macsec_id < 0 ||
  10997. + delete_rx_sc->macsec_id >= FM_MAX_NUM_OF_MACS) {
  10998. + return -EINVAL;
  10999. + }
  11000. + selected_macsec_priv = macsec_priv[delete_rx_sc->macsec_id];
  11001. +
  11002. + if (delete_rx_sc->rx_sc_id < 0 ||
  11003. + delete_rx_sc->rx_sc_id >= NUM_OF_RX_SC)
  11004. + return -EINVAL;
  11005. + selected_rx_sc_dev =
  11006. + selected_macsec_priv->rx_sc_dev[delete_rx_sc->rx_sc_id];
  11007. +
  11008. + err = fm_macsec_secy_delete_rxsc(selected_macsec_priv->fm_ms_secy,
  11009. + selected_rx_sc_dev);
  11010. +
  11011. + if (unlikely(err < 0)) {
  11012. + pr_err("fm_macsec_secy_delete_rxsc failed\n");
  11013. + return err;
  11014. + }
  11015. +
  11016. + return 0;
  11017. +}
  11018. +
  11019. +static int tx_sa_delete(struct generic_msg *gen)
  11020. +{
  11021. + struct macsec_priv_s *selected_macsec_priv;
  11022. + int err;
  11023. +
  11024. + if (gen->payload.del_tx_sa.macsec_id < 0 ||
  11025. + gen->payload.del_tx_sa.macsec_id >= FM_MAX_NUM_OF_MACS) {
  11026. + return -EINVAL;
  11027. + }
  11028. + selected_macsec_priv = macsec_priv[gen->payload.del_tx_sa.macsec_id];
  11029. +
  11030. + err = fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
  11031. + selected_macsec_priv->an);
  11032. +
  11033. + if (unlikely(err < 0)) {
  11034. + pr_err("fm_macsec_secy_delete_tx_sa failed\n");
  11035. + return err;
  11036. + }
  11037. +
  11038. + return 0;
  11039. +}
  11040. +
  11041. +static int disable_secy(struct generic_msg *gen, int *macsec_id)
  11042. +{
  11043. + struct macsec_priv_s *selected_macsec_priv;
  11044. + int err;
  11045. +
  11046. + if (gen->payload.macsec_id < 0 ||
  11047. + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
  11048. + return -EINVAL;
  11049. + }
  11050. + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
  11051. + *macsec_id = gen->payload.macsec_id;
  11052. +
  11053. + err = fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
  11054. + selected_macsec_priv->fm_ms_secy = NULL;
  11055. +
  11056. + if (unlikely(err < 0)) {
  11057. + pr_err("fm_macsec_secy_free failed\n");
  11058. + return err;
  11059. + }
  11060. +
  11061. + return 0;
  11062. +}
  11063. +
  11064. +static int disable_macsec(struct generic_msg *gen, int *macsec_id)
  11065. +{
  11066. + struct macsec_priv_s *selected_macsec_priv;
  11067. + int err;
  11068. +
  11069. + if (gen->payload.macsec_id < 0 ||
  11070. + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
  11071. + return -EINVAL;
  11072. + }
  11073. +
  11074. + selected_macsec_priv =
  11075. + macsec_priv[gen->payload.macsec_id];
  11076. + *macsec_id = gen->payload.macsec_id;
  11077. +
  11078. + err = fm_macsec_disable(selected_macsec_priv->fm_macsec);
  11079. + err += fm_macsec_free(selected_macsec_priv->fm_macsec);
  11080. + selected_macsec_priv->fm_macsec = NULL;
  11081. +
  11082. + if (unlikely(err < 0)) {
  11083. + pr_err("macsec disable failed\n");
  11084. + return err;
  11085. + }
  11086. +
  11087. + return 0;
  11089. +}
  11090. +
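+/* Full teardown for one interface: the same sequence as deinit_macsec(), but
+ * driven from user space and with the accumulated status reported back.
+ */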
  11091. +static int disable_all(struct generic_msg *gen, int *macsec_id)
  11092. +{
  11093. + struct macsec_priv_s *selected_macsec_priv;
  11094. + struct rx_sc_dev *selected_rx_sc_dev;
  11095. + int err = 0, i;
  11096. +
  11097. + if (gen->payload.macsec_id < 0 ||
  11098. + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
  11099. + return -EINVAL;
  11100. + }
  11101. +
  11102. + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
  11103. + *macsec_id = gen->payload.macsec_id;
  11104. +
  11105. + for (i = 0; i < NUM_OF_RX_SC; i++) {
  11106. + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[i];
  11107. +
  11108. + if (!selected_rx_sc_dev)
  11109. + continue;
  11110. +
  11111. + err += fm_macsec_secy_rxsa_disable_receive(
  11112. + selected_macsec_priv->fm_ms_secy,
  11113. + selected_rx_sc_dev,
  11114. + selected_macsec_priv->an);
  11115. +
  11116. + err += fm_macsec_secy_delete_rx_sa(
  11117. + selected_macsec_priv->fm_ms_secy,
  11118. + selected_rx_sc_dev,
  11119. + selected_macsec_priv->an);
  11120. +
  11121. + err += fm_macsec_secy_delete_rxsc(
  11122. + selected_macsec_priv->fm_ms_secy,
  11123. + selected_rx_sc_dev);
  11124. + }
  11125. +
  11126. + err += fm_macsec_secy_delete_tx_sa(
  11127. + selected_macsec_priv->fm_ms_secy,
  11128. + selected_macsec_priv->an);
  11129. +
  11130. + err += fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
  11131. + selected_macsec_priv->fm_ms_secy = NULL;
  11132. +
  11133. + err += fm_macsec_disable(selected_macsec_priv->fm_macsec);
  11134. +
  11135. + err += fm_macsec_free(selected_macsec_priv->fm_macsec);
  11136. + selected_macsec_priv->fm_macsec = NULL;
  11137. +
  11138. + if (unlikely(err < 0)) {
  11139. + pr_err("macsec disable failed\n");
  11140. + return err;
  11141. + }
  11142. +
  11143. + return 0;
  11144. +}
  11145. +
  11146. +static inline void macsec_setup_ingress(struct macsec_priv_s *macsec_priv,
  11147. + struct dpa_fq *fq,
  11148. + const struct qman_fq *template)
  11149. +{
  11150. + fq->fq_base = *template;
  11151. + fq->net_dev = macsec_priv->net_dev;
  11152. +
  11153. + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
  11154. + fq->channel = macsec_priv->channel;
  11155. +}
  11156. +
  11157. +static inline void macsec_setup_egress(struct macsec_priv_s *macsec_priv,
  11158. + struct dpa_fq *fq,
  11159. + struct fm_port *port,
  11160. + const struct qman_fq *template)
  11161. +{
  11162. + fq->fq_base = *template;
  11163. + fq->net_dev = macsec_priv->net_dev;
  11164. +
  11165. + if (port) {
  11166. + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
  11167. + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
  11168. + } else {
  11169. + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
  11170. + }
  11171. +}
  11172. +
  11173. +/* At the moment, we don't create recycle queues. */
  11174. +static void macsec_fq_setup(struct macsec_priv_s *macsec_priv,
  11175. + const struct dpa_fq_cbs_t *fq_cbs,
  11176. + struct fm_port *tx_port)
  11177. +{
  11178. + struct dpa_fq *fq;
  11179. + int egress_cnt = 0, conf_cnt = 0;
  11180. +
  11181. + /* Initialize each FQ in the list */
  11182. + list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
  11183. + switch (fq->fq_type) {
  11184. + /* Normal TX queues */
  11185. + case FQ_TYPE_TX:
  11186. + macsec_setup_egress(macsec_priv, fq, tx_port,
  11187. + &fq_cbs->egress_ern);
  11188. + /* If we have more Tx queues than the number of cores,
  11189. + * just ignore the extra ones.
  11190. + */
  11191. + if (egress_cnt < MACSEC_ETH_TX_QUEUES)
  11192. + macsec_priv->egress_fqs[egress_cnt++] =
  11193. + &fq->fq_base;
  11194. + break;
  11195. + case FQ_TYPE_TX_CONFIRM:
  11196. + BUG_ON(!macsec_priv->mac_dev);
  11197. + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
  11198. + break;
  11199. + /* TX confirm multiple queues */
  11200. + case FQ_TYPE_TX_CONF_MQ:
  11201. + BUG_ON(!macsec_priv->mac_dev);
  11202. + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
  11203. + macsec_priv->conf_fqs[conf_cnt++] = &fq->fq_base;
  11204. + break;
  11205. + case FQ_TYPE_TX_ERROR:
  11206. + BUG_ON(!macsec_priv->mac_dev);
  11207. + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_errq);
  11208. + break;
  11209. + default:
  11210. + dev_warn(macsec_priv->net_dev->dev.parent,
  11211. + "Unknown FQ type detected!\n");
  11212. + break;
  11213. + }
  11214. + }
  11215. +
  11216. + /* The number of Tx queues may be smaller than the number of cores, if
  11217. + * the Tx queue range is specified in the device tree instead of being
  11218. + * dynamically allocated.
  11219. + * Make sure all CPUs receive a corresponding Tx queue.
  11220. + */
  11221. + while (egress_cnt < MACSEC_ETH_TX_QUEUES) {
  11222. + list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
  11223. + if (fq->fq_type != FQ_TYPE_TX)
  11224. + continue;
  11225. + macsec_priv->egress_fqs[egress_cnt++] = &fq->fq_base;
  11226. + if (egress_cnt == MACSEC_ETH_TX_QUEUES)
  11227. + break;
  11228. + }
  11229. + }
  11231. +}
  11232. +
  11233. +static const struct fqid_cell tx_fqids[] = {
  11234. + {0, MACSEC_ETH_TX_QUEUES}
  11235. +};
  11236. +
  11237. +static const struct fqid_cell tx_confirm_fqids[] = {
  11238. + {0, MACSEC_ETH_TX_QUEUES}
  11239. +};
  11240. +
  11241. +/* Allocate the percpu priv. This is used to keep track of rx and tx packets
  11242. + * on each cpu (note that the number of queues equals the number of cpus, so
  11243. + * there is one queue per cpu).
  11244. + */
  11245. +static void alloc_priv(struct macsec_percpu_priv_s *percpu_priv,
  11246. + struct macsec_priv_s *macsec_priv, struct device *dev)
  11247. +{
  11248. + int i;
  11249. +
  11250. + macsec_priv->percpu_priv = alloc_percpu(*macsec_priv->percpu_priv);
  11251. +
  11252. + if (unlikely(macsec_priv->percpu_priv == NULL)) {
  11253. + dev_err(dev, "alloc_percpu() failed\n");
  11255. + dpa_fq_free(dev, &macsec_priv->dpa_fq_list);
  + return;
  11256. + }
  11257. +
  11258. + for_each_possible_cpu(i) {
  11259. + percpu_priv = per_cpu_ptr(macsec_priv->percpu_priv, i);
  11260. + memset(percpu_priv, 0, sizeof(*percpu_priv));
  11261. + }
  11263. +}
  11264. +
  11265. +/* On RX, we only need to record whether a frame was MACsec-protected or not;
  11266. + * the per-cpu counter updated here feeds the ethtool statistics.
  11267. + */
  11268. +static int macsec_rx_hook(void *ptr, struct net_device *net_dev, u32 fqid)
  11269. +{
  11271. + struct qm_fd *rx_fd = (struct qm_fd *)ptr;
  11272. + struct macsec_percpu_priv_s *percpu_priv_m;
  11273. + struct macsec_priv_s *selected_macsec_priv;
  11274. +
  11275. + selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
  11276. +
  11277. + percpu_priv_m = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
  11278. +
  11279. + if ((rx_fd->status & FM_FD_STAT_RX_MACSEC) != 0) {
  11280. + if (netif_msg_hw(selected_macsec_priv) && net_ratelimit())
  11281. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  11282. + rx_fd->status & FM_FD_STAT_RX_MACSEC);
  11283. + percpu_priv_m->rx_macsec++;
  11284. + }
  11285. +
  11286. + return DPAA_ETH_CONTINUE;
  11287. +}
  11288. +
  11289. +/* Split TX traffic. If encryption is enabled, send packets on the dedicated
  11290. + * QMan frame queues; otherwise, let them be handled by the dpa_eth driver.
  11291. + * Also, keep track of the number of packets sent through the "macsec" queues.
  11292. + */
  11293. +static enum dpaa_eth_hook_result macsec_tx_hook(struct sk_buff *skb,
  11294. + struct net_device *net_dev)
  11295. +{
  11296. + struct dpa_priv_s *dpa_priv;
  11297. + struct qm_fd fd;
  11298. + struct macsec_percpu_priv_s *macsec_percpu_priv;
  11299. + struct dpa_percpu_priv_s *dpa_percpu_priv;
  11300. + int i, err = 0;
  11301. + int *countptr, offset = 0;
  11302. + const bool nonlinear = skb_is_nonlinear(skb);
  11303. + struct qman_fq *egress_fq;
  11304. + struct macsec_priv_s *selected_macsec_priv;
  11305. +
  11306. + selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
  11307. +
  11308. + if (!selected_macsec_priv->net_dev ||
  11309. + (selected_macsec_priv->en_state != SECY_ENABLED) ||
  11310. + (ntohs(skb->protocol) == ETH_P_PAE))
  11311. + return DPAA_ETH_CONTINUE;
  11312. +
  11313. + dpa_priv = netdev_priv(net_dev);
  11314. + /* Non-migratable context, safe to use raw_cpu_ptr */
  11315. + macsec_percpu_priv = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
  11316. + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
  11317. +
  11318. + countptr = raw_cpu_ptr(dpa_priv->dpa_bp->percpu_count);
  11319. +
  11320. + clear_fd(&fd);
  11321. +
  11322. +#ifdef CONFIG_FSL_DPAA_1588
  11323. + if (dpa_priv->tsu && dpa_priv->tsu->valid &&
  11324. + dpa_priv->tsu->hwts_tx_en_ioctl)
  11325. + fd.cmd |= FM_FD_CMD_UPD;
  11326. +#endif
  11327. +#ifdef CONFIG_FSL_DPAA_TS
  11328. + if (unlikely(dpa_priv->ts_tx_en &&
  11329. + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
  11330. + fd.cmd |= FM_FD_CMD_UPD;
  11331. + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  11332. +#endif /* CONFIG_FSL_DPAA_TS */
  11333. +
  11334. + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
  11335. + * we don't feed FMan with more fragments than it supports.
  11336. + * Btw, we're using the first sgt entry to store the linear part of
  11337. + * the skb, so we're one extra frag short.
  11338. + */
  11339. + if (nonlinear &&
  11340. + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
  11341. + /* Just create a S/G fd based on the skb */
  11342. + err = skb_to_sg_fd(dpa_priv, skb, &fd);
  11343. + dpa_percpu_priv->tx_frag_skbuffs++;
  11344. + } else {
  11345. + /* Make sure we have enough headroom to accommodate private
  11346. + * data, parse results, etc. Normally this shouldn't happen if
  11347. + * we're here via the standard kernel stack.
  11348. + */
  11349. + if (unlikely(skb_headroom(skb) < dpa_priv->tx_headroom)) {
  11350. + struct sk_buff *skb_new;
  11351. +
  11352. + skb_new = skb_realloc_headroom(skb,
  11353. + dpa_priv->tx_headroom);
  11354. + if (unlikely(!skb_new)) {
  11355. + dev_kfree_skb(skb);
  11356. + dpa_percpu_priv->stats.tx_errors++;
  11357. + return DPAA_ETH_STOLEN;
  11358. + }
  11359. + dev_kfree_skb(skb);
  11360. + skb = skb_new;
  11361. + }
  11362. +
  11363. + /* We're going to store the skb backpointer at the beginning
  11364. + * of the data buffer, so we need a privately owned skb
  11365. + */
  11366. +
  11367. + /* Code borrowed from skb_unshare(). */
  11368. + if (skb_cloned(skb)) {
  11369. + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
  11370. + kfree_skb(skb);
  11371. + skb = nskb;
  11372. + /* skb_copy() has now linearized the skbuff. */
  11373. + } else if (unlikely(nonlinear)) {
  11374. + /* We are here because the egress skb contains
  11375. + * more fragments than we support. In this case,
  11376. + * we have no choice but to linearize it ourselves.
  11377. + */
  11378. + err = __skb_linearize(skb);
  11379. + }
  11380. + if (unlikely(!skb || err < 0)) {
  11381. + /* Common out-of-memory error path */
  11382. + goto enomem;
  11383. + }
  11384. +
  11385. + /* Finally, create a contig FD from this skb */
  11386. + err = skb_to_contig_fd(dpa_priv, skb, &fd, countptr, &offset);
  11387. + }
  11388. + if (unlikely(err < 0))
  11389. + goto skb_to_fd_failed;
  11390. +
  11391. + if (fd.bpid != 0xff) {
  11392. + skb_recycle(skb);
  11393. + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
  11394. + * but we need the skb to look as if returned by build_skb().
  11395. + * We need to manually adjust the tailptr as well.
  11396. + */
  11397. + skb->data = skb->head + offset;
  11398. + skb_reset_tail_pointer(skb);
  11399. +
  11400. + (*countptr)++;
  11401. + dpa_percpu_priv->tx_returned++;
  11402. + }
  11403. +
  11404. + egress_fq = selected_macsec_priv->egress_fqs[smp_processor_id()];
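+ /* for buffers the driver owns (bpid == 0xff, i.e. not from a bpool),
+ * point FMan at our Tx confirmation FQ by writing its FQID into the
+ * FD command field
+ */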
  11405. + if (fd.bpid == 0xff)
  11406. + fd.cmd |= qman_fq_fqid(macsec_get_tx_conf_queue(
  11407. + selected_macsec_priv,
  11408. + egress_fq));
  11409. +
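+ /* qman_enqueue() may return -EBUSY under congestion; retry a bounded
+ * number of times instead of blocking here
+ */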
  11410. + for (i = 0; i < 100000; i++) {
  11411. + err = qman_enqueue(egress_fq, &fd, 0);
  11412. + if (err != -EBUSY)
  11413. + break;
  11414. + }
  11415. +
  11416. + if (unlikely(err < 0)) {
  11417. + dpa_percpu_priv->stats.tx_errors++;
  11418. + dpa_percpu_priv->stats.tx_fifo_errors++;
  11419. + goto xmit_failed;
  11420. + }
  11421. +
  11422. + macsec_percpu_priv->tx_macsec++;
  11423. + dpa_percpu_priv->stats.tx_packets++;
  11424. + dpa_percpu_priv->stats.tx_bytes += dpa_fd_length(&fd);
  11425. +
  11426. + net_dev->trans_start = jiffies;
  11427. + return DPAA_ETH_STOLEN;
  11428. +
  11429. +xmit_failed:
  11430. + if (fd.bpid != 0xff) {
  11431. + (*countptr)--;
  11432. + dpa_percpu_priv->tx_returned--;
  11433. + dpa_fd_release(net_dev, &fd);
  11434. + dpa_percpu_priv->stats.tx_errors++;
  11435. + return DPAA_ETH_STOLEN;
  11436. + }
  11437. + _dpa_cleanup_tx_fd(dpa_priv, &fd);
  11438. +skb_to_fd_failed:
  11439. +enomem:
  11440. + dpa_percpu_priv->stats.tx_errors++;
  11441. + dev_kfree_skb(skb);
  11442. + return DPAA_ETH_STOLEN;
  11443. +}
  11444. +
  11445. +/* Allocate and initialize the macsec private data and frame queues for each
  11446. + * requested interface. Iterate through the existing net devices in order to
  11447. + * find the ones we want to have macsec for.
  11448. + */
  11449. +static int macsec_setup(void)
  11450. +{
  11451. + struct net_device *net_dev;
  11452. + struct macsec_percpu_priv_s *percpu_priv = NULL;
  11453. + struct dpa_priv_s *dpa_priv = NULL;
  11454. + struct dpa_fq *dpa_fq;
  11455. + struct device *dev = NULL;
  11456. + int err, i, j, macsec_id;
  11457. +
  11458. + pr_debug("Entering: %s\n", __func__);
  11459. +
  11460. + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
  11461. + macsec_priv[i] = kzalloc(sizeof(*(macsec_priv[i])), GFP_KERNEL);
  11462. +
  11463. + if (unlikely(macsec_priv[i] == NULL)) {
  11464. + int j;
  11465. + for (j = 0; j < i; j++)
  11466. + kfree(macsec_priv[j]);
  11467. + pr_err("could not allocate\n");
  11468. + return -ENOMEM;
  11469. + }
  11470. + }
  11471. +
  11472. + for (i = 0; i < macsec_ifs_cnt; i++) {
  11473. + net_dev = first_net_device(&init_net);
  11474. + macsec_id = net_dev->ifindex - 1;
  11475. + while (net_dev) {
  11476. + macsec_id = net_dev->ifindex - 1;
  11477. +
  11478. + /* skip devices that do not match the requested name; the
  11479. + * inverted test keeps the loop body shallow
  11480. + */
  11481. + if (strcmp(net_dev->name, macsec_ifs[i]) != 0) {
  11482. + net_dev = next_net_device(net_dev);
  11483. + continue;
  11484. + }
  11485. +
  11486. + /* strcmp(net_dev->name, macsec_ifs[i]) == 0 */
  11487. + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
  11488. + macsec_priv[macsec_id]->net_dev = net_dev;
  11489. + dpa_priv = netdev_priv(net_dev);
  11490. + macsec_priv[macsec_id]->mac_dev = dpa_priv->mac_dev;
  11491. + macsec_priv[macsec_id]->channel = dpa_priv->channel;
  11492. + dev = net_dev->dev.parent;
  11493. +
  11494. + INIT_LIST_HEAD(&macsec_priv[macsec_id]->dpa_fq_list);
  11495. +
  11496. + dpa_fq = dpa_fq_alloc(dev,
  11497. + tx_fqids->start, tx_fqids->count,
  11498. + &macsec_priv[macsec_id]->dpa_fq_list,
  11499. + FQ_TYPE_TX);
  11500. + if (unlikely(dpa_fq == NULL)) {
  11501. + dev_err(dev, "dpa_fq_alloc() failed\n");
  + err = -ENOMEM;
  + goto _error;
  11503. + }
  11504. +
  11505. + dpa_fq = dpa_fq_alloc(dev,
  11506. + tx_confirm_fqids->start,
  11507. + tx_confirm_fqids->count,
  11508. + &macsec_priv[macsec_id]->dpa_fq_list,
  11509. + FQ_TYPE_TX_CONF_MQ);
  11510. + if (unlikely(dpa_fq == NULL)) {
  11511. + dev_err(dev, "dpa_fq_alloc() failed\n");
  + err = -ENOMEM;
  + goto _error;
  11513. + }
  11514. +
  11515. + macsec_fq_setup(macsec_priv[macsec_id], &private_fq_cbs,
  11516. + macsec_priv[macsec_id]->mac_dev->port_dev[TX]);
  11517. +
  11518. + alloc_priv(percpu_priv, macsec_priv[macsec_id], dev);
  11519. +
  11520. + break;
  11521. + }
  11522. + if (macsec_priv[macsec_id]->net_dev == NULL) {
  11523. + pr_err("Interface unknown\n");
  11524. + err = -EINVAL;
  11525. + goto _error;
  11526. + }
  11527. +
  11528. + /* setup specific ethtool ops for macsec */
  11529. + macsec_setup_ethtool_ops(net_dev);
  11530. + }
  11531. + return 0;
  11532. +
  11533. +_error:
  11534. + for (j = 0; j < i; i++) {
  11535. + net_dev = first_net_device(&init_net);
  11536. + while (net_dev) {
  11537. + macsec_id = net_dev->ifindex - 1;
  11538. + if (strcmp(net_dev->name, macsec_ifs[j]) != 0) {
  11539. + net_dev = next_net_device(net_dev);
  11540. + continue;
  11541. + }
  11542. + dpa_fq_free(net_dev->dev.parent,
  11543. + &macsec_priv[macsec_id]->dpa_fq_list);
  11544. + break;
  11545. + }
  11546. + macsec_restore_ethtool_ops(macsec_priv[j]->net_dev);
  11547. + kfree(macsec_priv[j]);
  11548. + }
  11549. + for (j = i; j < FM_MAX_NUM_OF_MACS; j++)
  11550. + kfree(macsec_priv[j]);
  11551. + return err;
  11552. +}
  11553. +
  11554. +static int enable_macsec(struct generic_msg *gen)
  11555. +{
  11556. + struct fm_macsec_params macsec_params;
  11557. + int rv, macsec_id;
  11558. + void __iomem *mac_dev_base_addr;
  11559. + uintptr_t macsec_reg_addr;
  11560. + struct macsec_data *mdata;
  11561. + char if_name[IFNAMSIZ];
  11562. + struct macsec_priv_s *selected_macsec_priv;
  11563. +
  11564. + mdata = &gen->payload.en_macsec;
  11565. +
  11566. + if (unlikely(mdata->if_name_length == 0 ||
  + mdata->if_name_length > IFNAMSIZ)) {
  11567. + pr_err("invalid interface name length\n");
  11568. + return -EINVAL;
  11569. + }
  11570. +
  11571. + rv = copy_from_user(if_name, mdata->if_name, mdata->if_name_length);
  11572. + if (unlikely(rv != 0)) {
  11573. + pr_err("copy_from_user could not copy %i bytes\n", rv);
  11574. + return -EFAULT;
  11575. + }
  + /* the user-supplied name may lack a NUL terminator */
  + if_name[mdata->if_name_length - 1] = '\0';
  11576. +
  11577. + macsec_id = ifname_to_id(if_name);
  11578. + if (macsec_id < 0 || macsec_id >= FM_MAX_NUM_OF_MACS) {
  11579. + pr_err("error on converting to macsec_id\n");
  11580. + return -ENXIO;
  11581. + }
  11582. +
  11583. + selected_macsec_priv = macsec_priv[macsec_id];
  11584. +
  11585. + if (selected_macsec_priv->fm_macsec) {
  11586. + pr_err("macsec has already been configured\n");
  11587. + return -EINVAL;
  11588. + }
  11589. +
  11590. + mac_dev_base_addr = selected_macsec_priv->mac_dev->vaddr;
  11591. +
  11592. + macsec_reg_addr = (uintptr_t)(mac_dev_base_addr + MACSEC_REG_OFFSET);
  11593. +
  11594. + memset(&macsec_params, 0, sizeof(macsec_params));
  11595. + macsec_params.fm_h = (handle_t)selected_macsec_priv->mac_dev->fm;
  11596. + macsec_params.guest_mode = FALSE;
  11597. + /* The MACsec offset relative to the memory mapped MAC device */
  11598. + macsec_params.non_guest_params.base_addr = macsec_reg_addr;
  11599. + macsec_params.non_guest_params.fm_mac_h =
  11600. + (handle_t)selected_macsec_priv->mac_dev->get_mac_handle(
  11601. + selected_macsec_priv->mac_dev);
  11602. + macsec_params.non_guest_params.exception_f = macsec_exception;
  11603. + macsec_params.non_guest_params.app_h = selected_macsec_priv->mac_dev;
  11604. +
  11605. + selected_macsec_priv->fm_macsec = fm_macsec_config(&macsec_params);
  11606. + if (unlikely(selected_macsec_priv->fm_macsec == NULL))
  11607. + return -EINVAL;
  11608. +
  11609. + if (mdata->config_unknown_sci_treatment) {
  11610. + rv = fm_macsec_config_unknown_sci_frame_treatment(
  11611. + selected_macsec_priv->fm_macsec,
  11612. + mdata->unknown_sci_treatment);
  11613. + if (unlikely(rv < 0))
  11614. + goto _return_fm_macsec_free;
  11615. + }
  11616. +
  11617. + if (mdata->config_invalid_tag_treatment) {
  11618. + rv = fm_macsec_config_invalid_tags_frame_treatment(
  11619. + selected_macsec_priv->fm_macsec,
  11620. + mdata->deliver_uncontrolled);
  11621. + if (unlikely(rv < 0))
  11622. + goto _return_fm_macsec_free;
  11623. + }
  11624. +
  11625. + if (mdata->config_kay_frame_treatment) {
  11626. + rv = fm_macsec_config_kay_frame_treatment(
  11627. + selected_macsec_priv->fm_macsec,
  11628. + mdata->discard_uncontrolled);
  11629. + if (unlikely(rv < 0))
  11630. + goto _return_fm_macsec_free;
  11631. + }
  11632. +
  11633. + if (mdata->config_untag_treatment) {
  11634. + rv = fm_macsec_config_untag_frame_treatment(
  11635. + selected_macsec_priv->fm_macsec,
  11636. + mdata->untag_treatment);
  11637. + if (unlikely(rv < 0))
  11638. + goto _return_fm_macsec_free;
  11639. + }
  11640. +
  11641. + if (mdata->config_pn_exhaustion_threshold) {
  11642. + rv = fm_macsec_config_pn_exhaustion_threshold(
  11643. + selected_macsec_priv->fm_macsec,
  11644. + mdata->pn_threshold);
  11645. + if (unlikely(rv < 0))
  11646. + goto _return_fm_macsec_free;
  11647. + }
  11648. +
  11649. + if (mdata->config_keys_unreadable) {
  11650. + rv = fm_macsec_config_keys_unreadable(
  11651. + selected_macsec_priv->fm_macsec);
  11652. + if (unlikely(rv < 0))
  11653. + goto _return_fm_macsec_free;
  11654. + }
  11655. +
  11656. + if (mdata->config_sectag_without_sci) {
  11657. + rv = fm_macsec_config_sectag_without_sci(
  11658. + selected_macsec_priv->fm_macsec);
  11659. + if (unlikely(rv < 0))
  11660. + goto _return_fm_macsec_free;
  11661. + }
  11662. +
  11663. + if (mdata->config_exception) {
  11664. + rv = fm_macsec_config_exception(selected_macsec_priv->fm_macsec,
  11665. + mdata->exception,
  11666. + mdata->enable_exception);
  11667. + if (unlikely(rv < 0))
  11668. + goto _return_fm_macsec_free;
  11669. + }
  11670. +
  11671. + rv = fm_macsec_init(selected_macsec_priv->fm_macsec);
  11672. + if (unlikely(rv < 0))
  11673. + goto _return_fm_macsec_free;
  11674. +
  11675. + rv = fm_macsec_enable(selected_macsec_priv->fm_macsec);
  11676. + if (unlikely(rv < 0))
  11677. + goto _return_fm_macsec_free;
  11678. +
  11679. + return macsec_id;
  11680. +
  11681. +_return_fm_macsec_free:
  11682. + fm_macsec_free(selected_macsec_priv->fm_macsec);
  11683. + selected_macsec_priv->fm_macsec = NULL;
  11684. + return rv;
  11685. +}
  11686. +
  11687. +static int send_result(struct nlmsghdr *nlh, int pid, int result)
  11688. +{
  11689. + int res;
  11690. + struct sk_buff *skb_out;
  11691. + size_t msg_size = sizeof(result);
  11692. +
  11693. + skb_out = nlmsg_new(msg_size, 0);
  11694. + if (unlikely(!skb_out)) {
  11695. + pr_err("Failed to allocate new skb\n");
  11696. + goto _ret_err;
  11697. + }
  11698. +
   11699. + nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
   11700. + if (unlikely(!nlh)) {
   11701. + pr_err("Failed to put the netlink message header\n");
   + nlmsg_free(skb_out);
   11702. + goto _ret_err;
   11703. + }
  11704. +
  11705. + NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
  11706. + memcpy(nlmsg_data(nlh), &result, msg_size);
  11707. +
  11708. + res = nlmsg_unicast(nl_sk, skb_out, pid);
  11709. + if (unlikely(res < 0)) {
  11710. + pr_err("Error while sending back to user\n");
  11711. + goto _ret_err;
  11712. + }
  11713. +
  11714. + return 0;
  11715. +
  11716. +_ret_err:
  11717. + return -1;
  11718. +}
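For reference, the user-space counterpart of this exchange can be sketched as below. This is illustrative only: it assumes the struct generic_msg, NETLINK_USER and ACK/NACK definitions from dpaa_eth_macsec.h (added later in this patch) are mirrored in a hypothetical user-space header, here called dpaa_eth_macsec_user.h.

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	/* assumed: user-space mirror of the dpaa_eth_macsec.h definitions */
	#include "dpaa_eth_macsec_user.h"

	static int macsec_request(struct generic_msg *req)
	{
		struct sockaddr_nl src = { .nl_family = AF_NETLINK,
					   .nl_pid = getpid() };
		struct sockaddr_nl dst = { .nl_family = AF_NETLINK }; /* pid 0: kernel */
		char buf[NLMSG_SPACE(sizeof(*req))];
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		int fd, result = NACK;

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USER);
		if (fd < 0)
			return NACK;
		if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
			goto out;

		memset(buf, 0, sizeof(buf));
		nlh->nlmsg_len = NLMSG_SPACE(sizeof(*req));
		nlh->nlmsg_pid = getpid();
		memcpy(NLMSG_DATA(nlh), req, sizeof(*req));
		sendto(fd, nlh, nlh->nlmsg_len, 0,
		       (struct sockaddr *)&dst, sizeof(dst));

		/* switch_messages() answers with a single int via send_result() */
		if (recv(fd, buf, sizeof(buf), 0) > 0)
			memcpy(&result, NLMSG_DATA(nlh), sizeof(result));
	out:
		close(fd);
		return result; /* ACK, NACK, or a returned id */
	}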
  11719. +
   11720. +/* The kernel communicates with user space through netlink sockets. This
   11721. + * function implements the kernel-side handling of each request; a single
   11722. + * generic message struct carries every request type, which avoids
   11723. + * duplicating the dispatch code for each command.
   11724. + */
  11724. +static void switch_messages(struct sk_buff *skb)
  11725. +{
  11726. + struct nlmsghdr *nlh;
  11727. + int pid, rv;
  11728. + enum msg_type cmd;
  11729. +
  11730. + struct dpa_fq *dpa_fq, *tmp;
  11731. + struct device *dev;
  11732. +
  11733. + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
  11734. +
  11735. + struct generic_msg *check;
  11736. + int macsec_id = 0;
  11737. + uint32_t sc_id, macsec_revision;
  11738. + macsec_an_t ret_an;
  11739. + int i;
  11740. +
  11741. + pr_debug("Entering: %s\n", __func__);
  11742. +
  11743. + if (unlikely(!skb)) {
  11744. + pr_err("skb null\n");
  11745. + return;
  11746. + }
  11747. +
  11748. + nlh = (struct nlmsghdr *)skb->data;
   11749. + check = kmalloc(sizeof(*check), GFP_KERNEL);
   + if (unlikely(!check))
   + return;
   11750. + memcpy(check, nlmsg_data(nlh), sizeof(*check));
   11751. + pid = nlh->nlmsg_pid; /* pid of sending process */
  11752. + cmd = check->chf;
  11753. +
  11754. + switch (cmd) {
  11755. + case ENABLE_MACSEC:
  11756. + pr_debug("ENABLE_MACSEC\n");
  11757. +
  11758. + macsec_id = enable_macsec(check);
  11759. +
  11760. + if (macsec_id >= 0)
  11761. + macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
  11762. +
  11763. + rv = send_result(nlh, pid, (macsec_id < 0) ? NACK : macsec_id);
  11764. + if (unlikely(rv < 0))
  11765. + goto _release;
  11766. +
  11767. + break;
  11768. +
  11769. + case SET_EXCEPTION:
  11770. + pr_debug("SET_EXCEPTION\n");
  11771. +
  11772. + rv = set_macsec_exception(check);
  11773. +
  11774. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11775. + if (unlikely(rv < 0))
  11776. + goto _release;
  11777. +
  11778. + break;
  11779. +
  11780. + case ENABLE_SECY:
  11781. + pr_debug("ENABLE_SECY\n");
  11782. +
  11783. + rv = enable_secy(check, &macsec_id);
  11784. +
  11785. + if (rv == 0)
  11786. + macsec_priv[macsec_id]->en_state = SECY_ENABLED;
  11787. +
  11788. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11789. + if (unlikely(rv < 0))
  11790. + goto _release;
  11791. +
  11792. + break;
  11793. +
  11794. + case GET_REVISION:
  11795. + pr_debug("GET_REVISION\n");
  11796. +
  11797. + rv = get_macsec_revision(check, &macsec_revision);
  11798. +
  11799. + rv = send_result(nlh, pid,
  11800. + (rv < 0) ? NACK : (int)macsec_revision);
  11801. + if (unlikely(rv < 0))
  11802. + goto _release;
  11803. +
  11804. + break;
  11805. +
  11806. + case GET_TXSC_PHYS_ID:
  11807. + pr_debug("GET_TXSC_PHYS_ID\n");
  11808. +
  11809. + rv = get_tx_sc_phys_id(check, &sc_id);
  11810. +
  11811. + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
  11812. + if (unlikely(rv < 0))
  11813. + goto _release;
  11814. +
  11815. + break;
  11816. +
  11817. + case TX_SA_CREATE:
  11818. + pr_debug("TX_SA_CREATE\n");
  11819. +
  11820. + rv = create_tx_sa(check);
  11821. +
  11822. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11823. + if (unlikely(rv < 0))
  11824. + goto _release;
  11825. +
  11826. + break;
  11827. +
  11828. + case MODIFY_TXSA_KEY:
  11829. + pr_debug("MODIFY_TXSA_KEY\n");
  11830. +
  11831. + rv = modify_tx_sa_key(check);
  11832. +
  11833. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11834. + if (unlikely(rv < 0))
  11835. + goto _release;
  11836. +
  11837. + break;
  11838. +
  11839. + case TX_SA_ACTIVATE:
  11840. + pr_debug("TX_SA_ACTIVATE\n");
  11841. +
  11842. + rv = activate_tx_sa(check);
  11843. +
  11844. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11845. + if (unlikely(rv < 0))
  11846. + goto _release;
  11847. +
  11848. + break;
  11849. +
  11850. + case GET_TXSA_AN:
  11851. + pr_debug("GET_TXSA_AN\n");
  11852. +
  11853. + rv = get_tx_sa_an(check, &ret_an);
  11854. +
  11855. + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)ret_an);
  11856. + if (unlikely(rv < 0))
  11857. + goto _release;
  11858. +
  11859. + break;
  11860. +
  11861. + case RX_SC_CREATE:
  11862. + pr_debug("RX_SC_CREATE\n");
  11863. +
  11864. + sc_id = create_rx_sc(check);
  11865. +
   11866. + rv = send_result(nlh, pid, ((int)sc_id < 0) ? NACK : (int)sc_id);
  11867. + if (unlikely(rv < 0))
  11868. + goto _release;
  11869. +
  11870. + break;
  11871. +
  11872. + case GET_RXSC_PHYS_ID:
  11873. + pr_debug("GET_RXSC_PHYS_ID\n");
  11874. +
  11875. + rv = get_rx_sc_phys_id(check, &sc_id);
  11876. +
  11877. + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
  11878. + if (unlikely(rv < 0))
  11879. + goto _release;
  11880. +
  11881. + break;
  11882. +
  11883. + case RX_SA_CREATE:
  11884. + pr_debug("RX_SA_CREATE\n");
  11885. +
  11886. + rv = create_rx_sa(check);
  11887. +
  11888. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11889. + if (unlikely(rv < 0))
  11890. + goto _release;
  11891. +
  11892. + break;
  11893. +
  11894. + case MODIFY_RXSA_KEY:
  11895. + pr_debug("MODIFY_RXSA_KEY\n");
  11896. +
  11897. + rv = modify_rx_sa_key(check);
  11898. +
  11899. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11900. + if (unlikely(rv < 0))
  11901. + goto _release;
  11902. +
  11903. + break;
  11904. +
  11905. + case UPDATE_NPN:
  11906. + pr_debug("UPDATE_NPN\n");
  11907. +
  11908. + rv = update_npn(check);
  11909. +
  11910. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11911. + if (unlikely(rv < 0))
  11912. + goto _release;
  11913. +
  11914. + break;
  11915. +
  11916. + case UPDATE_LPN:
  11917. + pr_debug("UPDATE_LPN\n");
  11918. +
  11919. + rv = update_lpn(check);
  11920. +
  11921. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11922. + if (unlikely(rv < 0))
  11923. + goto _release;
  11924. +
  11925. + break;
  11926. +
  11927. + case RX_SA_ACTIVATE:
  11928. + pr_debug("RX_SA_ACTIVATE\n");
  11929. +
  11930. + rv = activate_rx_sa(check);
  11931. +
  11932. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11933. + if (unlikely(rv < 0))
  11934. + goto _release;
  11935. +
  11936. + break;
  11937. +
  11938. + case RX_SA_DISABLE:
  11939. + pr_debug("RX_SA_DISABLE\n");
  11940. +
  11941. + rv = rx_sa_disable(check);
  11942. +
  11943. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11944. + if (unlikely(rv < 0))
  11945. + goto _release;
  11946. +
  11947. + break;
  11948. +
  11949. + case RX_SA_DELETE:
  11950. + pr_debug("RX_SA_DELETE\n");
  11951. +
  11952. + rv = rx_sa_delete(check);
  11953. +
  11954. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11955. + if (unlikely(rv < 0))
  11956. + goto _release;
  11957. +
  11958. + break;
  11959. +
  11960. + case RX_SC_DELETE:
  11961. + pr_debug("RX_SC_DELETE\n");
  11962. +
  11963. + rv = rx_sc_delete(check);
  11964. +
  11965. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11966. + if (unlikely(rv < 0))
  11967. + goto _release;
  11968. +
  11969. + break;
  11970. +
  11971. + case TX_SA_DELETE:
  11972. + pr_debug("TX_SA_DELETE\n");
  11973. +
  11974. + rv = tx_sa_delete(check);
  11975. +
  11976. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11977. + if (unlikely(rv < 0))
  11978. + goto _release;
  11979. +
  11980. + break;
  11981. +
  11982. + case DISABLE_SECY:
  11983. + pr_debug("DISABLE_SECY\n");
  11984. +
  11985. + rv = disable_secy(check, &macsec_id);
  11986. +
  11987. + if (unlikely(rv < 0))
  11988. + macsec_priv[macsec_id]->en_state = SECY_ENABLED;
  11989. + else
  11990. + macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
  11991. +
  11992. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  11993. + if (unlikely(rv < 0))
  11994. + goto _release;
  11995. +
  11996. + break;
  11997. +
  11998. + case DISABLE_MACSEC:
  11999. + pr_debug("DISABLE_MACSEC\n");
  12000. +
  12001. + rv = disable_macsec(check, &macsec_id);
  12002. +
   12003. + if (rv == 0)
   + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
  12004. +
  12005. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  12006. + if (unlikely(rv < 0))
  12007. + goto _release;
  12008. +
  12009. + break;
  12010. +
  12011. + case DISABLE_ALL:
  12012. + pr_debug("DISABLE_ALL\n");
  12013. +
  12014. + rv = disable_all(check, &macsec_id);
  12015. +
   12016. + if (rv == 0)
   + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
  12017. +
  12018. + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
  12019. + if (unlikely(rv < 0))
  12020. + goto _release;
  12021. + break;
  12022. +
  12023. + default:
   12024. + /* should never get here */
   12025. + pr_err("unknown message type %d\n", cmd);
  12026. + break;
  12027. + }
   12028. +
   + kfree(check);
   12029. + return;
  12030. +
  12031. +_release:
  12032. + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++)
  12033. + deinit_macsec(i);
  12034. +
   12035. + /* Reset the dpaa hooks */
  12036. + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
  12037. + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
  12038. +
  12039. + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
  12040. +
  12041. + if (!macsec_priv[i]->net_dev)
  12042. + continue;
  12043. +
  12044. + free_percpu(macsec_priv[i]->percpu_priv);
  12045. +
  12046. + /* Delete the fman queues */
  12047. + list_for_each_entry_safe(dpa_fq,
  12048. + tmp,
  12049. + &macsec_priv[i]->dpa_fq_list,
  12050. + list) {
  12051. + dev = dpa_fq->net_dev->dev.parent;
  12052. + rv = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
  12053. + if (unlikely(rv < 0))
   12054. + pr_err("_dpa_fq_free() = %d\n", rv);
  12055. + }
  12056. +
  12057. + macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
  12058. + kfree(macsec_priv[i]);
  12059. + macsec_priv[i] = NULL;
  12060. + }
  12061. +
  12062. + kfree(check);
  12063. +
  12064. + netlink_kernel_release(nl_sk);
  12065. +}
  12066. +
  12067. +struct netlink_kernel_cfg ms_cfg = {
  12068. + .groups = 1,
  12069. + .input = switch_messages,
  12070. +};
  12071. +
  12072. +static int __init macsec_init(void)
  12073. +{
  12074. + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
  12075. + int ret, i;
  12076. +
  12077. + pr_debug("Entering: %s\n", __func__);
  12078. +
   12079. + /* If any interface we want macsec on is unknown, just exit. */
  12080. + parse_ifs();
  12081. + for (i = 0; i < macsec_ifs_cnt; i++) {
  12082. + if (!macsec_ifs[i]) {
  12083. + pr_err("Interface unknown\n");
  12084. + return -EINVAL;
  12085. + }
  12086. + }
  12087. +
   12088. + /* Create the netlink socket used to communicate with user space. */
  12089. + nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &ms_cfg);
  12090. + if (unlikely(!nl_sk)) {
  12091. + pr_err("Error creating socket.\n");
  12092. + ret = -ENOMEM;
  12093. + goto _release;
  12094. + }
  12095. +
  12096. + ret = macsec_setup();
  12097. + if (unlikely(ret != 0)) {
  12098. + pr_err("Setup of macsec failed\n");
  12099. + goto _release;
  12100. + }
  12101. +
  12102. + /* set dpaa hooks for default queues */
  12103. + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
  12104. + macsec_dpaa_eth_hooks.tx = (dpaa_eth_egress_hook_t)(macsec_tx_hook);
  12105. + macsec_dpaa_eth_hooks.rx_default =
  12106. + (dpaa_eth_ingress_hook_t)(macsec_rx_hook);
  12107. +
  12108. + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
  12109. +
  12110. + return 0;
  12111. +
  12112. +_release:
  12113. + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
  12114. + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
  12115. + netlink_kernel_release(nl_sk);
  12116. + return ret;
  12117. +}
  12118. +
  12119. +static void __exit macsec_exit(void)
  12120. +{
  12121. + int _errno;
  12122. + struct dpa_fq *dpa_fq, *tmp;
  12123. + struct device *dev;
  12124. + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
  12125. + int i;
  12126. +
  12127. + pr_debug("exiting macsec module\n");
  12128. +
  12129. + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
   12130. + /* If an error occurred, the release has already been done in
   12131. + * switch_messages; skip it here so the module still exits cleanly.
   12132. + */
   12133. + if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
  12134. + pr_debug("no release needed\n");
  12135. + continue;
  12136. + }
  12137. + deinit_macsec(i);
  12138. + }
  12139. +
   12140. + /* Reset the dpaa hooks before exiting */
  12141. + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
  12142. + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
  12143. +
  12144. + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
  12145. +
   12146. + if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
  12147. + pr_debug("no release needed\n");
  12148. + continue;
  12149. + }
  12150. +
  12151. + free_percpu(macsec_priv[i]->percpu_priv);
  12152. +
  12153. + /* Delete the fman queues */
  12154. + list_for_each_entry_safe(dpa_fq, tmp,
  12155. + &macsec_priv[i]->dpa_fq_list, list) {
  12156. + if (dpa_fq) {
  12157. + dev = dpa_fq->net_dev->dev.parent;
  12158. + _errno = _dpa_fq_free(dev,
  12159. + (struct qman_fq *)dpa_fq);
  12160. + if (unlikely(_errno < 0))
   12161. + pr_err("_dpa_fq_free() = %d\n", _errno);
  12162. + }
  12163. + }
  12164. +
  12165. + /* restore ethtool ops to the previous private ones */
  12166. + macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
  12167. +
  12168. + kfree(macsec_priv[i]);
  12169. + }
  12170. +
  12171. + netlink_kernel_release(nl_sk);
  12172. +
  12173. + pr_debug("exited macsec module\n");
  12174. +}
  12175. +
  12176. +module_init(macsec_init);
  12177. +module_exit(macsec_exit);
  12178. +
  12179. +MODULE_LICENSE("Dual BSD/GPL");
  12180. --- /dev/null
  12181. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
  12182. @@ -0,0 +1,294 @@
  12183. +/* Copyright 2015 Freescale Semiconductor Inc.
  12184. + *
  12185. + * Redistribution and use in source and binary forms, with or without
  12186. + * modification, are permitted provided that the following conditions are met:
  12187. + * * Redistributions of source code must retain the above copyright
  12188. + * notice, this list of conditions and the following disclaimer.
  12189. + * * Redistributions in binary form must reproduce the above copyright
  12190. + * notice, this list of conditions and the following disclaimer in the
  12191. + * documentation and/or other materials provided with the distribution.
  12192. + * * Neither the name of Freescale Semiconductor nor the
  12193. + * names of its contributors may be used to endorse or promote products
  12194. + * derived from this software without specific prior written permission.
  12195. + *
  12196. + *
  12197. + * ALTERNATIVELY, this software may be distributed under the terms of the
  12198. + * GNU General Public License ("GPL") as published by the Free Software
  12199. + * Foundation, either version 2 of that License or (at your option) any
  12200. + * later version.
  12201. + *
  12202. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  12203. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  12204. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  12205. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  12206. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  12207. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  12208. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  12209. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  12210. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  12211. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  12212. + */
  12213. +
  12214. +#ifndef __DPAA_ETH_MACSEC_H
  12215. +#define __DPAA_ETH_MACSEC_H
  12216. +
  12217. +#include "mac.h"
  12218. +
  12219. +#define NETLINK_USER 31
  12220. +#define MAX_NUM_OF_SECY 1
  12221. +#define MAX_LEN 100
  12222. +#define FM_FD_STAT_RX_MACSEC 0x00800000
  12223. +#define MACSEC_ETH_TX_QUEUES NR_CPUS
  12224. +#define MACSEC_REG_OFFSET 0x800
  12225. +#define ACK 0
  12226. +#define NACK -1
  12227. +
  12228. +extern const struct dpa_fq_cbs_t private_fq_cbs;
  12229. +
  12230. +extern int dpa_macsec_get_sset_count(struct net_device *net_dev, int type);
  12231. +extern void
  12232. +dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
  12233. + struct ethtool_stats *stats, u64 *data);
  12234. +extern void
  12235. +dpa_macsec_get_strings(struct net_device *net_dev,
  12236. + u32 stringset, u8 *data);
  12237. +
  12238. +enum msg_type {ENABLE_MACSEC,
  12239. + SET_EXCEPTION,
  12240. + ENABLE_SECY,
  12241. + TX_SA_CREATE,
  12242. + TX_SA_ACTIVATE,
  12243. + RX_SC_CREATE,
  12244. + RX_SA_CREATE,
  12245. + RX_SA_ACTIVATE,
  12246. + RX_SA_DISABLE,
  12247. + RX_SA_DELETE,
  12248. + RX_SC_DELETE,
  12249. + TX_SA_DELETE,
  12250. + DISABLE_MACSEC,
  12251. + DISABLE_SECY,
  12252. + DISABLE_ALL,
  12253. + GET_REVISION,
  12254. + UPDATE_NPN,
  12255. + UPDATE_LPN,
  12256. + GET_TXSC_PHYS_ID,
  12257. + GET_RXSC_PHYS_ID,
  12258. + GET_TXSA_AN,
  12259. + MODIFY_TXSA_KEY,
  12260. + MODIFY_RXSA_KEY,
  12261. +};
  12262. +
  12263. +enum macsec_enablement {MACSEC_DISABLED, MACSEC_ENABLED, SECY_ENABLED};
  12264. +
  12265. +struct enable_secy {
  12266. + int macsec_id;
  12267. +
  12268. + u64 sci; /* MAC address(48b) + port_id(16b) */
  12269. +
  12270. + bool config_insertion_mode;
  12271. + fm_macsec_sci_insertion_mode sci_insertion_mode;
  12272. +
  12273. + bool config_protect_frames;
  12274. + bool protect_frames;
  12275. +
  12276. + bool config_replay_window;
  12277. + bool replay_protect;
  12278. + uint32_t replay_window;
  12279. +
  12280. + bool config_validation_mode;
  12281. + fm_macsec_valid_frame_behavior validate_frames;
  12282. +
  12283. + bool config_confidentiality;
  12284. + bool confidentiality_enable;
  12285. + uint32_t confidentiality_offset;
  12286. +
  12287. + bool config_point_to_point;
  12288. +
  12289. + bool config_exception;
  12290. + bool enable_exception;
  12291. + fm_macsec_secy_exception exception;
  12292. +
  12293. + bool config_event;
  12294. + bool enable_event;
  12295. + fm_macsec_secy_event event;
  12296. +};
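The sci field above packs the station MAC address and the port identifier into one 64-bit value. A minimal sketch of composing it, assuming the layout implied by the comment (48-bit MAC in the upper bits, 16-bit port id in the lower bits) and ETH_ALEN from linux/if_ether.h:

	static inline u64 macsec_make_sci(const u8 mac[ETH_ALEN], u16 port_id)
	{
		u64 sci = 0;
		int i;

		/* MAC bytes go in transmission order, most significant first */
		for (i = 0; i < ETH_ALEN; i++)
			sci = (sci << 8) | mac[i];
		return (sci << 16) | port_id;
	}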
  12297. +
  12298. +struct macsec_data {
  12299. + char *if_name;
  12300. + size_t if_name_length; /* including string terminator */
  12301. +
  12302. + bool config_unknown_sci_treatment;
  12303. + fm_macsec_unknown_sci_frame_treatment unknown_sci_treatment;
  12304. +
  12305. + bool config_invalid_tag_treatment;
  12306. + bool deliver_uncontrolled;
  12307. +
  12308. + bool config_kay_frame_treatment;
  12309. + bool discard_uncontrolled;
  12310. +
  12311. + bool config_untag_treatment;
  12312. + fm_macsec_untag_frame_treatment untag_treatment;
  12313. +
  12314. + bool config_pn_exhaustion_threshold;
  12315. + uint32_t pn_threshold;
  12316. +
  12317. + bool config_keys_unreadable;
  12318. +
  12319. + bool config_sectag_without_sci;
  12320. +
  12321. + bool config_exception;
  12322. + bool enable_exception;
  12323. + fm_macsec_exception exception;
  12324. +};
  12325. +
  12326. +struct set_exception {
  12327. + int macsec_id;
  12328. + bool enable_exception;
  12329. + fm_macsec_exception exception;
  12330. +};
  12331. +
  12332. +struct create_tx_sa {
  12333. + int macsec_id;
  12334. + u8 an; /* association number */
  12335. + u8 *sak; /* secure assoc key */
  12336. + u32 sak_len; /* assoc key length */
  12337. +};
  12338. +
  12339. +struct modify_tx_sa_key {
  12340. + int macsec_id;
  12341. + u8 an; /* association number */
  12342. + u8 *sak; /* secure assoc key */
  12343. + u32 sak_len; /* assoc key length */
  12344. +};
  12345. +
  12346. +struct activate_tx_sa {
  12347. + int macsec_id;
  12348. + u8 an; /* association number */
  12349. +};
  12350. +
  12351. +struct create_rx_sc {
  12352. + int macsec_id;
  12353. + u64 sci;
  12354. +};
  12355. +
  12356. +struct delete_rx_sc {
  12357. + int macsec_id;
  12358. + u32 rx_sc_id;
  12359. +};
  12360. +
  12361. +struct get_rx_sc_id {
  12362. + int macsec_id;
  12363. + u32 rx_sc_id;
  12364. +};
  12365. +
  12366. +struct create_rx_sa {
  12367. + int macsec_id;
  12368. + u32 rx_sc_id;
  12369. + u8 an;
  12370. + u32 lpn;
  12371. + u8 *sak;
  12372. + u32 sak_len;
  12373. +};
  12374. +
  12375. +struct activate_rx_sa {
  12376. + int macsec_id;
  12377. + u32 rx_sc_id;
  12378. + u8 an;
  12379. +};
  12380. +
  12381. +struct disable_rx_sa {
  12382. + int macsec_id;
  12383. + u32 rx_sc_id;
  12384. + u8 an;
  12385. +};
  12386. +
  12387. +struct delete_rx_sa {
  12388. + int macsec_id;
  12389. + u32 rx_sc_id;
  12390. + u8 an;
  12391. +};
  12392. +
  12393. +struct delete_tx_sa {
  12394. + int macsec_id;
  12395. + u32 rx_sc_id;
  12396. + u8 an;
  12397. +};
  12398. +
  12399. +struct update_npn {
  12400. + int macsec_id;
  12401. + u32 rx_sc_id;
  12402. + u8 an;
  12403. + u32 pn;
  12404. +};
  12405. +
  12406. +struct update_lpn {
  12407. + int macsec_id;
  12408. + u32 rx_sc_id;
  12409. + u8 an;
  12410. + u32 pn;
  12411. +};
  12412. +
  12413. +struct modify_rx_sa_key {
  12414. + int macsec_id;
  12415. + u32 rx_sc_id;
  12416. + u8 an;
  12417. + u8 *sak;
  12418. + u32 sak_len;
  12419. +};
  12420. +
  12421. +struct generic_msg {
  12422. + enum msg_type chf;
  12423. + union {
  12424. + int macsec_id;
  12425. + struct macsec_data en_macsec;
  12426. + struct enable_secy secy;
  12427. + struct create_tx_sa c_tx_sa;
  12428. + struct activate_tx_sa a_tx_sa;
  12429. + struct create_rx_sc c_rx_sc;
  12430. + struct get_rx_sc_id get_rx_sc_id;
  12431. + struct create_rx_sa c_rx_sa;
  12432. + struct activate_rx_sa a_rx_sa;
  12433. + struct disable_rx_sa d_rx_sa;
  12434. + struct delete_rx_sa del_rx_sa;
  12435. + struct delete_rx_sc del_rx_sc;
  12436. + struct delete_tx_sa del_tx_sa;
  12437. + struct update_npn update_npn;
  12438. + struct update_lpn update_lpn;
  12439. + struct modify_tx_sa_key modify_tx_sa_key;
  12440. + struct modify_rx_sa_key modify_rx_sa_key;
  12441. + struct set_exception set_ex;
  12442. + } payload;
  12443. +};
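Requests are built by tagging the command in chf and filling the matching payload member; for example, a request activating TX security association 1 on MACsec instance 0 would look like:

	struct generic_msg req = {
		.chf = TX_SA_ACTIVATE,
		.payload.a_tx_sa = {
			.macsec_id = 0,
			.an = 1,	/* association number */
		},
	};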
  12444. +
  12445. +struct macsec_percpu_priv_s {
  12446. + u64 rx_macsec;
  12447. + u64 tx_macsec;
  12448. +};
  12449. +
  12450. +struct macsec_priv_s {
  12451. + struct macsec_percpu_priv_s __percpu *percpu_priv;
  12452. +
  12453. + struct net_device *net_dev;
  12454. + struct mac_device *mac_dev;
  12455. +
  12456. + struct qman_fq *egress_fqs[MACSEC_ETH_TX_QUEUES];
  12457. + struct qman_fq *conf_fqs[MACSEC_ETH_TX_QUEUES];
  12458. + struct list_head dpa_fq_list;
  12459. + uint32_t msg_enable; /* net_device message level */
  12460. + uint16_t channel;
  12461. + struct fm_macsec_dev *fm_macsec;
  12462. +
  12463. + struct fm_macsec_secy_dev *fm_ms_secy;
  12464. + uint8_t an;
  12465. +
  12466. + struct rx_sc_dev *rx_sc_dev[NUM_OF_RX_SC];
  12467. + uint8_t *sa_key;
  12468. + enum macsec_enablement en_state;
  12469. +
  12470. + uintptr_t vaddr;
  12471. + struct resource *fman_resource;
  12472. +};
  12473. +
  12474. +struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev);
  12475. +
  12476. +#endif /* __DPAA_ETH_MACSEC_H */
  12477. --- /dev/null
  12478. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
  12479. @@ -0,0 +1,381 @@
  12480. +/* Copyright 2008-2013 Freescale Semiconductor Inc.
  12481. + *
  12482. + * Redistribution and use in source and binary forms, with or without
  12483. + * modification, are permitted provided that the following conditions are met:
  12484. + * * Redistributions of source code must retain the above copyright
  12485. + * notice, this list of conditions and the following disclaimer.
  12486. + * * Redistributions in binary form must reproduce the above copyright
  12487. + * notice, this list of conditions and the following disclaimer in the
  12488. + * documentation and/or other materials provided with the distribution.
  12489. + * * Neither the name of Freescale Semiconductor nor the
  12490. + * names of its contributors may be used to endorse or promote products
  12491. + * derived from this software without specific prior written permission.
  12492. + *
  12493. + *
  12494. + * ALTERNATIVELY, this software may be distributed under the terms of the
  12495. + * GNU General Public License ("GPL") as published by the Free Software
  12496. + * Foundation, either version 2 of that License or (at your option) any
  12497. + * later version.
  12498. + *
  12499. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  12500. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  12501. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  12502. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  12503. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  12504. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  12505. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  12506. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  12507. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  12508. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  12509. + */
  12510. +
  12511. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  12512. +#define pr_fmt(fmt) \
  12513. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  12514. + KBUILD_BASENAME".c", __LINE__, __func__
  12515. +#else
  12516. +#define pr_fmt(fmt) \
  12517. + KBUILD_MODNAME ": " fmt
  12518. +#endif
  12519. +
  12520. +#include <linux/init.h>
  12521. +#include <linux/module.h>
  12522. +#include <linux/of_platform.h>
  12523. +#include "dpaa_eth.h"
  12524. +#include "dpaa_eth_common.h"
  12525. +#include "dpaa_eth_base.h"
  12526. +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
  12527. +#include "mac.h"
  12528. +
  12529. +#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
  12530. +
  12531. +MODULE_LICENSE("Dual BSD/GPL");
  12532. +
  12533. +MODULE_DESCRIPTION(DPA_DESCRIPTION);
  12534. +
  12535. +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
  12536. +#ifdef CONFIG_PM
  12537. +
  12538. +static int proxy_suspend(struct device *dev)
  12539. +{
  12540. + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
  12541. + struct mac_device *mac_dev = proxy_dev->mac_dev;
  12542. + int err = 0;
  12543. +
  12544. + err = fm_port_suspend(mac_dev->port_dev[RX]);
  12545. + if (err)
  12546. + goto port_suspend_failed;
  12547. +
  12548. + err = fm_port_suspend(mac_dev->port_dev[TX]);
  12549. + if (err)
  12550. + err = fm_port_resume(mac_dev->port_dev[RX]);
  12551. +
  12552. +port_suspend_failed:
  12553. + return err;
  12554. +}
  12555. +
  12556. +static int proxy_resume(struct device *dev)
  12557. +{
  12558. + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
  12559. + struct mac_device *mac_dev = proxy_dev->mac_dev;
  12560. + int err = 0;
  12561. +
  12562. + err = fm_port_resume(mac_dev->port_dev[TX]);
  12563. + if (err)
  12564. + goto port_resume_failed;
  12565. +
  12566. + err = fm_port_resume(mac_dev->port_dev[RX]);
  12567. + if (err)
  12568. + err = fm_port_suspend(mac_dev->port_dev[TX]);
  12569. +
  12570. +port_resume_failed:
  12571. + return err;
  12572. +}
  12573. +
  12574. +static const struct dev_pm_ops proxy_pm_ops = {
  12575. + .suspend = proxy_suspend,
  12576. + .resume = proxy_resume,
  12577. +};
  12578. +
  12579. +#define PROXY_PM_OPS (&proxy_pm_ops)
  12580. +
  12581. +#else /* CONFIG_PM */
  12582. +
  12583. +#define PROXY_PM_OPS NULL
  12584. +
  12585. +#endif /* CONFIG_PM */
  12586. +
  12587. +static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
  12588. +{
  12589. + int err = 0, i;
  12590. + struct device *dev;
  12591. + struct device_node *dpa_node;
  12592. + struct dpa_bp *dpa_bp;
  12593. + struct list_head proxy_fq_list;
  12594. + size_t count;
  12595. + struct fm_port_fqs port_fqs;
  12596. + struct dpa_buffer_layout_s *buf_layout = NULL;
  12597. + struct mac_device *mac_dev;
  12598. + struct proxy_device *proxy_dev;
  12599. +
  12600. + dev = &_of_dev->dev;
  12601. +
  12602. + dpa_node = dev->of_node;
  12603. +
  12604. + if (!of_device_is_available(dpa_node))
  12605. + return -ENODEV;
  12606. +
  12607. + /* Get the buffer pools assigned to this interface */
  12608. + dpa_bp = dpa_bp_probe(_of_dev, &count);
  12609. + if (IS_ERR(dpa_bp))
  12610. + return PTR_ERR(dpa_bp);
  12611. +
  12612. + mac_dev = dpa_mac_probe(_of_dev);
  12613. + if (IS_ERR(mac_dev))
  12614. + return PTR_ERR(mac_dev);
  12615. +
  12616. + proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
  12617. + if (!proxy_dev) {
  12618. + dev_err(dev, "devm_kzalloc() failed\n");
  12619. + return -ENOMEM;
  12620. + }
  12621. +
  12622. + proxy_dev->mac_dev = mac_dev;
  12623. + dev_set_drvdata(dev, proxy_dev);
  12624. +
  12625. + /* We have physical ports, so we need to establish
  12626. + * the buffer layout.
  12627. + */
  12628. + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
  12629. + GFP_KERNEL);
  12630. + if (!buf_layout) {
  12631. + dev_err(dev, "devm_kzalloc() failed\n");
  12632. + return -ENOMEM;
  12633. + }
  12634. + dpa_set_buffers_layout(mac_dev, buf_layout);
  12635. +
  12636. + INIT_LIST_HEAD(&proxy_fq_list);
  12637. +
  12638. + memset(&port_fqs, 0, sizeof(port_fqs));
  12639. +
  12640. + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
  12641. + if (!err)
  12642. + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
  12643. + TX);
  12644. + if (err < 0) {
  12645. + devm_kfree(dev, buf_layout);
  12646. + return err;
  12647. + }
  12648. +
  12649. + /* Proxy initializer - Just configures the MAC on behalf of
  12650. + * another partition.
  12651. + */
  12652. + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
  12653. + buf_layout, dev);
  12654. +
   12655. + /* The proxy ports still need to be enabled below; the memory
   12656. + * allocated while probing can be freed now.
   12657. + */
  12658. + devm_kfree(dev, buf_layout);
  12659. + devm_kfree(dev, dpa_bp);
  12660. +
  12661. + /* Free FQ structures */
  12662. + devm_kfree(dev, port_fqs.rx_defq);
  12663. + devm_kfree(dev, port_fqs.rx_errq);
  12664. + devm_kfree(dev, port_fqs.tx_defq);
  12665. + devm_kfree(dev, port_fqs.tx_errq);
  12666. +
  12667. + for_each_port_device(i, mac_dev->port_dev) {
  12668. + err = fm_port_enable(mac_dev->port_dev[i]);
  12669. + if (err)
  12670. + goto port_enable_fail;
  12671. + }
  12672. +
  12673. + dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
  12674. + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
  12675. + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
  12676. +
  12677. + return 0; /* Proxy interface initialization ended */
  12678. +
  12679. +port_enable_fail:
  12680. + for_each_port_device(i, mac_dev->port_dev)
  12681. + fm_port_disable(mac_dev->port_dev[i]);
  12682. + dpa_eth_proxy_remove(_of_dev);
  12683. +
  12684. + return err;
  12685. +}
  12686. +
  12687. +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
  12688. + struct net_device *net_dev)
  12689. +{
  12690. + struct mac_device *mac_dev;
  12691. + int _errno;
  12692. +
  12693. + mac_dev = proxy_dev->mac_dev;
  12694. +
  12695. + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
  12696. + net_dev->dev_addr);
  12697. + if (_errno < 0)
  12698. + return _errno;
  12699. +
  12700. + return 0;
  12701. +}
  12702. +EXPORT_SYMBOL(dpa_proxy_set_mac_address);
  12703. +
  12704. +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
  12705. + struct net_device *net_dev)
  12706. +{
  12707. + struct mac_device *mac_dev = proxy_dev->mac_dev;
  12708. + int _errno;
  12709. +
  12710. + if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
  12711. + mac_dev->promisc = !mac_dev->promisc;
  12712. + _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
  12713. + mac_dev->promisc);
  12714. + if (unlikely(_errno < 0))
  12715. + netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
  12716. + _errno);
  12717. + }
  12718. +
  12719. + _errno = mac_dev->set_multi(net_dev, mac_dev);
  12720. + if (unlikely(_errno < 0))
  12721. + return _errno;
  12722. +
  12723. + return 0;
  12724. +}
  12725. +EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
  12726. +
  12727. +int dpa_proxy_start(struct net_device *net_dev)
  12728. +{
  12729. + struct mac_device *mac_dev;
  12730. + const struct dpa_priv_s *priv;
  12731. + struct proxy_device *proxy_dev;
  12732. + int _errno;
  12733. + int i;
  12734. +
  12735. + priv = netdev_priv(net_dev);
  12736. + proxy_dev = (struct proxy_device *)priv->peer;
  12737. + mac_dev = proxy_dev->mac_dev;
  12738. +
  12739. + _errno = mac_dev->init_phy(net_dev, mac_dev);
  12740. + if (_errno < 0) {
  12741. + if (netif_msg_drv(priv))
  12742. + netdev_err(net_dev, "init_phy() = %d\n",
  12743. + _errno);
  12744. + return _errno;
  12745. + }
  12746. +
  12747. + for_each_port_device(i, mac_dev->port_dev) {
  12748. + _errno = fm_port_enable(mac_dev->port_dev[i]);
  12749. + if (_errno)
  12750. + goto port_enable_fail;
  12751. + }
  12752. +
  12753. + _errno = mac_dev->start(mac_dev);
  12754. + if (_errno < 0) {
  12755. + if (netif_msg_drv(priv))
  12756. + netdev_err(net_dev, "mac_dev->start() = %d\n",
  12757. + _errno);
  12758. + goto port_enable_fail;
  12759. + }
  12760. +
  12761. + return _errno;
  12762. +
  12763. +port_enable_fail:
  12764. + for_each_port_device(i, mac_dev->port_dev)
  12765. + fm_port_disable(mac_dev->port_dev[i]);
  12766. +
  12767. + return _errno;
  12768. +}
  12769. +EXPORT_SYMBOL(dpa_proxy_start);
  12770. +
  12771. +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
  12772. +{
  12773. + struct mac_device *mac_dev = proxy_dev->mac_dev;
  12774. + const struct dpa_priv_s *priv = netdev_priv(net_dev);
  12775. + int _errno, i, err;
  12776. +
  12777. + _errno = mac_dev->stop(mac_dev);
  12778. + if (_errno < 0) {
  12779. + if (netif_msg_drv(priv))
  12780. + netdev_err(net_dev, "mac_dev->stop() = %d\n",
  12781. + _errno);
  12782. + return _errno;
  12783. + }
  12784. +
  12785. + for_each_port_device(i, mac_dev->port_dev) {
  12786. + err = fm_port_disable(mac_dev->port_dev[i]);
  12787. + _errno = err ? err : _errno;
  12788. + }
  12789. +
  12790. + if (mac_dev->phy_dev)
  12791. + phy_disconnect(mac_dev->phy_dev);
  12792. + mac_dev->phy_dev = NULL;
  12793. +
  12794. + return _errno;
  12795. +}
  12796. +EXPORT_SYMBOL(dpa_proxy_stop);
  12797. +
  12798. +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
  12799. +{
  12800. + struct device *dev = &of_dev->dev;
  12801. + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
  12802. +
   12803. + /* proxy_dev was devm_kzalloc'ed; a plain kfree() would double free */
   + devm_kfree(dev, proxy_dev);
  12804. +
  12805. + dev_set_drvdata(dev, NULL);
  12806. +
  12807. + return 0;
  12808. +}
  12809. +
  12810. +static const struct of_device_id dpa_proxy_match[] = {
  12811. + {
  12812. + .compatible = "fsl,dpa-ethernet-init"
  12813. + },
  12814. + {}
  12815. +};
  12816. +MODULE_DEVICE_TABLE(of, dpa_proxy_match);
  12817. +
  12818. +static struct platform_driver dpa_proxy_driver = {
  12819. + .driver = {
  12820. + .name = KBUILD_MODNAME "-proxy",
  12821. + .of_match_table = dpa_proxy_match,
  12822. + .owner = THIS_MODULE,
  12823. + .pm = PROXY_PM_OPS,
  12824. + },
  12825. + .probe = dpaa_eth_proxy_probe,
  12826. + .remove = dpa_eth_proxy_remove
  12827. +};
  12828. +
  12829. +static int __init __cold dpa_proxy_load(void)
  12830. +{
  12831. + int _errno;
  12832. +
  12833. + pr_info(DPA_DESCRIPTION "\n");
  12834. +
  12835. + /* Initialize dpaa_eth mirror values */
  12836. + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
  12837. + dpa_max_frm = fm_get_max_frm();
  12838. +
  12839. + _errno = platform_driver_register(&dpa_proxy_driver);
  12840. + if (unlikely(_errno < 0)) {
  12841. + pr_err(KBUILD_MODNAME
  12842. + ": %s:%hu:%s(): platform_driver_register() = %d\n",
  12843. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  12844. + }
  12845. +
  12846. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  12847. + KBUILD_BASENAME".c", __func__);
  12848. +
  12849. + return _errno;
  12850. +}
  12851. +module_init(dpa_proxy_load);
  12852. +
  12853. +static void __exit __cold dpa_proxy_unload(void)
  12854. +{
  12855. + platform_driver_unregister(&dpa_proxy_driver);
  12856. +
  12857. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  12858. + KBUILD_BASENAME".c", __func__);
  12859. +}
  12860. +module_exit(dpa_proxy_unload);
  12861. --- /dev/null
  12862. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
  12863. @@ -0,0 +1,1128 @@
  12864. +/* Copyright 2012 Freescale Semiconductor Inc.
  12865. + *
  12866. + * Redistribution and use in source and binary forms, with or without
  12867. + * modification, are permitted provided that the following conditions are met:
  12868. + * * Redistributions of source code must retain the above copyright
  12869. + * notice, this list of conditions and the following disclaimer.
  12870. + * * Redistributions in binary form must reproduce the above copyright
  12871. + * notice, this list of conditions and the following disclaimer in the
  12872. + * documentation and/or other materials provided with the distribution.
  12873. + * * Neither the name of Freescale Semiconductor nor the
  12874. + * names of its contributors may be used to endorse or promote products
  12875. + * derived from this software without specific prior written permission.
  12876. + *
  12877. + *
  12878. + * ALTERNATIVELY, this software may be distributed under the terms of the
  12879. + * GNU General Public License ("GPL") as published by the Free Software
  12880. + * Foundation, either version 2 of that License or (at your option) any
  12881. + * later version.
  12882. + *
  12883. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  12884. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  12885. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  12886. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  12887. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  12888. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  12889. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  12890. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  12891. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  12892. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  12893. + */
  12894. +
  12895. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  12896. +#define pr_fmt(fmt) \
  12897. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  12898. + KBUILD_BASENAME".c", __LINE__, __func__
  12899. +#else
  12900. +#define pr_fmt(fmt) \
  12901. + KBUILD_MODNAME ": " fmt
  12902. +#endif
  12903. +
  12904. +#include <linux/init.h>
  12905. +#include <linux/skbuff.h>
  12906. +#include <linux/highmem.h>
  12907. +#include <linux/fsl_bman.h>
  12908. +
  12909. +#include "dpaa_eth.h"
  12910. +#include "dpaa_eth_common.h"
  12911. +#ifdef CONFIG_FSL_DPAA_1588
  12912. +#include "dpaa_1588.h"
  12913. +#endif
  12914. +#ifdef CONFIG_FSL_DPAA_CEETM
  12915. +#include "dpaa_eth_ceetm.h"
  12916. +#endif
  12917. +
  12918. +/* DMA map and add a page frag back into the bpool.
  12919. + * @vaddr fragment must have been allocated with netdev_alloc_frag(),
  12920. + * specifically for fitting into @dpa_bp.
  12921. + */
  12922. +static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
  12923. + int *count_ptr)
  12924. +{
  12925. + struct bm_buffer bmb;
  12926. + dma_addr_t addr;
  12927. +
  12928. + memset(&bmb, 0, sizeof(struct bm_buffer));
  12929. +
  12930. + addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
  12931. + DMA_BIDIRECTIONAL);
  12932. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  12933. + dev_err(dpa_bp->dev, "DMA mapping failed");
  12934. + return;
  12935. + }
  12936. +
  12937. + bm_buffer_set64(&bmb, addr);
  12938. +
  12939. + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
  12940. + cpu_relax();
  12941. +
  12942. + (*count_ptr)++;
  12943. +}
  12944. +
  12945. +static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
  12946. +{
  12947. + struct bm_buffer bmb[8];
  12948. + void *new_buf;
  12949. + dma_addr_t addr;
  12950. + uint8_t i;
  12951. + struct device *dev = dpa_bp->dev;
  12952. + struct sk_buff *skb, **skbh;
  12953. +
  12954. + memset(bmb, 0, sizeof(struct bm_buffer) * 8);
  12955. +
  12956. + for (i = 0; i < 8; i++) {
  12957. + /* We'll prepend the skb back-pointer; can't use the DPA
  12958. + * priv space, because FMan will overwrite it (from offset 0)
  12959. + * if it ends up being the second, third, etc. fragment
  12960. + * in a S/G frame.
  12961. + *
  12962. + * We only need enough space to store a pointer, but allocate
  12963. + * an entire cacheline for performance reasons.
  12964. + */
  12965. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  12966. + new_buf = page_address(alloc_page(GFP_ATOMIC));
  12967. +#else
  12968. + new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
  12969. +#endif
  12970. + if (unlikely(!new_buf))
  12971. + goto netdev_alloc_failed;
  12972. + new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
  12973. +
  12974. + skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
  12975. + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
  12976. + if (unlikely(!skb)) {
  12977. + put_page(virt_to_head_page(new_buf));
  12978. + goto build_skb_failed;
  12979. + }
  12980. + DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
  12981. +
  12982. + addr = dma_map_single(dev, new_buf,
  12983. + dpa_bp->size, DMA_BIDIRECTIONAL);
  12984. + if (unlikely(dma_mapping_error(dev, addr)))
  12985. + goto dma_map_failed;
  12986. +
  12987. + bm_buffer_set64(&bmb[i], addr);
  12988. + }
  12989. +
  12990. +release_bufs:
  12991. + /* Release the buffers. In case bman is busy, keep trying
  12992. + * until successful. bman_release() is guaranteed to succeed
  12993. + * in a reasonable amount of time
  12994. + */
  12995. + while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
  12996. + cpu_relax();
  12997. + return i;
  12998. +
  12999. +dma_map_failed:
  13000. + kfree_skb(skb);
  13001. +
  13002. +build_skb_failed:
  13003. +netdev_alloc_failed:
  13004. + net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
  13005. + WARN_ONCE(1, "Memory allocation failure on Rx\n");
  13006. +
  13007. + bm_buffer_set64(&bmb[i], 0);
  13008. + /* Avoid releasing a completely null buffer; bman_release() requires
  13009. + * at least one buffer.
  13010. + */
  13011. + if (likely(i))
  13012. + goto release_bufs;
  13013. +
  13014. + return 0;
  13015. +}
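DPA_WRITE_SKB_PTR() and DPA_READ_SKB_PTR() are defined elsewhere in the driver. For reading convenience, a plausible shape of the pair, assuming they simply stash and recover the skb pointer at a signed word offset from the buffer start, is:

	/* sketch only; the authoritative definitions live in the driver headers */
	#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off)		\
		do {						\
			skbh = (struct sk_buff **)(addr);	\
			*(skbh + (off)) = skb;			\
		} while (0)

	#define DPA_READ_SKB_PTR(skb, skbh, addr, off)		\
		do {						\
			skbh = (struct sk_buff **)(addr);	\
			skb = *(skbh + (off));			\
		} while (0)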
  13016. +
  13017. +/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
  13018. +static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
  13019. +{
  13020. + int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
  13021. + *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
  13022. +}
  13023. +
  13024. +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
  13025. +{
  13026. + int i;
  13027. +
  13028. + /* Give each CPU an allotment of "config_count" buffers */
  13029. + for_each_possible_cpu(i) {
  13030. + int j;
  13031. +
   13032. + /* Although we access another CPU's counters here,
   13033. + * we do it at boot time, so it is safe.
   13034. + */
  13035. + for (j = 0; j < dpa_bp->config_count; j += 8)
  13036. + dpa_bp_add_8_bufs(dpa_bp, i);
  13037. + }
  13038. + return 0;
  13039. +}
  13040. +EXPORT_SYMBOL(dpa_bp_priv_seed);
  13041. +
  13042. +/* Add buffers/(pages) for Rx processing whenever bpool count falls below
  13043. + * REFILL_THRESHOLD.
  13044. + */
  13045. +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
  13046. +{
  13047. + int count = *countptr;
  13048. + int new_bufs;
  13049. +
  13050. + if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
  13051. + do {
  13052. + new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
  13053. + if (unlikely(!new_bufs)) {
  13054. + /* Avoid looping forever if we've temporarily
  13055. + * run out of memory. We'll try again at the
  13056. + * next NAPI cycle.
  13057. + */
  13058. + break;
  13059. + }
  13060. + count += new_bufs;
  13061. + } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
  13062. +
  13063. + *countptr = count;
  13064. + if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
  13065. + return -ENOMEM;
  13066. + }
  13067. +
  13068. + return 0;
  13069. +}
  13070. +EXPORT_SYMBOL(dpaa_eth_refill_bpools);
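A call-site sketch, with assumed names (the real call sites live in the Rx path of dpaa_eth.c): the caller passes the current CPU's pool counter, and the refill is a no-op while the count sits above the threshold.

	static inline void rx_refill_example(struct dpa_bp *dpa_bp)
	{
		int *countptr = per_cpu_ptr(dpa_bp->percpu_count,
					    smp_processor_id());

		if (unlikely(dpaa_eth_refill_bpools(dpa_bp, countptr)))
			pr_debug("bpool %d still below max count\n",
				 dpa_bp->bpid);
	}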
  13071. +
  13072. +/* Cleanup function for outgoing frame descriptors that were built on Tx path,
  13073. + * either contiguous frames or scatter/gather ones.
  13074. + * Skb freeing is not handled here.
  13075. + *
  13076. + * This function may be called on error paths in the Tx function, so guard
  13077. + * against cases when not all fd relevant fields were filled in.
  13078. + *
  13079. + * Return the skb backpointer, since for S/G frames the buffer containing it
  13080. + * gets freed here.
  13081. + */
  13082. +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
  13083. + const struct qm_fd *fd)
  13084. +{
  13085. + const struct qm_sg_entry *sgt;
  13086. + int i;
  13087. + struct dpa_bp *dpa_bp = priv->dpa_bp;
  13088. + dma_addr_t addr = qm_fd_addr(fd);
  13089. + dma_addr_t sg_addr;
  13090. + struct sk_buff **skbh;
  13091. + struct sk_buff *skb = NULL;
  13092. + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
  13093. + int nr_frags;
  13094. + int sg_len;
  13095. +
  13096. + /* retrieve skb back pointer */
  13097. + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
  13098. +
  13099. + if (unlikely(fd->format == qm_fd_sg)) {
  13100. + nr_frags = skb_shinfo(skb)->nr_frags;
  13101. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
   13102. + /* Working around the 4K DMA issue can yield more fragments
   13103. + * than the skb had, so unmap the maximum SGT size.
   13104. + */
  13105. + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
  13106. + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
  13107. + dma_dir);
  13108. +#else
  13109. + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
  13110. + sizeof(struct qm_sg_entry) * (1 + nr_frags),
  13111. + dma_dir);
  13112. +#endif
  13113. + /* The sgt buffer has been allocated with netdev_alloc_frag(),
  13114. + * it's from lowmem.
  13115. + */
  13116. + sgt = phys_to_virt(addr + dpa_fd_offset(fd));
  13117. +#ifdef CONFIG_FSL_DPAA_1588
  13118. + if (priv->tsu && priv->tsu->valid &&
  13119. + priv->tsu->hwts_tx_en_ioctl)
  13120. + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
  13121. +#endif
  13122. +#ifdef CONFIG_FSL_DPAA_TS
  13123. + if (unlikely(priv->ts_tx_en &&
  13124. + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
  13125. + struct skb_shared_hwtstamps shhwtstamps;
  13126. +
  13127. + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
  13128. + skb_tstamp_tx(skb, &shhwtstamps);
  13129. + }
  13130. +#endif /* CONFIG_FSL_DPAA_TS */
  13131. +
  13132. + /* sgt[0] is from lowmem, was dma_map_single()-ed */
  13133. + sg_addr = qm_sg_addr(&sgt[0]);
  13134. + sg_len = qm_sg_entry_get_len(&sgt[0]);
  13135. + dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
  13136. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  13137. + i = 1;
  13138. + do {
  13139. + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
  13140. + sg_addr = qm_sg_addr(&sgt[i]);
  13141. + sg_len = qm_sg_entry_get_len(&sgt[i]);
  13142. + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
  13143. + } while (!qm_sg_entry_get_final(&sgt[i++]));
  13144. +#else
  13145. + /* remaining pages were mapped with dma_map_page() */
  13146. + for (i = 1; i <= nr_frags; i++) {
  13147. + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
  13148. + sg_addr = qm_sg_addr(&sgt[i]);
  13149. + sg_len = qm_sg_entry_get_len(&sgt[i]);
  13150. + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
  13151. + }
  13152. +#endif
  13153. +
  13154. + /* Free the page frag that we allocated on Tx */
  13155. + put_page(virt_to_head_page(sgt));
  13156. + } else {
  13157. + dma_unmap_single(dpa_bp->dev, addr,
  13158. + skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
  13159. +#ifdef CONFIG_FSL_DPAA_TS
  13160. + /* get the timestamp for non-SG frames */
  13161. +#ifdef CONFIG_FSL_DPAA_1588
  13162. + if (priv->tsu && priv->tsu->valid &&
  13163. + priv->tsu->hwts_tx_en_ioctl)
  13164. + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
  13165. +#endif
  13166. + if (unlikely(priv->ts_tx_en &&
  13167. + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
  13168. + struct skb_shared_hwtstamps shhwtstamps;
  13169. +
  13170. + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
  13171. + skb_tstamp_tx(skb, &shhwtstamps);
  13172. + }
  13173. +#endif
  13174. + }
  13175. +
  13176. + return skb;
  13177. +}
  13178. +EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
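The returned skb is freed by the caller. A Tx-confirmation sketch with assumed flow (the real confirmation handler lives elsewhere in the driver):

	static inline void tx_conf_example(const struct dpa_priv_s *priv,
					   const struct qm_fd *fd)
	{
		struct sk_buff *skb = _dpa_cleanup_tx_fd(priv, fd);

		dev_kfree_skb(skb);
	}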
  13179. +
  13180. +#ifndef CONFIG_FSL_DPAA_TS
  13181. +bool dpa_skb_is_recyclable(struct sk_buff *skb)
  13182. +{
  13183. + /* No recycling possible if skb buffer is kmalloc'ed */
  13184. + if (skb->head_frag == 0)
  13185. + return false;
  13186. +
   13187. + /* or if it's a userspace buffer */
  13188. + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
  13189. + return false;
  13190. +
  13191. + /* or if it's cloned or shared */
  13192. + if (skb_shared(skb) || skb_cloned(skb) ||
  13193. + skb->fclone != SKB_FCLONE_UNAVAILABLE)
  13194. + return false;
  13195. +
  13196. + return true;
  13197. +}
  13198. +EXPORT_SYMBOL(dpa_skb_is_recyclable);
  13199. +
  13200. +bool dpa_buf_is_recyclable(struct sk_buff *skb,
  13201. + uint32_t min_size,
  13202. + uint16_t min_offset,
  13203. + unsigned char **new_buf_start)
  13204. +{
  13205. + unsigned char *new;
  13206. +
  13207. + /* In order to recycle a buffer, the following conditions must be met:
  13208. + * - buffer size no less than the buffer pool size
  13209. + * - buffer size no higher than an upper limit (to avoid moving too much
  13210. + * system memory to the buffer pools)
  13211. + * - buffer address aligned to cacheline bytes
  13212. + * - offset of data from start of buffer no lower than a minimum value
  13213. + * - offset of data from start of buffer no higher than a maximum value
  13214. + */
  13215. + new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
  13216. +
  13217. + /* left align to the nearest cacheline */
  13218. + new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
  13219. +
  13220. + if (likely(new >= skb->head &&
  13221. + new >= (skb->data - DPA_MAX_FD_OFFSET) &&
  13222. + skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
  13223. + *new_buf_start = new;
  13224. + return true;
  13225. + }
  13226. +
  13227. + return false;
  13228. +}
  13229. +EXPORT_SYMBOL(dpa_buf_is_recyclable);
  13230. +#endif
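The two predicates are meant to be combined on the Tx path; a sketch under an assumed helper name (the real Tx code additionally remaps the recycled buffer into the pool):

	static inline bool tx_try_recycle(struct sk_buff *skb,
					  const struct dpa_bp *bp,
					  uint16_t min_offset)
	{
		unsigned char *new_start;

		return dpa_skb_is_recyclable(skb) &&
		       dpa_buf_is_recyclable(skb, bp->size, min_offset,
					     &new_start);
	}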
  13231. +
  13232. +/* Build a linear skb around the received buffer.
  13233. + * We are guaranteed there is enough room at the end of the data buffer to
  13234. + * accommodate the shared info area of the skb.
  13235. + */
  13236. +static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
  13237. + const struct qm_fd *fd, int *use_gro)
  13238. +{
  13239. + dma_addr_t addr = qm_fd_addr(fd);
  13240. + ssize_t fd_off = dpa_fd_offset(fd);
  13241. + void *vaddr;
  13242. + const fm_prs_result_t *parse_results;
  13243. + struct sk_buff *skb = NULL, **skbh;
  13244. +
  13245. + vaddr = phys_to_virt(addr);
  13246. + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
  13247. +
  13248. + /* Retrieve the skb and adjust data and tail pointers, to make sure
  13249. + * forwarded skbs will have enough space on Tx if extra headers
  13250. + * are added.
  13251. + */
  13252. + DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
  13253. +
  13254. +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
  13255. + /* When using jumbo Rx buffers, we risk having frames dropped due to
  13256. + * the socket backlog reaching its maximum allowed size.
  13257. + * Use the frame length for the skb truesize instead of the buffer
  13258. + * size, as this is the size of the data that actually gets copied to
  13259. + * userspace.
  13260. + */
  13261. + skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
  13262. +#endif
  13263. +
  13264. + DPA_BUG_ON(fd_off != priv->rx_headroom);
  13265. + skb_reserve(skb, fd_off);
  13266. + skb_put(skb, dpa_fd_length(fd));
  13267. +
  13268. + /* Peek at the parse results for csum validation */
  13269. + parse_results = (const fm_prs_result_t *)(vaddr +
  13270. + DPA_RX_PRIV_DATA_SIZE);
  13271. + _dpa_process_parse_results(parse_results, fd, skb, use_gro);
  13272. +
  13273. +#ifdef CONFIG_FSL_DPAA_1588
  13274. + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
  13275. + dpa_ptp_store_rxstamp(priv, skb, vaddr);
  13276. +#endif
  13277. +#ifdef CONFIG_FSL_DPAA_TS
  13278. + if (priv->ts_rx_en)
  13279. + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
  13280. +#endif /* CONFIG_FSL_DPAA_TS */
  13281. +
  13282. + return skb;
  13283. +}
  13284. +
  13285. +
  13286. +/* Build an skb with the data of the first S/G entry in the linear portion and
  13287. + * the rest of the frame as skb fragments.
  13288. + *
  13289. + * The page fragment holding the S/G Table is recycled here.
  13290. + */
  13291. +static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
  13292. + const struct qm_fd *fd, int *use_gro,
  13293. + int *count_ptr)
  13294. +{
  13295. + const struct qm_sg_entry *sgt;
  13296. + dma_addr_t addr = qm_fd_addr(fd);
  13297. + ssize_t fd_off = dpa_fd_offset(fd);
  13298. + dma_addr_t sg_addr;
  13299. + void *vaddr, *sg_vaddr;
  13300. + struct dpa_bp *dpa_bp;
  13301. + struct page *page, *head_page;
  13302. + int frag_offset, frag_len;
  13303. + int page_offset;
  13304. + int i;
  13305. + const fm_prs_result_t *parse_results;
  13306. + struct sk_buff *skb = NULL, *skb_tmp, **skbh;
  13307. +
  13308. + vaddr = phys_to_virt(addr);
  13309. + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
  13310. +
  13311. + dpa_bp = priv->dpa_bp;
  13312. + /* Iterate through the SGT entries and add data buffers to the skb */
  13313. + sgt = vaddr + fd_off;
  13314. + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
  13315. + /* Extension bit is not supported */
  13316. + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
  13317. +
  13318. + /* We use a single global Rx pool */
  13319. + DPA_BUG_ON(dpa_bp !=
  13320. + dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
  13321. +
  13322. + sg_addr = qm_sg_addr(&sgt[i]);
  13323. + sg_vaddr = phys_to_virt(sg_addr);
  13324. + DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
  13325. + SMP_CACHE_BYTES));
  13326. +
  13327. + dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
  13328. + DMA_BIDIRECTIONAL);
  13329. + if (i == 0) {
  13330. + DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
  13331. + DPA_BUG_ON(skb->head != sg_vaddr);
  13332. +#ifdef CONFIG_FSL_DPAA_1588
  13333. + if (priv->tsu && priv->tsu->valid &&
  13334. + priv->tsu->hwts_rx_en_ioctl)
  13335. + dpa_ptp_store_rxstamp(priv, skb, vaddr);
  13336. +#endif
  13337. +#ifdef CONFIG_FSL_DPAA_TS
  13338. + if (priv->ts_rx_en)
  13339. + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
  13340. +#endif /* CONFIG_FSL_DPAA_TS */
  13341. +
  13342. + /* In the case of a SG frame, FMan stores the Internal
  13343. + * Context in the buffer containing the sgt.
  13344. + * Inspect the parse results before anything else.
  13345. + */
  13346. + parse_results = (const fm_prs_result_t *)(vaddr +
  13347. + DPA_RX_PRIV_DATA_SIZE);
  13348. + _dpa_process_parse_results(parse_results, fd, skb,
  13349. + use_gro);
  13350. +
  13351. + /* Make sure forwarded skbs will have enough space
  13352. + * on Tx, if extra headers are added.
  13353. + */
  13354. + DPA_BUG_ON(fd_off != priv->rx_headroom);
  13355. + skb_reserve(skb, fd_off);
  13356. + skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
  13357. + } else {
  13358. + /* Not the first S/G entry; all data from this buffer is
  13359. + * added as an skb fragment; the fragment index is offset
  13360. + * by one, since the first S/G entry was incorporated into
  13361. + * the linear part of the skb.
  13362. + *
  13363. + * Caution: 'page' may be a tail page.
  13364. + */
  13365. + DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
  13366. + page = virt_to_page(sg_vaddr);
  13367. + head_page = virt_to_head_page(sg_vaddr);
  13368. +
  13369. + /* Free (only) the skbuff shell because its data buffer
  13370. + * is already a frag in the main skb.
  13371. + */
  13372. + get_page(head_page);
  13373. + dev_kfree_skb(skb_tmp);
  13374. +
  13375. + /* Compute offset in (possibly tail) page */
  13376. + page_offset = ((unsigned long)sg_vaddr &
  13377. + (PAGE_SIZE - 1)) +
  13378. + (page_address(page) - page_address(head_page));
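+ /* Illustrative example (hypothetical values): with 4 KB pages, an
+  * sg_vaddr 0x240 bytes into the second page of a compound page
+  * yields page_offset = 0x240 + 4096.
+  */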
  13379. + /* page_offset only refers to the beginning of sgt[i];
  13380. + * but the buffer itself may have an internal offset.
  13381. + */
  13382. + frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
  13383. + page_offset;
  13384. + frag_len = qm_sg_entry_get_len(&sgt[i]);
  13385. + /* skb_add_rx_frag() does no checking on the page; if
  13386. + * we pass it a tail page, we'll end up with
  13387. + * bad page accounting and eventually with segfaults.
  13388. + */
  13389. + skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
  13390. + frag_len, dpa_bp->size);
  13391. + }
  13392. + /* Update the pool count for the current {cpu x bpool} */
  13393. + (*count_ptr)--;
  13394. +
  13395. + if (qm_sg_entry_get_final(&sgt[i]))
  13396. + break;
  13397. + }
  13398. + WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
  13399. +
  13400. + /* recycle the SGT fragment */
  13401. + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
  13402. + dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
  13403. + return skb;
  13404. +}
  13405. +
  13406. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  13407. +static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
  13408. + struct sk_buff *skb)
  13409. +{
  13410. + if (unlikely(priv->loop_to < 0))
  13411. + return 0; /* loop disabled by default */
  13412. +
  13413. + skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
  13414. + dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
  13415. +
  13416. + return 1; /* Frame Tx on the selected interface */
  13417. +}
  13418. +#endif
  13419. +
  13420. +void __hot _dpa_rx(struct net_device *net_dev,
  13421. + struct qman_portal *portal,
  13422. + const struct dpa_priv_s *priv,
  13423. + struct dpa_percpu_priv_s *percpu_priv,
  13424. + const struct qm_fd *fd,
  13425. + u32 fqid,
  13426. + int *count_ptr)
  13427. +{
  13428. + struct dpa_bp *dpa_bp;
  13429. + struct sk_buff *skb;
  13430. + dma_addr_t addr = qm_fd_addr(fd);
  13431. + u32 fd_status = fd->status;
  13432. + unsigned int skb_len;
  13433. + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
  13434. + int use_gro = net_dev->features & NETIF_F_GRO;
  13435. +
  13436. + if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
  13437. + if (netif_msg_hw(priv) && net_ratelimit())
  13438. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  13439. + fd_status & FM_FD_STAT_RX_ERRORS);
  13440. +
  13441. + percpu_stats->rx_errors++;
  13442. + goto _release_frame;
  13443. + }
  13444. +
  13445. + dpa_bp = priv->dpa_bp;
  13446. + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
  13447. +
  13448. + dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
  13449. + /* prefetch the first 64 bytes of the frame or the SGT start */
  13450. + prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
  13451. +
  13452. + /* The only FD types that we may receive are contig and S/G */
  13453. + DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
  13454. +
  13455. + if (likely(fd->format == qm_fd_contig)) {
  13456. +#ifdef CONFIG_FSL_DPAA_HOOKS
  13457. + /* Execute the Rx processing hook, if it exists. */
  13458. + if (dpaa_eth_hooks.rx_default &&
  13459. + dpaa_eth_hooks.rx_default((void *)fd, net_dev,
  13460. + fqid) == DPAA_ETH_STOLEN) {
  13461. + /* won't count the rx bytes in */
  13462. + return;
  13463. + }
  13464. +#endif
  13465. + skb = contig_fd_to_skb(priv, fd, &use_gro);
  13466. + } else {
  13467. + skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
  13468. + percpu_priv->rx_sg++;
  13469. + }
  13470. +
  13471. + /* Account for either the contig buffer or the SGT buffer (depending on
  13472. + * which case we were in) having been removed from the pool.
  13473. + */
  13474. + (*count_ptr)--;
  13475. + skb->protocol = eth_type_trans(skb, net_dev);
  13476. +
  13477. + /* IP Reassembled frames are allowed to be larger than MTU */
  13478. + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
  13479. + !(fd_status & FM_FD_IPR))) {
  13480. + percpu_stats->rx_dropped++;
  13481. + goto drop_bad_frame;
  13482. + }
  13483. +
  13484. + skb_len = skb->len;
  13485. +
  13486. +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
  13487. + if (dpa_skb_loop(priv, skb)) {
  13488. + percpu_stats->rx_packets++;
  13489. + percpu_stats->rx_bytes += skb_len;
  13490. + return;
  13491. + }
  13492. +#endif
  13493. +
  13494. + if (use_gro) {
  13495. + gro_result_t gro_result;
  13496. + const struct qman_portal_config *pc =
  13497. + qman_p_get_portal_config(portal);
  13498. + struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
  13499. +
  13500. + np->p = portal;
  13501. + gro_result = napi_gro_receive(&np->napi, skb);
  13502. + /* If frame is dropped by the stack, rx_dropped counter is
  13503. + * incremented automatically, so no need for us to update it
  13504. + */
  13505. + if (unlikely(gro_result == GRO_DROP))
  13506. + goto packet_dropped;
  13507. + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
  13508. + goto packet_dropped;
  13509. +
  13510. + percpu_stats->rx_packets++;
  13511. + percpu_stats->rx_bytes += skb_len;
  13512. +
  13513. +packet_dropped:
  13514. + return;
  13515. +
  13516. +drop_bad_frame:
  13517. + dev_kfree_skb(skb);
  13518. + return;
  13519. +
  13520. +_release_frame:
  13521. + dpa_fd_release(net_dev, fd);
  13522. +}
  13523. +
  13524. +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
  13525. + struct sk_buff *skb, struct qm_fd *fd,
  13526. + int *count_ptr, int *offset)
  13527. +{
  13528. + struct sk_buff **skbh;
  13529. + dma_addr_t addr;
  13530. + struct dpa_bp *dpa_bp = priv->dpa_bp;
  13531. + struct net_device *net_dev = priv->net_dev;
  13532. + int err;
  13533. + enum dma_data_direction dma_dir;
  13534. + unsigned char *buffer_start;
  13535. +
  13536. +#ifndef CONFIG_FSL_DPAA_TS
  13537. + /* Check the recycling conditions only if timestamp support is
  13538. + * not enabled; otherwise we need the fd back on Tx confirmation.
  13539. + */
  13540. +
  13541. + /* We can recycle the buffer if:
  13542. + * - the pool is not full
  13543. + * - the buffer meets the skb recycling conditions
  13544. + * - the buffer meets our own (size, offset, align) conditions
  13545. + */
  13546. + if (likely((*count_ptr < dpa_bp->target_count) &&
  13547. + dpa_skb_is_recyclable(skb) &&
  13548. + dpa_buf_is_recyclable(skb, dpa_bp->size,
  13549. + priv->tx_headroom, &buffer_start))) {
  13550. + /* Buffer is recyclable; use the new start address
  13551. + * and set fd parameters and DMA mapping direction
  13552. + */
  13553. + fd->bpid = dpa_bp->bpid;
  13554. + DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
  13555. + fd->offset = (uint16_t)(skb->data - buffer_start);
  13556. + dma_dir = DMA_BIDIRECTIONAL;
  13557. +
  13558. + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
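+ /* Record how far skb->data must sit from skb->head after
+  * recycling; dpa_tx_extended() uses this offset to make the skb
+  * look as if returned by build_skb().
+  */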
  13559. + *offset = skb_headroom(skb) - fd->offset;
  13560. + } else
  13561. +#endif
  13562. + {
  13563. + /* Not recyclable.
  13564. + * We are guaranteed to have at least tx_headroom bytes
  13565. + * available, so just use that for offset.
  13566. + */
  13567. + fd->bpid = 0xff;
  13568. + buffer_start = skb->data - priv->tx_headroom;
  13569. + fd->offset = priv->tx_headroom;
  13570. + dma_dir = DMA_TO_DEVICE;
  13571. +
  13572. + /* The buffer will be Tx-confirmed, but the TxConf cb must
  13573. + * necessarily look at our Tx private data to retrieve the
  13574. + * skbuff; hence the backpointer goes at buffer_start (offset 0).
  13575. + */
  13576. + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
  13577. + }
  13578. +
  13579. + /* Enable L3/L4 hardware checksum computation.
  13580. + *
  13581. + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
  13582. + * need to write into the skb.
  13583. + */
  13584. + err = dpa_enable_tx_csum(priv, skb, fd,
  13585. + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
  13586. + if (unlikely(err < 0)) {
  13587. + if (netif_msg_tx_err(priv) && net_ratelimit())
  13588. + netdev_err(net_dev, "HW csum error: %d\n", err);
  13589. + return err;
  13590. + }
  13591. +
  13592. + /* Fill in the rest of the FD fields */
  13593. + fd->format = qm_fd_contig;
  13594. + fd->length20 = skb->len;
  13595. + fd->cmd |= FM_FD_CMD_FCO;
  13596. +
  13597. + /* Map the entire buffer size that may be seen by FMan, but no more */
  13598. + addr = dma_map_single(dpa_bp->dev, skbh,
  13599. + skb_tail_pointer(skb) - buffer_start, dma_dir);
  13600. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  13601. + if (netif_msg_tx_err(priv) && net_ratelimit())
  13602. + netdev_err(net_dev, "dma_map_single() failed\n");
  13603. + return -EINVAL;
  13604. + }
  13605. + fd->addr = addr;
  13606. +
  13608. + return 0;
  13609. +}
  13610. +EXPORT_SYMBOL(skb_to_contig_fd);
  13611. +
  13612. +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
  13613. + struct sk_buff *skb, struct qm_fd *fd)
  13614. +{
  13615. + struct dpa_bp *dpa_bp = priv->dpa_bp;
  13616. + dma_addr_t addr;
  13617. + dma_addr_t sg_addr;
  13618. + struct sk_buff **skbh;
  13619. + struct net_device *net_dev = priv->net_dev;
  13620. + int sg_len;
  13621. + int err;
  13622. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  13623. + unsigned long boundary;
+ void *frag_vaddr;
  13624. + int k;
  13625. +#endif
  13626. +
  13627. + struct qm_sg_entry *sgt;
  13628. + void *sgt_buf;
  13629. + void *buffer_start;
  13630. + skb_frag_t *frag;
  13631. + int i, j;
  13632. + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
  13633. + const int nr_frags = skb_shinfo(skb)->nr_frags;
  13634. +
  13635. + fd->format = qm_fd_sg;
  13636. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
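+ /* Allocate the full DPA_SGT_MAX_ENTRIES here, since 4 KB-boundary
+  * splits below can add entries beyond the usual 1 + nr_frags.
+  */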
  13637. + /* get a page frag to store the SGTable */
  13638. + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
  13639. + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
  13640. + if (unlikely(!sgt_buf)) {
  13641. + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
  13642. + return -ENOMEM;
  13643. + }
  13644. +
  13645. + /* netdev_alloc_frag() does not zero the allocated memory */
  13646. + memset(sgt_buf, 0, priv->tx_headroom +
  13647. + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
  13648. +#else
  13649. + /* get a page frag to store the SGTable */
  13650. + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
  13651. + sizeof(struct qm_sg_entry) * (1 + nr_frags));
  13652. + if (unlikely(!sgt_buf)) {
  13653. + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
  13654. + return -ENOMEM;
  13655. + }
  13656. +
  13657. + memset(sgt_buf, 0, priv->tx_headroom +
  13658. + sizeof(struct qm_sg_entry) * (1 + nr_frags));
  13659. +#endif
  13660. +
  13661. + /* Enable L3/L4 hardware checksum computation.
  13662. + *
  13663. + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
  13664. + * need to write into the skb.
  13665. + */
  13666. + err = dpa_enable_tx_csum(priv, skb, fd,
  13667. + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
  13668. + if (unlikely(err < 0)) {
  13669. + if (netif_msg_tx_err(priv) && net_ratelimit())
  13670. + netdev_err(net_dev, "HW csum error: %d\n", err);
  13671. + goto csum_failed;
  13672. + }
  13673. +
  13674. + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
  13675. + sg_len = skb_headlen(skb);
  13676. + qm_sg_entry_set_bpid(&sgt[0], 0xff);
  13677. + qm_sg_entry_set_offset(&sgt[0], 0);
  13678. + qm_sg_entry_set_len(&sgt[0], sg_len);
  13679. + qm_sg_entry_set_ext(&sgt[0], 0);
  13680. + qm_sg_entry_set_final(&sgt[0], 0);
  13681. +
  13682. + addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
  13683. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  13684. + dev_err(dpa_bp->dev, "DMA mapping failed\n");
  13685. + err = -EINVAL;
  13686. + goto sg0_map_failed;
  13688. + }
  13689. +
  13690. + qm_sg_entry_set64(&sgt[0], addr);
  13691. +
  13692. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
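+ /* LS1043A DMA workaround (presumably an SoC erratum): a single S/G
+  * entry must not cross a 4 KB boundary. If the linear part does,
+  * split it into two consecutive SGT entries at that boundary.
+  */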
  13693. + j = 0;
  13694. + if (unlikely(HAS_DMA_ISSUE(skb->data, sg_len))) {
  13695. + boundary = BOUNDARY_4K(skb->data, sg_len);
  13696. + qm_sg_entry_set_len(&sgt[j], boundary -
  13697. + (unsigned long)skb->data);
  13698. +
  13699. + j++;
  13700. + qm_sg_entry_set_bpid(&sgt[j], 0xff);
  13701. + qm_sg_entry_set_offset(&sgt[j], 0);
  13702. + qm_sg_entry_set_len(&sgt[j],
  13703. + ((unsigned long)skb->data + (unsigned long)sg_len) -
  13704. + boundary);
  13705. + qm_sg_entry_set_ext(&sgt[j], 0);
  13706. + qm_sg_entry_set_final(&sgt[j], 0);
  13707. +
  13708. + /* keep the offset in the address */
  13709. + qm_sg_entry_set64(&sgt[j], addr +
  13710. + (boundary -
  13711. + (unsigned long)skb->data));
  13712. + }
  13713. + j++;
  13714. +
  13715. + /* populate the rest of SGT entries */
  13716. + for (i = 1; i <= nr_frags; i++, j++) {
  13717. + frag = &skb_shinfo(skb)->frags[i - 1];
  13718. + qm_sg_entry_set_bpid(&sgt[j], 0xff);
  13719. + qm_sg_entry_set_offset(&sgt[j], 0);
  13720. + qm_sg_entry_set_len(&sgt[j], frag->size);
  13721. + qm_sg_entry_set_ext(&sgt[j], 0);
  13722. +
  13723. + DPA_BUG_ON(!skb_frag_page(frag));
  13724. + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
  13725. + dma_dir);
  13726. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  13727. + dev_err(dpa_bp->dev, "DMA mapping failed\n");
  13728. + err = -EINVAL;
  13729. + goto sg_map_failed;
  13730. + }
  13731. +
  13732. + /* keep the offset in the address */
  13733. + qm_sg_entry_set64(&sgt[j], addr);
  13734. +
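+ /* Same 4 KB-boundary split for page fragments. The check runs on
+  * the fragment's data address (via skb_frag_address()), assuming
+  * buffers are linearly mapped so that virtual and bus 4 KB
+  * boundaries coincide.
+  */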
+ frag_vaddr = skb_frag_address(frag);
  13735. + if (unlikely(HAS_DMA_ISSUE(frag_vaddr, frag->size))) {
  13736. + boundary = BOUNDARY_4K(frag_vaddr, frag->size);
  13737. + qm_sg_entry_set_len(&sgt[j], boundary -
  13738. + (unsigned long)frag_vaddr);
  13739. +
  13740. + j++;
  13741. + qm_sg_entry_set_bpid(&sgt[j], 0xff);
  13742. + qm_sg_entry_set_offset(&sgt[j], 0);
  13743. + qm_sg_entry_set_len(&sgt[j],
  13744. + ((unsigned long)frag->size -
  13745. + (boundary - (unsigned long)frag_vaddr)));
  13746. + qm_sg_entry_set_ext(&sgt[j], 0);
  13747. +
  13748. + /* keep the offset in the address */
  13749. + qm_sg_entry_set64(&sgt[j], addr +
  13750. + (boundary - (unsigned long)frag_vaddr));
  13751. + }
  13752. +
  13753. + if (i == nr_frags)
  13754. + qm_sg_entry_set_final(&sgt[j], 1);
  13755. + else
  13756. + qm_sg_entry_set_final(&sgt[j], 0);
  13757. +#else
  13758. +
  13759. + /* populate the rest of SGT entries */
  13760. + for (i = 1; i <= nr_frags; i++) {
  13761. + frag = &skb_shinfo(skb)->frags[i - 1];
  13762. + qm_sg_entry_set_bpid(&sgt[i], 0xff);
  13763. + qm_sg_entry_set_offset(&sgt[i], 0);
  13764. + qm_sg_entry_set_len(&sgt[i], frag->size);
  13765. + qm_sg_entry_set_ext(&sgt[i], 0);
  13766. +
  13767. + if (i == nr_frags)
  13768. + qm_sg_entry_set_final(&sgt[i], 1);
  13769. + else
  13770. + qm_sg_entry_set_final(&sgt[i], 0);
  13771. +
  13772. + DPA_BUG_ON(!skb_frag_page(frag));
  13773. + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
  13774. + dma_dir);
  13775. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  13776. + dev_err(dpa_bp->dev, "DMA mapping failed\n");
  13777. + err = -EINVAL;
  13778. + goto sg_map_failed;
  13779. + }
  13780. +
  13781. + /* keep the offset in the address */
  13782. + qm_sg_entry_set64(&sgt[i], addr);
  13783. +#endif
  13784. + }
  13785. +
  13786. + fd->length20 = skb->len;
  13787. + fd->offset = priv->tx_headroom;
  13788. +
  13789. + /* DMA map the SGT page */
  13790. + buffer_start = (void *)sgt - priv->tx_headroom;
  13791. + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
  13792. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  13793. + addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
  13794. + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
  13795. + dma_dir);
  13796. +#else
  13797. + addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
  13798. + sizeof(struct qm_sg_entry) * (1 + nr_frags),
  13799. + dma_dir);
  13800. +#endif
  13801. + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
  13802. + dev_err(dpa_bp->dev, "DMA mapping failed\n");
  13803. + err = -EINVAL;
  13804. + goto sgt_map_failed;
  13805. + }
  13806. +
  13807. + fd->bpid = 0xff;
  13808. + fd->cmd |= FM_FD_CMD_FCO;
  13809. + fd->addr = addr;
  13810. +
  13811. + return 0;
  13812. +
  13813. +sgt_map_failed:
  13814. +sg_map_failed:
  13815. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  13816. + for (k = 0; k < j; k++) {
  13817. + sg_addr = qm_sg_addr(&sgt[k]);
  13818. + dma_unmap_page(dpa_bp->dev, sg_addr,
  13819. + qm_sg_entry_get_len(&sgt[k]), dma_dir);
  13820. + }
  13821. +#else
  13822. + for (j = 0; j < i; j++) {
  13823. + sg_addr = qm_sg_addr(&sgt[j]);
  13824. + dma_unmap_page(dpa_bp->dev, sg_addr,
  13825. + qm_sg_entry_get_len(&sgt[j]), dma_dir);
  13826. + }
  13827. +#endif
  13828. +sg0_map_failed:
  13829. +csum_failed:
  13830. + put_page(virt_to_head_page(sgt_buf));
  13831. +
  13832. + return err;
  13833. +}
  13834. +EXPORT_SYMBOL(skb_to_sg_fd);
  13835. +
  13836. +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
  13837. +{
  13838. + struct dpa_priv_s *priv;
  13839. + const int queue_mapping = dpa_get_queue_mapping(skb);
  13840. + struct qman_fq *egress_fq, *conf_fq;
  13841. +
  13842. +#ifdef CONFIG_FSL_DPAA_HOOKS
  13843. + /* If there is a Tx hook, run it. */
  13844. + if (dpaa_eth_hooks.tx &&
  13845. + dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
  13846. + /* won't update any Tx stats */
  13847. + return NETDEV_TX_OK;
  13848. +#endif
  13849. +
  13850. + priv = netdev_priv(net_dev);
  13851. +
  13852. +#ifdef CONFIG_FSL_DPAA_CEETM
  13853. + if (priv->ceetm_en)
  13854. + return ceetm_tx(skb, net_dev);
  13855. +#endif
  13856. +
  13857. + egress_fq = priv->egress_fqs[queue_mapping];
  13858. + conf_fq = priv->conf_fqs[queue_mapping];
  13859. +
  13860. + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
  13861. +}
  13862. +
  13863. +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
  13864. + struct qman_fq *egress_fq, struct qman_fq *conf_fq)
  13865. +{
  13866. + struct dpa_priv_s *priv;
  13867. + struct qm_fd fd;
  13868. + struct dpa_percpu_priv_s *percpu_priv;
  13869. + struct rtnl_link_stats64 *percpu_stats;
  13870. + int err = 0;
  13871. + const bool nonlinear = skb_is_nonlinear(skb);
  13872. + int *countptr, offset = 0;
  13873. +
  13874. + priv = netdev_priv(net_dev);
  13875. + /* Non-migratable context, safe to use raw_cpu_ptr */
  13876. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  13877. + percpu_stats = &percpu_priv->stats;
  13878. + countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
  13879. +
  13880. + clear_fd(&fd);
  13881. +
  13882. +#ifdef CONFIG_FSL_DPAA_1588
  13883. + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
  13884. + fd.cmd |= FM_FD_CMD_UPD;
  13885. +#endif
  13886. +#ifdef CONFIG_FSL_DPAA_TS
  13887. + if (unlikely(priv->ts_tx_en &&
  13888. + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
  13889. + fd.cmd |= FM_FD_CMD_UPD;
  13890. + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  13891. +#endif /* CONFIG_FSL_DPAA_TS */
  13892. +
  13893. + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
  13894. + * we don't feed FMan with more fragments than it supports.
  13895. + * Note that the first SGT entry stores the linear part of the skb,
  13896. + * leaving room for one fragment less.
  13897. + */
  13898. + if (nonlinear &&
  13899. + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_ENTRIES_THRESHOLD)) {
  13900. + /* Just create a S/G fd based on the skb */
  13901. + err = skb_to_sg_fd(priv, skb, &fd);
  13902. + percpu_priv->tx_frag_skbuffs++;
  13903. + } else {
  13904. + /* Make sure we have enough headroom to accommodate private
  13905. + * data, parse results, etc. Normally this shouldn't happen if
  13906. + * we're here via the standard kernel stack.
  13907. + */
  13908. + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
  13909. + struct sk_buff *skb_new;
  13910. +
  13911. + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
  13912. + if (unlikely(!skb_new)) {
  13913. + dev_kfree_skb(skb);
  13914. + percpu_stats->tx_errors++;
  13915. + return NETDEV_TX_OK;
  13916. + }
  13917. + dev_kfree_skb(skb);
  13918. + skb = skb_new;
  13919. + }
  13920. +
  13921. + /* We're going to store the skb backpointer at the beginning
  13922. + * of the data buffer, so we need a privately owned skb
  13923. + */
  13924. +
  13925. + /* Code borrowed from skb_unshare(). */
  13926. + if (skb_cloned(skb)) {
  13927. + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
  13928. + kfree_skb(skb);
  13929. + skb = nskb;
  13930. + /* skb_copy() has now linearized the skbuff. */
  13931. + } else if (unlikely(nonlinear)) {
  13932. + /* We are here because the egress skb contains
  13933. + * more fragments than we support. In this case,
  13934. + * we have no choice but to linearize it ourselves.
  13935. + */
  13936. + err = __skb_linearize(skb);
  13937. + }
  13938. + if (unlikely(!skb || err < 0))
  13939. + /* Common out-of-memory error path */
  13940. + goto enomem;
  13941. +
  13942. +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
  13943. + if (unlikely(HAS_DMA_ISSUE(skb->data, skb->len))) {
  13944. + err = skb_to_sg_fd(priv, skb, &fd);
  13945. + percpu_priv->tx_frag_skbuffs++;
  13946. + } else {
  13947. + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
  13948. + }
  13949. +#else
  13950. + /* Finally, create a contig FD from this skb */
  13951. + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
  13952. +#endif
  13953. + }
  13954. + if (unlikely(err < 0))
  13955. + goto skb_to_fd_failed;
  13956. +
  13957. + if (fd.bpid != 0xff) {
  13958. + skb_recycle(skb);
  13959. + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
  13960. + * but we need the skb to look as if returned by build_skb().
  13961. + * We need to manually adjust the tailptr as well.
  13962. + */
  13963. + skb->data = skb->head + offset;
  13964. + skb_reset_tail_pointer(skb);
  13965. +
  13966. + (*countptr)++;
  13967. + percpu_priv->tx_returned++;
  13968. + }
  13969. +
  13970. + if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
  13971. + goto xmit_failed;
  13972. +
  13973. + net_dev->trans_start = jiffies;
  13974. + return NETDEV_TX_OK;
  13975. +
  13976. +xmit_failed:
  13977. + if (fd.bpid != 0xff) {
  13978. + (*countptr)--;
  13979. + percpu_priv->tx_returned--;
  13980. + dpa_fd_release(net_dev, &fd);
  13981. + percpu_stats->tx_errors++;
  13982. + return NETDEV_TX_OK;
  13983. + }
  13984. + _dpa_cleanup_tx_fd(priv, &fd);
  13985. +skb_to_fd_failed:
  13986. +enomem:
  13987. + percpu_stats->tx_errors++;
  13988. + dev_kfree_skb(skb);
  13989. + return NETDEV_TX_OK;
  13990. +}
  13991. +EXPORT_SYMBOL(dpa_tx_extended);
  13992. --- /dev/null
  13993. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
  13994. @@ -0,0 +1,914 @@
  13995. +/* Copyright 2008-2013 Freescale Semiconductor Inc.
  13996. + *
  13997. + * Redistribution and use in source and binary forms, with or without
  13998. + * modification, are permitted provided that the following conditions are met:
  13999. + * * Redistributions of source code must retain the above copyright
  14000. + * notice, this list of conditions and the following disclaimer.
  14001. + * * Redistributions in binary form must reproduce the above copyright
  14002. + * notice, this list of conditions and the following disclaimer in the
  14003. + * documentation and/or other materials provided with the distribution.
  14004. + * * Neither the name of Freescale Semiconductor nor the
  14005. + * names of its contributors may be used to endorse or promote products
  14006. + * derived from this software without specific prior written permission.
  14007. + *
  14008. + *
  14009. + * ALTERNATIVELY, this software may be distributed under the terms of the
  14010. + * GNU General Public License ("GPL") as published by the Free Software
  14011. + * Foundation, either version 2 of that License or (at your option) any
  14012. + * later version.
  14013. + *
  14014. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  14015. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  14016. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  14017. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  14018. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  14019. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  14020. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  14021. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  14022. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  14023. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  14024. + */
  14025. +
  14026. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  14027. +#define pr_fmt(fmt) \
  14028. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  14029. + KBUILD_BASENAME".c", __LINE__, __func__
  14030. +#else
  14031. +#define pr_fmt(fmt) \
  14032. + KBUILD_MODNAME ": " fmt
  14033. +#endif
  14034. +
  14035. +#include <linux/init.h>
  14036. +#include <linux/module.h>
  14037. +#include <linux/of_platform.h>
  14038. +#include <linux/etherdevice.h>
  14039. +#include <linux/kthread.h>
  14040. +#include <linux/percpu.h>
  14041. +#include <linux/highmem.h>
  14042. +#include <linux/fsl_qman.h>
  14043. +#include "dpaa_eth.h"
  14044. +#include "dpaa_eth_common.h"
  14045. +#include "dpaa_eth_base.h"
  14046. +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
  14047. +#include "mac.h"
  14048. +
  14049. +/* forward declarations */
  14050. +static enum qman_cb_dqrr_result __hot
  14051. +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
  14052. + const struct qm_dqrr_entry *dq);
  14053. +static enum qman_cb_dqrr_result __hot
  14054. +shared_tx_default_dqrr(struct qman_portal *portal,
  14055. + struct qman_fq *fq,
  14056. + const struct qm_dqrr_entry *dq);
  14057. +static enum qman_cb_dqrr_result
  14058. +shared_tx_error_dqrr(struct qman_portal *portal,
  14059. + struct qman_fq *fq,
  14060. + const struct qm_dqrr_entry *dq);
  14061. +static void shared_ern(struct qman_portal *portal,
  14062. + struct qman_fq *fq,
  14063. + const struct qm_mr_entry *msg);
  14064. +
  14065. +#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver"
  14066. +
  14067. +MODULE_LICENSE("Dual BSD/GPL");
  14068. +
  14069. +MODULE_DESCRIPTION(DPA_DESCRIPTION);
  14070. +
  14071. +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
  14072. +static uint16_t shared_tx_timeout = 1000;
  14073. +module_param(shared_tx_timeout, ushort, S_IRUGO);
  14074. +MODULE_PARM_DESC(shared_tx_timeout, "The Tx timeout in ms");
  14075. +
  14076. +static const struct of_device_id dpa_shared_match[];
  14077. +
  14078. +static const struct net_device_ops dpa_shared_ops = {
  14079. + .ndo_open = dpa_start,
  14080. + .ndo_start_xmit = dpa_shared_tx,
  14081. + .ndo_stop = dpa_stop,
  14082. + .ndo_tx_timeout = dpa_timeout,
  14083. + .ndo_get_stats64 = dpa_get_stats64,
  14084. + .ndo_set_mac_address = dpa_set_mac_address,
  14085. + .ndo_validate_addr = eth_validate_addr,
  14086. +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
  14087. + .ndo_select_queue = dpa_select_queue,
  14088. +#endif
  14089. + .ndo_change_mtu = dpa_change_mtu,
  14090. + .ndo_set_rx_mode = dpa_set_rx_mode,
  14091. + .ndo_init = dpa_ndo_init,
  14092. + .ndo_set_features = dpa_set_features,
  14093. + .ndo_fix_features = dpa_fix_features,
  14094. + .ndo_do_ioctl = dpa_ioctl,
  14095. +};
  14096. +
  14097. +const struct dpa_fq_cbs_t shared_fq_cbs = {
  14098. + .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } },
  14099. + .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } },
  14100. + .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } },
  14101. + .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } },
  14102. + .egress_ern = { .cb = { .ern = shared_ern } }
  14103. +};
  14104. +EXPORT_SYMBOL(shared_fq_cbs);
  14105. +
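+/* Translate a buffer's bus address to its virtual address, assuming
+ * the pool's backing memory is statically mapped: dpa_bp->vaddr is
+ * the kernel mapping of dpa_bp->paddr.
+ */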
  14106. +static inline void * __must_check __attribute__((nonnull))
  14107. +dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
  14108. +{
  14109. + return dpa_bp->vaddr + (addr - dpa_bp->paddr);
  14110. +}
  14111. +
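+/* Pick the first buffer pool (assumed sorted by ascending buffer
+ * size) whose buffers can hold the frame plus the Tx headroom;
+ * returns ERR_PTR(-ENODEV) if none is large enough.
+ */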
  14112. +static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
  14113. +{
  14114. + int i;
  14115. +
  14116. + for (i = 0; i < priv->bp_count; i++)
  14117. + if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
  14118. + return dpa_bpid2pool(priv->dpa_bp[i].bpid);
  14119. + return ERR_PTR(-ENODEV);
  14120. +}
  14121. +
  14122. +/* Copy to a memory region that requires kmapping from a linear buffer,
  14123. + * taking into account page boundaries in the destination
  14124. + */
  14125. +static void
  14126. +copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
  14127. +{
  14128. + struct page *page;
  14129. + size_t size, offset;
  14130. + void *page_vaddr;
  14131. +
  14132. + while (buf_size > 0) {
  14133. + offset = offset_in_page(phys_start);
  14134. + size = (offset + buf_size > PAGE_SIZE) ?
  14135. + PAGE_SIZE - offset : buf_size;
  14136. +
  14137. + page = pfn_to_page(phys_start >> PAGE_SHIFT);
  14138. + page_vaddr = kmap_atomic(page);
  14139. +
  14140. + memcpy(page_vaddr + offset, src, size);
  14141. +
  14142. + kunmap_atomic(page_vaddr);
  14143. +
  14144. + phys_start += size;
  14145. + src += size;
  14146. + buf_size -= size;
  14147. + }
  14148. +}
  14149. +
  14150. +/* Copy from a memory region that requires kmapping to a linear buffer,
  14151. + * taking into account page boundaries in the source
  14152. + */
  14153. +static void
  14154. +copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
  14155. +{
  14156. + struct page *page;
  14157. + size_t size, offset;
  14158. + void *page_vaddr;
  14159. +
  14160. + while (buf_size > 0) {
  14161. + offset = offset_in_page(phys_start);
  14162. + size = (offset + buf_size > PAGE_SIZE) ?
  14163. + PAGE_SIZE - offset : buf_size;
  14164. +
  14165. + page = pfn_to_page(phys_start >> PAGE_SHIFT);
  14166. + page_vaddr = kmap_atomic(page);
  14167. +
  14168. + memcpy(dest, page_vaddr + offset, size);
  14169. +
  14170. + kunmap_atomic(page_vaddr);
  14171. +
  14172. + phys_start += size;
  14173. + dest += size;
  14174. + buf_size -= size;
  14175. + }
  14176. +}
  14177. +
  14178. +static void
  14179. +dpa_fd_release_sg(const struct net_device *net_dev,
  14180. + const struct qm_fd *fd)
  14181. +{
  14182. + const struct dpa_priv_s *priv;
  14183. + struct qm_sg_entry *sgt;
  14184. + struct dpa_bp *_dpa_bp;
  14185. + struct bm_buffer _bmb;
  14186. +
  14187. + priv = netdev_priv(net_dev);
  14188. +
  14189. + _bmb.hi = fd->addr_hi;
  14190. + _bmb.lo = fd->addr_lo;
  14191. +
  14192. + _dpa_bp = dpa_bpid2pool(fd->bpid);
  14193. + BUG_ON(!_dpa_bp);
  14194. +
  14195. + if (_dpa_bp->vaddr) {
  14196. + sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
  14197. + dpa_fd_offset(fd);
  14198. + dpa_release_sgt(sgt);
  14199. + } else {
  14200. + sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
  14201. + if (sgt == NULL) {
  14202. + if (netif_msg_tx_err(priv) && net_ratelimit())
  14203. + netdev_err(net_dev,
  14204. + "Memory allocation failed\n");
  14205. + return;
  14206. + }
  14207. +
  14208. + copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
  14209. + dpa_fd_offset(fd),
  14210. + min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
  14211. + _dpa_bp->size));
  14212. + dpa_release_sgt(sgt);
  14213. + kfree(sgt);
  14214. + }
  14215. +
  14216. + while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
  14217. + cpu_relax();
  14218. +}
  14219. +
  14220. +static enum qman_cb_dqrr_result __hot
  14221. +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
  14222. + const struct qm_dqrr_entry *dq)
  14223. +{
  14224. + struct net_device *net_dev;
  14225. + struct dpa_priv_s *priv;
  14226. + struct dpa_percpu_priv_s *percpu_priv;
  14227. + const struct qm_fd *fd = &dq->fd;
  14228. + struct dpa_bp *dpa_bp;
  14229. + struct sk_buff *skb;
  14230. + struct qm_sg_entry *sgt;
  14231. + int i;
  14232. + void *frag_addr;
  14233. + u32 frag_length;
  14234. + u32 offset;
  14235. +
  14236. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  14237. + priv = netdev_priv(net_dev);
  14238. +
  14239. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  14240. +
  14241. + dpa_bp = dpa_bpid2pool(fd->bpid);
  14242. + BUG_ON(!dpa_bp);
  14243. +
  14244. + if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS)) {
  14245. + if (netif_msg_hw(priv) && net_ratelimit())
  14246. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  14247. + fd->status & FM_FD_STAT_RX_ERRORS);
  14248. +
  14249. + percpu_priv->stats.rx_errors++;
  14250. +
  14251. + goto out;
  14252. + }
  14253. +
  14254. + skb = __netdev_alloc_skb(net_dev,
  14255. + priv->tx_headroom + dpa_fd_length(fd),
  14256. + GFP_ATOMIC);
  14257. + if (unlikely(skb == NULL)) {
  14258. + if (netif_msg_rx_err(priv) && net_ratelimit())
  14259. + netdev_err(net_dev, "Could not alloc skb\n");
  14260. +
  14261. + percpu_priv->stats.rx_dropped++;
  14262. +
  14263. + goto out;
  14264. + }
  14265. +
  14266. + skb_reserve(skb, priv->tx_headroom);
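+ /* Reserving tx_headroom up front lets a forwarded skb carry the
+  * Tx private data and parse results without a reallocation.
+  */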
  14267. +
  14268. + if (fd->format == qm_fd_sg) {
  14269. + if (dpa_bp->vaddr) {
  14270. + sgt = dpa_phys2virt(dpa_bp,
  14271. + qm_fd_addr(fd)) + dpa_fd_offset(fd);
  14272. +
  14273. + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
  14274. + offset = qm_sg_entry_get_offset(&sgt[i]);
  14275. + frag_addr = dpa_phys2virt(dpa_bp,
  14276. + qm_sg_addr(&sgt[i]) +
  14277. + offset);
  14278. + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
  14279. + frag_length = qm_sg_entry_get_len(&sgt[i]);
  14280. +
  14281. + /* copy from sgt[i] */
  14282. + memcpy(skb_put(skb, frag_length), frag_addr,
  14283. + frag_length);
  14284. + if (qm_sg_entry_get_final(&sgt[i]))
  14285. + break;
  14286. + }
  14287. + } else {
  14288. + sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
  14289. + GFP_ATOMIC);
  14290. + if (unlikely(sgt == NULL)) {
  14291. + if (netif_msg_rx_err(priv) && net_ratelimit())
  14292. + netdev_err(net_dev,
  14293. + "Memory allocation failed\n");
+ /* a dqrr callback cannot return an errno; drop
+  * the frame and release the FD instead
+  */
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
  14294. + goto out;
  14295. + }
  14296. +
  14297. + copy_from_unmapped_area(sgt,
  14298. + qm_fd_addr(fd) + dpa_fd_offset(fd),
  14299. + min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
  14300. + dpa_bp->size));
  14301. +
  14302. + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
  14303. + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
  14304. + frag_length = qm_sg_entry_get_len(&sgt[i]);
  14305. + copy_from_unmapped_area(
  14306. + skb_put(skb, frag_length),
  14307. + qm_sg_addr(&sgt[i]) +
  14308. + qm_sg_entry_get_offset(&sgt[i]),
  14309. + frag_length);
  14310. +
  14311. + if (qm_sg_entry_get_final(&sgt[i]))
  14312. + break;
  14313. + }
  14314. +
  14315. + kfree(sgt);
  14316. + }
  14317. + goto skb_copied;
  14318. + }
  14319. +
  14320. + /* otherwise fd->format == qm_fd_contig */
  14321. + if (dpa_bp->vaddr) {
  14322. + /* Fill the SKB */
  14323. + memcpy(skb_put(skb, dpa_fd_length(fd)),
  14324. + dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
  14325. + dpa_fd_offset(fd), dpa_fd_length(fd));
  14326. + } else {
  14327. + copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
  14328. + qm_fd_addr(fd) + dpa_fd_offset(fd),
  14329. + dpa_fd_length(fd));
  14330. + }
  14331. +
  14332. +skb_copied:
  14333. + skb->protocol = eth_type_trans(skb, net_dev);
  14334. +
  14335. + /* IP Reassembled frames are allowed to be larger than MTU */
  14336. + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
  14337. + !(fd->status & FM_FD_IPR))) {
  14338. + percpu_priv->stats.rx_dropped++;
  14339. + dev_kfree_skb_any(skb);
  14340. + goto out;
  14341. + }
  14342. +
  14343. + if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
  14346. + percpu_priv->stats.rx_packets++;
  14347. + percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
  14348. + }
  14349. +
  14350. +out:
  14351. + if (fd->format == qm_fd_sg)
  14352. + dpa_fd_release_sg(net_dev, fd);
  14353. + else
  14354. + dpa_fd_release(net_dev, fd);
  14355. +
  14356. + return qman_cb_dqrr_consume;
  14357. +}
  14358. +
  14359. +static enum qman_cb_dqrr_result
  14360. +shared_tx_error_dqrr(struct qman_portal *portal,
  14361. + struct qman_fq *fq,
  14362. + const struct qm_dqrr_entry *dq)
  14363. +{
  14364. + struct net_device *net_dev;
  14365. + struct dpa_priv_s *priv;
  14366. + struct dpa_percpu_priv_s *percpu_priv;
  14367. + struct dpa_bp *dpa_bp;
  14368. + const struct qm_fd *fd = &dq->fd;
  14369. +
  14370. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  14371. + priv = netdev_priv(net_dev);
  14372. +
  14373. + dpa_bp = dpa_bpid2pool(fd->bpid);
  14374. + BUG_ON(!dpa_bp);
  14375. +
  14376. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  14377. +
  14378. + if (netif_msg_hw(priv) && net_ratelimit())
  14379. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  14380. + fd->status & FM_FD_STAT_TX_ERRORS);
  14381. +
  14382. + if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
  14383. + dpa_fd_release_sg(net_dev, fd);
  14384. + else
  14385. + dpa_fd_release(net_dev, fd);
  14386. +
  14387. + percpu_priv->stats.tx_errors++;
  14388. +
  14389. + return qman_cb_dqrr_consume;
  14390. +}
  14391. +
  14392. +static enum qman_cb_dqrr_result __hot
  14393. +shared_tx_default_dqrr(struct qman_portal *portal,
  14394. + struct qman_fq *fq,
  14395. + const struct qm_dqrr_entry *dq)
  14396. +{
  14397. + struct net_device *net_dev;
  14398. + struct dpa_priv_s *priv;
  14399. + struct dpa_percpu_priv_s *percpu_priv;
  14400. + struct dpa_bp *dpa_bp;
  14401. + const struct qm_fd *fd = &dq->fd;
  14402. +
  14403. + net_dev = ((struct dpa_fq *)fq)->net_dev;
  14404. + priv = netdev_priv(net_dev);
  14405. +
  14406. + dpa_bp = dpa_bpid2pool(fd->bpid);
  14407. + BUG_ON(!dpa_bp);
  14408. +
  14409. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  14410. +
  14411. + if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {
  14412. + if (netif_msg_hw(priv) && net_ratelimit())
  14413. + netdev_warn(net_dev, "FD status = 0x%08x\n",
  14414. + fd->status & FM_FD_STAT_TX_ERRORS);
  14415. +
  14416. + percpu_priv->stats.tx_errors++;
  14417. + }
  14418. +
  14419. + if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
  14420. + dpa_fd_release_sg(net_dev, fd);
  14421. + else
  14422. + dpa_fd_release(net_dev, fd);
  14423. +
  14424. + percpu_priv->tx_confirm++;
  14425. +
  14426. + return qman_cb_dqrr_consume;
  14427. +}
  14428. +
  14429. +static void shared_ern(struct qman_portal *portal,
  14430. + struct qman_fq *fq,
  14431. + const struct qm_mr_entry *msg)
  14432. +{
  14433. + struct net_device *net_dev;
  14434. + const struct dpa_priv_s *priv;
  14435. + struct dpa_percpu_priv_s *percpu_priv;
  14436. + struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
  14437. +
  14438. + net_dev = dpa_fq->net_dev;
  14439. + priv = netdev_priv(net_dev);
  14440. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  14441. +
  14442. + dpa_fd_release(net_dev, &msg->ern.fd);
  14443. +
  14444. + percpu_priv->stats.tx_dropped++;
  14445. + percpu_priv->stats.tx_fifo_errors++;
  14446. + count_ern(percpu_priv, msg);
  14447. +}
  14448. +
  14449. +int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
  14450. +{
  14451. + struct dpa_bp *dpa_bp;
  14452. + struct bm_buffer bmb;
  14453. + struct dpa_percpu_priv_s *percpu_priv;
  14454. + struct dpa_priv_s *priv;
  14455. + struct qm_fd fd;
  14456. + int queue_mapping;
  14457. + int err;
  14458. + void *dpa_bp_vaddr;
  14459. + fm_prs_result_t parse_results;
  14460. + fm_prs_result_t *parse_results_ref;
  14461. + struct qman_fq *egress_fq, *conf_fq;
  14462. +
  14463. + priv = netdev_priv(net_dev);
  14464. + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
  14465. +
  14466. + memset(&fd, 0, sizeof(fd));
  14467. + fd.format = qm_fd_contig;
  14468. +
  14469. + queue_mapping = smp_processor_id();
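+ /* Egress FQs are per-cpu, so the current cpu id doubles as the
+  * Tx queue index.
+  */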
  14470. +
  14471. + dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
  14472. + if (unlikely(IS_ERR(dpa_bp))) {
  14473. + percpu_priv->stats.tx_errors++;
  14474. + err = PTR_ERR(dpa_bp);
  14475. + goto bpools_too_small_error;
  14476. + }
  14477. +
  14478. + err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
  14479. + if (unlikely(err <= 0)) {
  14480. + percpu_priv->stats.tx_errors++;
  14481. + if (err == 0)
  14482. + err = -ENOMEM;
  14483. + goto buf_acquire_failed;
  14484. + }
  14485. + fd.bpid = dpa_bp->bpid;
  14486. +
  14487. + fd.length20 = skb_headlen(skb);
  14488. + fd.addr_hi = (uint8_t)bmb.hi;
  14489. + fd.addr_lo = bmb.lo;
  14490. + fd.offset = priv->tx_headroom;
  14491. +
  14492. + /* The virtual address of the buffer pool is expected to be NULL
  14493. + * in scenarios like MAC-less or Shared-MAC between Linux and
  14494. + * USDPAA. In this case the buffers are dynamically mapped/unmapped.
  14495. + */
  14496. + if (dpa_bp->vaddr) {
  14497. + dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
  14498. +
  14499. + /* Copy the packet payload */
  14500. + skb_copy_from_linear_data(skb,
  14501. + dpa_bp_vaddr + dpa_fd_offset(&fd),
  14502. + dpa_fd_length(&fd));
  14503. +
  14504. + /* If there is no MAC device, or a peer is set, the interface is MAC-less */
  14505. + if (!priv->mac_dev || priv->peer) {
  14506. + parse_results_ref = (fm_prs_result_t *) (dpa_bp_vaddr +
  14507. + DPA_TX_PRIV_DATA_SIZE);
  14508. + /* Default values; FMan will not generate/validate
  14509. + * CSUM;
  14510. + */
  14511. + parse_results_ref->l3r = 0;
  14512. + parse_results_ref->l4r = 0;
  14513. + parse_results_ref->ip_off[0] = 0xff;
  14514. + parse_results_ref->ip_off[1] = 0xff;
  14515. + parse_results_ref->l4_off = 0xff;
  14516. +
  14517. + fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
  14518. + } else {
  14519. + /* Enable L3/L4 hardware checksum computation,
  14520. + * if applicable
  14521. + */
  14522. + err = dpa_enable_tx_csum(priv, skb, &fd,
  14523. + dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
  14524. +
  14525. + if (unlikely(err < 0)) {
  14526. + if (netif_msg_tx_err(priv) && net_ratelimit())
  14527. + netdev_err(net_dev,
  14528. + "Tx HW csum error: %d\n", err);
  14529. + percpu_priv->stats.tx_errors++;
  14530. + goto l3_l4_csum_failed;
  14531. + }
  14532. + }
  14533. +
  14534. + } else {
  14535. + if (!priv->mac_dev || priv->peer) {
  14536. + /* Default values; FMan will not generate/validate
  14537. + * CSUM;
  14538. + */
  14539. + parse_results.l3r = 0;
  14540. + parse_results.l4r = 0;
  14541. + parse_results.ip_off[0] = 0xff;
  14542. + parse_results.ip_off[1] = 0xff;
  14543. + parse_results.l4_off = 0xff;
  14544. +
  14545. + fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
  14546. + } else {
  14547. + /* Enable L3/L4 hardware checksum computation,
  14548. + * if applicable
  14549. + */
  14550. + err = dpa_enable_tx_csum(priv, skb, &fd,
  14551. + (char *)&parse_results);
  14552. +
  14553. + if (unlikely(err < 0)) {
  14554. + if (netif_msg_tx_err(priv) && net_ratelimit())
  14555. + netdev_err(net_dev,
  14556. + "Tx HW csum error: %d\n", err);
  14557. + percpu_priv->stats.tx_errors++;
  14558. + goto l3_l4_csum_failed;
  14559. + }
  14560. +
  14561. + }
  14562. +
  14563. + copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
  14564. + &parse_results,
  14565. + DPA_PARSE_RESULTS_SIZE);
  14566. +
  14567. + copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
  14568. + skb->data,
  14569. + dpa_fd_length(&fd));
  14570. + }
  14571. +
  14572. + egress_fq = priv->egress_fqs[queue_mapping];
  14573. + conf_fq = priv->conf_fqs[queue_mapping];
  14574. +
  14575. + err = dpa_xmit(priv, &percpu_priv->stats, &fd, egress_fq, conf_fq);
  14576. +
  14577. +l3_l4_csum_failed:
  14578. +bpools_too_small_error:
  14579. +buf_acquire_failed:
  14580. + /* We're done with the skb */
  14581. + dev_kfree_skb(skb);
  14582. +
  14583. + /* err remains unused, NETDEV_TX_OK must be returned here */
  14584. + return NETDEV_TX_OK;
  14585. +}
  14586. +EXPORT_SYMBOL(dpa_shared_tx);
  14587. +
  14588. +static int dpa_shared_netdev_init(struct device_node *dpa_node,
  14589. + struct net_device *net_dev)
  14590. +{
  14591. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  14592. + const uint8_t *mac_addr;
  14593. +
  14594. + net_dev->netdev_ops = &dpa_shared_ops;
  14595. +
  14596. + net_dev->mem_start = priv->mac_dev->res->start;
  14597. + net_dev->mem_end = priv->mac_dev->res->end;
  14598. +
  14599. + mac_addr = priv->mac_dev->addr;
  14600. +
  14601. + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  14602. + NETIF_F_LLTX);
  14603. +
  14604. + return dpa_netdev_init(net_dev, mac_addr, shared_tx_timeout);
  14605. +}
  14606. +
  14607. +#ifdef CONFIG_PM
  14608. +
  14609. +static int dpa_shared_suspend(struct device *dev)
  14610. +{
  14611. + struct net_device *net_dev;
  14612. + struct dpa_priv_s *priv;
  14613. + struct mac_device *mac_dev;
  14614. + int err = 0;
  14615. +
  14616. + net_dev = dev_get_drvdata(dev);
  14617. + if (net_dev->flags & IFF_UP) {
  14618. + priv = netdev_priv(net_dev);
  14619. + mac_dev = priv->mac_dev;
  14620. +
  14621. + err = fm_port_suspend(mac_dev->port_dev[RX]);
  14622. + if (err)
  14623. + goto port_suspend_failed;
  14624. +
  14625. + err = fm_port_suspend(mac_dev->port_dev[TX]);
  14626. + if (err)
  14627. + err = fm_port_resume(mac_dev->port_dev[RX]);
  14628. + }
  14629. +
  14630. +port_suspend_failed:
  14631. + return err;
  14632. +}
  14633. +
  14634. +static int dpa_shared_resume(struct device *dev)
  14635. +{
  14636. + struct net_device *net_dev;
  14637. + struct dpa_priv_s *priv;
  14638. + struct mac_device *mac_dev;
  14639. + int err = 0;
  14640. +
  14641. + net_dev = dev_get_drvdata(dev);
  14642. + if (net_dev->flags & IFF_UP) {
  14643. + priv = netdev_priv(net_dev);
  14644. + mac_dev = priv->mac_dev;
  14645. +
  14646. + err = fm_port_resume(mac_dev->port_dev[TX]);
  14647. + if (err)
  14648. + goto port_resume_failed;
  14649. +
  14650. + err = fm_port_resume(mac_dev->port_dev[RX]);
  14651. + if (err)
  14652. + err = fm_port_suspend(mac_dev->port_dev[TX]);
  14653. + }
  14654. +
  14655. +port_resume_failed:
  14656. + return err;
  14657. +}
  14658. +
  14659. +static const struct dev_pm_ops shared_pm_ops = {
  14660. + .suspend = dpa_shared_suspend,
  14661. + .resume = dpa_shared_resume,
  14662. +};
  14663. +
  14664. +#define SHARED_PM_OPS (&shared_pm_ops)
  14665. +
  14666. +#else /* CONFIG_PM */
  14667. +
  14668. +#define SHARED_PM_OPS NULL
  14669. +
  14670. +#endif /* CONFIG_PM */
  14671. +
  14672. +static int
  14673. +dpaa_eth_shared_probe(struct platform_device *_of_dev)
  14674. +{
  14675. + int err = 0, i, channel;
  14676. + struct device *dev;
  14677. + struct device_node *dpa_node;
  14678. + struct dpa_bp *dpa_bp;
  14679. + struct dpa_fq *dpa_fq, *tmp;
  14680. + size_t count;
  14681. + struct net_device *net_dev = NULL;
  14682. + struct dpa_priv_s *priv = NULL;
  14683. + struct dpa_percpu_priv_s *percpu_priv;
  14684. + struct fm_port_fqs port_fqs;
  14685. + struct dpa_buffer_layout_s *buf_layout = NULL;
  14686. + struct mac_device *mac_dev;
  14687. + struct task_struct *kth;
  14688. +
  14689. + dev = &_of_dev->dev;
  14690. +
  14691. + dpa_node = dev->of_node;
  14692. +
  14693. + if (!of_device_is_available(dpa_node))
  14694. + return -ENODEV;
  14695. +
  14696. + /* Get the buffer pools assigned to this interface */
  14697. + dpa_bp = dpa_bp_probe(_of_dev, &count);
  14698. + if (IS_ERR(dpa_bp))
  14699. + return PTR_ERR(dpa_bp);
  14700. +
  14701. + for (i = 0; i < count; i++)
  14702. + dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
  14703. +
  14704. + /* Allocate this early, so we can store relevant information in
  14705. + * the private area (needed by 1588 code in dpa_mac_probe)
  14706. + */
  14707. + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
  14708. + if (!net_dev) {
  14709. + dev_err(dev, "alloc_etherdev_mq() failed\n");
  14710. + return -ENOMEM;
  14711. + }
  14712. +
  14713. + /* Do this here, so we can be verbose early */
  14714. + SET_NETDEV_DEV(net_dev, dev);
  14715. + dev_set_drvdata(dev, net_dev);
  14716. +
  14717. + priv = netdev_priv(net_dev);
  14718. + priv->net_dev = net_dev;
  14719. + strcpy(priv->if_type, "shared");
  14720. +
  14721. + priv->msg_enable = netif_msg_init(advanced_debug, -1);
  14722. +
  14723. + mac_dev = dpa_mac_probe(_of_dev);
  14724. + if (IS_ERR_OR_NULL(mac_dev)) {
  14725. + err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
  14726. + goto mac_probe_failed;
  14727. + }
  14728. +
  14729. + /* We have physical ports, so we need to establish
  14730. + * the buffer layout.
  14731. + */
  14732. + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
  14733. + GFP_KERNEL);
  14734. + if (!buf_layout) {
  14735. + dev_err(dev, "devm_kzalloc() failed\n");
+ err = -ENOMEM;
  14736. + goto alloc_failed;
  14737. + }
  14738. + dpa_set_buffers_layout(mac_dev, buf_layout);
  14739. +
  14740. + INIT_LIST_HEAD(&priv->dpa_fq_list);
  14741. +
  14742. + memset(&port_fqs, 0, sizeof(port_fqs));
  14743. +
  14744. + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
  14745. + false, RX);
  14746. + if (!err)
  14747. + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
  14748. + &port_fqs, false, TX);
  14749. + if (err < 0)
  14750. + goto fq_probe_failed;
  14751. +
  14752. + /* bp init */
  14753. + priv->bp_count = count;
  14754. + err = dpa_bp_create(net_dev, dpa_bp, count);
  14755. + if (err < 0)
  14756. + goto bp_create_failed;
  14757. +
  14758. + priv->mac_dev = mac_dev;
  14759. +
  14760. + channel = dpa_get_channel();
  14761. +
  14762. + if (channel < 0) {
  14763. + err = channel;
  14764. + goto get_channel_failed;
  14765. + }
  14766. +
  14767. + priv->channel = (uint16_t)channel;
  14768. +
  14769. + /* Start a thread that will walk the cpus with affine portals
  14770. + * and add this pool channel to each's dequeue mask.
  14771. + */
  14772. + kth = kthread_run(dpaa_eth_add_channel,
  14773. + (void *)(unsigned long)priv->channel,
  14774. + "dpaa_%p:%d", net_dev, priv->channel);
  14775. + if (IS_ERR(kth)) {
  14776. + err = PTR_ERR(kth);
  14777. + goto add_channel_failed;
  14778. + }
  14779. +
  14780. + dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]);
  14781. +
  14782. + /* Create a congestion group for this netdev, with
  14783. + * dynamically-allocated CGR ID.
  14784. + * Must be executed after probing the MAC, but before
  14785. + * assigning the egress FQs to the CGRs.
  14786. + */
  14787. + err = dpaa_eth_cgr_init(priv);
  14788. + if (err < 0) {
  14789. + dev_err(dev, "Error initializing CGR\n");
  14790. + goto cgr_init_failed;
  14791. + }
  14792. +
  14793. + /* Add the FQs to the interface, and make them active */
  14794. + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
  14795. + err = dpa_fq_init(dpa_fq, false);
  14796. + if (err < 0)
  14797. + goto fq_alloc_failed;
  14798. + }
  14799. +
  14800. + priv->buf_layout = buf_layout;
  14801. + priv->tx_headroom =
  14802. + dpa_get_headroom(&priv->buf_layout[TX]);
  14803. +
  14804. + /* All real interfaces need their ports initialized */
  14805. + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
  14806. + buf_layout, dev);
  14807. +
  14808. + /* Allocate and zero the per-cpu private data */
  14809. + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
  14810. +
  14811. + if (priv->percpu_priv == NULL) {
  14812. + dev_err(dev, "devm_alloc_percpu() failed\n");
  14813. + err = -ENOMEM;
  14814. + goto alloc_percpu_failed;
  14815. + }
  14816. + for_each_possible_cpu(i) {
  14817. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  14818. + memset(percpu_priv, 0, sizeof(*percpu_priv));
  14819. + }
  14820. +
  14821. + err = dpa_shared_netdev_init(dpa_node, net_dev);
  14822. +
  14823. + if (err < 0)
  14824. + goto netdev_init_failed;
  14825. +
  14826. + dpaa_eth_sysfs_init(&net_dev->dev);
  14827. +
  14828. + pr_info("fsl_dpa_shared: Probed shared interface %s\n",
  14829. + net_dev->name);
  14830. +
  14831. + return 0;
  14832. +
  14833. +netdev_init_failed:
  14834. +alloc_percpu_failed:
  14835. +fq_alloc_failed:
  14836. + if (net_dev) {
  14837. + dpa_fq_free(dev, &priv->dpa_fq_list);
  14838. + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
  14839. + qman_delete_cgr(&priv->cgr_data.cgr);
  14840. + }
  14841. +cgr_init_failed:
  14842. +add_channel_failed:
  14843. +get_channel_failed:
  14844. + if (net_dev)
  14845. + dpa_bp_free(priv);
  14846. +bp_create_failed:
  14847. +fq_probe_failed:
  14848. + devm_kfree(dev, buf_layout);
  14849. +alloc_failed:
  14850. +mac_probe_failed:
  14851. + dev_set_drvdata(dev, NULL);
  14852. + if (net_dev)
  14853. + free_netdev(net_dev);
  14854. +
  14855. + return err;
  14856. +}
  14857. +
  14858. +static const struct of_device_id dpa_shared_match[] = {
  14859. + {
  14860. + .compatible = "fsl,dpa-ethernet-shared"
  14861. + },
  14862. + {}
  14863. +};
  14864. +MODULE_DEVICE_TABLE(of, dpa_shared_match);
  14865. +
  14866. +static struct platform_driver dpa_shared_driver = {
  14867. + .driver = {
  14868. + .name = KBUILD_MODNAME "-shared",
  14869. + .of_match_table = dpa_shared_match,
  14870. + .owner = THIS_MODULE,
  14871. + .pm = SHARED_PM_OPS,
  14872. + },
  14873. + .probe = dpaa_eth_shared_probe,
  14874. + .remove = dpa_remove
  14875. +};
  14876. +
  14877. +static int __init __cold dpa_shared_load(void)
  14878. +{
  14879. + int _errno;
  14880. +
  14881. + pr_info(DPA_DESCRIPTION "\n");
  14882. +
  14883. + /* Initialize dpaa_eth mirror values */
  14884. + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
  14885. + dpa_max_frm = fm_get_max_frm();
  14886. +
  14887. + _errno = platform_driver_register(&dpa_shared_driver);
  14888. + if (unlikely(_errno < 0)) {
  14889. + pr_err(KBUILD_MODNAME
  14890. + ": %s:%hu:%s(): platform_driver_register() = %d\n",
  14891. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  14892. + }
  14893. +
  14894. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  14895. + KBUILD_BASENAME".c", __func__);
  14896. +
  14897. + return _errno;
  14898. +}
  14899. +module_init(dpa_shared_load);
  14900. +
  14901. +static void __exit __cold dpa_shared_unload(void)
  14902. +{
  14903. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  14904. + KBUILD_BASENAME".c", __func__);
  14905. +
  14906. + platform_driver_unregister(&dpa_shared_driver);
  14907. +}
  14908. +module_exit(dpa_shared_unload);
  14909. --- /dev/null
  14910. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
  14911. @@ -0,0 +1,278 @@
  14912. +/* Copyright 2008-2012 Freescale Semiconductor Inc.
  14913. + *
  14914. + * Redistribution and use in source and binary forms, with or without
  14915. + * modification, are permitted provided that the following conditions are met:
  14916. + * * Redistributions of source code must retain the above copyright
  14917. + * notice, this list of conditions and the following disclaimer.
  14918. + * * Redistributions in binary form must reproduce the above copyright
  14919. + * notice, this list of conditions and the following disclaimer in the
  14920. + * documentation and/or other materials provided with the distribution.
  14921. + * * Neither the name of Freescale Semiconductor nor the
  14922. + * names of its contributors may be used to endorse or promote products
  14923. + * derived from this software without specific prior written permission.
  14924. + *
  14925. + *
  14926. + * ALTERNATIVELY, this software may be distributed under the terms of the
  14927. + * GNU General Public License ("GPL") as published by the Free Software
  14928. + * Foundation, either version 2 of that License or (at your option) any
  14929. + * later version.
  14930. + *
  14931. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  14932. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  14933. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  14934. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  14935. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  14936. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  14937. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  14938. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  14939. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  14940. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  14941. + */
  14942. +
  14943. +#include <linux/init.h>
  14944. +#include <linux/module.h>
  14945. +#include <linux/kthread.h>
  14946. +#include <linux/io.h>
  14947. +#include <linux/of_net.h>
  14948. +#include "dpaa_eth.h"
  14949. +#include "mac.h" /* struct mac_device */
  14950. +#ifdef CONFIG_FSL_DPAA_1588
  14951. +#include "dpaa_1588.h"
  14952. +#endif
  14953. +
  14954. +static ssize_t dpaa_eth_show_addr(struct device *dev,
  14955. + struct device_attribute *attr, char *buf)
  14956. +{
  14957. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  14958. + struct mac_device *mac_dev = priv->mac_dev;
  14959. +
  14960. + if (mac_dev)
  14961. + return sprintf(buf, "%llx",
  14962. + (unsigned long long)mac_dev->res->start);
  14963. + else
  14964. + return sprintf(buf, "none");
  14965. +}
  14966. +
  14967. +static ssize_t dpaa_eth_show_type(struct device *dev,
  14968. + struct device_attribute *attr, char *buf)
  14969. +{
  14970. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  14971. + ssize_t res = 0;
  14972. +
  14973. + if (priv)
  14974. + res = sprintf(buf, "%s", priv->if_type);
  14975. +
  14976. + return res;
  14977. +}
  14978. +
  14979. +static ssize_t dpaa_eth_show_fqids(struct device *dev,
  14980. + struct device_attribute *attr, char *buf)
  14981. +{
  14982. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  14983. + ssize_t bytes = 0;
  14984. + int i = 0;
  14985. + char *str;
  14986. + struct dpa_fq *fq;
  14987. + struct dpa_fq *tmp;
  14988. + struct dpa_fq *prev = NULL;
  14989. + u32 first_fqid = 0;
  14990. + u32 last_fqid = 0;
  14991. + char *prevstr = NULL;
  14992. +
  14993. + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
  14994. + switch (fq->fq_type) {
  14995. + case FQ_TYPE_RX_DEFAULT:
  14996. + str = "Rx default";
  14997. + break;
  14998. + case FQ_TYPE_RX_ERROR:
  14999. + str = "Rx error";
  15000. + break;
  15001. + case FQ_TYPE_RX_PCD:
  15002. + str = "Rx PCD";
  15003. + break;
  15004. + case FQ_TYPE_TX_CONFIRM:
  15005. + str = "Tx default confirmation";
  15006. + break;
  15007. + case FQ_TYPE_TX_CONF_MQ:
  15008. + str = "Tx confirmation (mq)";
  15009. + break;
  15010. + case FQ_TYPE_TX_ERROR:
  15011. + str = "Tx error";
  15012. + break;
  15013. + case FQ_TYPE_TX:
  15014. + str = "Tx";
  15015. + break;
  15016. + case FQ_TYPE_RX_PCD_HI_PRIO:
  15017. + str ="Rx PCD High Priority";
  15018. + break;
  15019. + default:
  15020. + str = "Unknown";
  15021. + }
  15022. +
  15023. + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
  15024. + str != prevstr)) {
  15025. + if (last_fqid == first_fqid)
  15026. + bytes += sprintf(buf + bytes,
  15027. + "%s: %d\n", prevstr, prev->fqid);
  15028. + else
  15029. + bytes += sprintf(buf + bytes,
  15030. + "%s: %d - %d\n", prevstr,
  15031. + first_fqid, last_fqid);
  15032. + }
  15033. +
  15034. + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
  15035. + last_fqid = fq->fqid;
  15036. + else
  15037. + first_fqid = last_fqid = fq->fqid;
  15038. +
  15039. + prev = fq;
  15040. + prevstr = str;
  15041. + i++;
  15042. + }
  15043. +
  15044. + if (prev) {
  15045. + if (last_fqid == first_fqid)
  15046. + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
  15047. + prev->fqid);
  15048. + else
  15049. + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
  15050. + first_fqid, last_fqid);
  15051. + }
  15052. +
  15053. + return bytes;
  15054. +}
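
The loop above coalesces FQs whose IDs are consecutive and whose type string matches into a single "first - last" line, so reading the fqids attribute produces output along these lines (FQID values hypothetical):

	Rx error: 259
	Rx default: 260
	Tx confirmation (mq): 896 - 903
	Tx: 904 - 911
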
  15055. +
  15056. +static ssize_t dpaa_eth_show_bpids(struct device *dev,
  15057. + struct device_attribute *attr, char *buf)
  15058. +{
  15059. + ssize_t bytes = 0;
  15060. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  15061. + struct dpa_bp *dpa_bp = priv->dpa_bp;
  15062. + int i = 0;
  15063. +
  15064. + for (i = 0; i < priv->bp_count; i++)
  15065. + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
  15066. + dpa_bp[i].bpid);
  15067. +
  15068. + return bytes;
  15069. +}
  15070. +
  15071. +static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
  15072. + struct device_attribute *attr, char *buf)
  15073. +{
  15074. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  15075. + struct mac_device *mac_dev = priv->mac_dev;
  15076. + int n = 0;
  15077. +
  15078. + if (mac_dev)
  15079. + n = fm_mac_dump_regs(mac_dev, buf, n);
  15080. + else
  15081. + return sprintf(buf, "no mac registers\n");
  15082. +
  15083. + return n;
  15084. +}
  15085. +
  15086. +static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
  15087. + struct device_attribute *attr, char *buf)
  15088. +{
  15089. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  15090. + struct mac_device *mac_dev = priv->mac_dev;
  15091. + int n = 0;
  15092. +
  15093. + if (mac_dev)
  15094. + n = fm_mac_dump_rx_stats(mac_dev, buf, n);
  15095. + else
  15096. + return sprintf(buf, "no mac rx stats\n");
  15097. +
  15098. + return n;
  15099. +}
  15100. +
  15101. +static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
  15102. + struct device_attribute *attr, char *buf)
  15103. +{
  15104. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  15105. + struct mac_device *mac_dev = priv->mac_dev;
  15106. + int n = 0;
  15107. +
  15108. + if (mac_dev)
  15109. + n = fm_mac_dump_tx_stats(mac_dev, buf, n);
  15110. + else
  15111. + return sprintf(buf, "no mac tx stats\n");
  15112. +
  15113. + return n;
  15114. +}
  15115. +
  15116. +#ifdef CONFIG_FSL_DPAA_1588
  15117. +static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
  15118. + struct device_attribute *attr, char *buf)
  15119. +{
  15120. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  15121. +
  15122. + if (priv->tsu && priv->tsu->valid)
  15123. + return sprintf(buf, "1\n");
  15124. + else
  15125. + return sprintf(buf, "0\n");
  15126. +}
  15127. +
  15128. +static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
  15129. + struct device_attribute *attr,
  15130. + const char *buf, size_t count)
  15131. +{
  15132. + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
  15133. + unsigned int num;
  15134. + unsigned long flags;
  15135. +
  15136. + if (kstrtouint(buf, 0, &num) < 0)
  15137. + return -EINVAL;
  15138. +
  15139. + local_irq_save(flags);
  15140. +
  15141. + if (num) {
  15142. + if (priv->tsu)
  15143. + priv->tsu->valid = TRUE;
  15144. + } else {
  15145. + if (priv->tsu)
  15146. + priv->tsu->valid = FALSE;
  15147. + }
  15148. +
  15149. + local_irq_restore(flags);
  15150. +
  15151. + return count;
  15152. +}
  15153. +#endif
  15154. +
  15155. +static struct device_attribute dpaa_eth_attrs[] = {
  15156. + __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
  15157. + __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
  15158. + __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
  15159. + __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
  15160. + __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
  15161. + __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
  15162. + __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
  15163. +#ifdef CONFIG_FSL_DPAA_1588
  15164. + __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
  15165. + dpaa_eth_set_ptp_1588),
  15166. +#endif
  15167. +};
  15168. +
  15169. +void dpaa_eth_sysfs_init(struct device *dev)
  15170. +{
  15171. + int i;
  15172. +
  15173. + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
  15174. + if (device_create_file(dev, &dpaa_eth_attrs[i])) {
  15175. + dev_err(dev, "Error creating sysfs file\n");
  15176. + while (i > 0)
  15177. + device_remove_file(dev, &dpaa_eth_attrs[--i]);
  15178. + return;
  15179. + }
  15180. +}
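
Note the rollback on failure: if device_create_file() fails part-way through, the inner while loop removes every attribute created so far, so the netdev's sysfs directory is never left half-populated.
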
  15181. +EXPORT_SYMBOL(dpaa_eth_sysfs_init);
  15182. +
  15183. +void dpaa_eth_sysfs_remove(struct device *dev)
  15184. +{
  15185. + int i;
  15186. +
  15187. + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
  15188. + device_remove_file(dev, &dpaa_eth_attrs[i]);
  15189. +}
  15190. --- /dev/null
  15191. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
  15192. @@ -0,0 +1,144 @@
  15193. +/* Copyright 2013 Freescale Semiconductor Inc.
  15194. + *
  15195. + * Redistribution and use in source and binary forms, with or without
  15196. + * modification, are permitted provided that the following conditions are met:
  15197. + * * Redistributions of source code must retain the above copyright
  15198. + * notice, this list of conditions and the following disclaimer.
  15199. + * * Redistributions in binary form must reproduce the above copyright
  15200. + * notice, this list of conditions and the following disclaimer in the
  15201. + * documentation and/or other materials provided with the distribution.
  15202. + * * Neither the name of Freescale Semiconductor nor the
  15203. + * names of its contributors may be used to endorse or promote products
  15204. + * derived from this software without specific prior written permission.
  15205. + *
  15206. + *
  15207. + * ALTERNATIVELY, this software may be distributed under the terms of the
  15208. + * GNU General Public License ("GPL") as published by the Free Software
  15209. + * Foundation, either version 2 of that License or (at your option) any
  15210. + * later version.
  15211. + *
  15212. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  15213. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  15214. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  15215. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  15216. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  15217. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  15218. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  15219. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  15220. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  15221. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  15222. + */
  15223. +
  15224. +#undef TRACE_SYSTEM
  15225. +#define TRACE_SYSTEM dpaa_eth
  15226. +
  15227. +#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
  15228. +#define _DPAA_ETH_TRACE_H
  15229. +
  15230. +#include <linux/skbuff.h>
  15231. +#include <linux/netdevice.h>
  15232. +#include "dpaa_eth.h"
  15233. +#include <linux/tracepoint.h>
  15234. +
  15235. +#define fd_format_name(format) { qm_fd_##format, #format }
  15236. +#define fd_format_list \
  15237. + fd_format_name(contig), \
  15238. + fd_format_name(sg)
  15239. +#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
  15240. + " status=0x%08x"
  15241. +
  15242. +/* This is used to declare a class of events.
  14943. + * Individual events of this type will be defined below.
  15244. + */
  15245. +
  15246. +/* Store details about a frame descriptor and the FQ on which it was
  15247. + * transmitted/received.
  15248. + */
  15249. +DECLARE_EVENT_CLASS(dpaa_eth_fd,
  15250. + /* Trace function prototype */
  15251. + TP_PROTO(struct net_device *netdev,
  15252. + struct qman_fq *fq,
  15253. + const struct qm_fd *fd),
  15254. +
  15255. + /* Repeat argument list here */
  15256. + TP_ARGS(netdev, fq, fd),
  15257. +
  15258. + /* A structure containing the relevant information we want to record.
  15259. + * Declare name and type for each normal element, name, type and size
  15260. + * for arrays. Use __string for variable length strings.
  15261. + */
  15262. + TP_STRUCT__entry(
  15263. + __field(u32, fqid)
  15264. + __field(u64, fd_addr)
  15265. + __field(u8, fd_format)
  15266. + __field(u16, fd_offset)
  15267. + __field(u32, fd_length)
  15268. + __field(u32, fd_status)
  15269. + __string(name, netdev->name)
  15270. + ),
  15271. +
  15272. + /* The function that assigns values to the above declared fields */
  15273. + TP_fast_assign(
  15274. + __entry->fqid = fq->fqid;
  15275. + __entry->fd_addr = qm_fd_addr_get64(fd);
  15276. + __entry->fd_format = fd->format;
  15277. + __entry->fd_offset = dpa_fd_offset(fd);
  15278. + __entry->fd_length = dpa_fd_length(fd);
  15279. + __entry->fd_status = fd->status;
  15280. + __assign_str(name, netdev->name);
  15281. + ),
  15282. +
  15283. + /* This is what gets printed when the trace event is triggered */
  15284. + /* TODO: print the status using __print_flags() */
  15285. + TP_printk(TR_FMT,
  15286. + __get_str(name), __entry->fqid, __entry->fd_addr,
  15287. + __print_symbolic(__entry->fd_format, fd_format_list),
  15288. + __entry->fd_offset, __entry->fd_length, __entry->fd_status)
  15289. +);
  15290. +
  15291. +/* Now declare events of the above type. Format is:
  15292. + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
  15293. + */
  15294. +
  15295. +/* Tx (egress) fd */
  15296. +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
  15297. +
  15298. + TP_PROTO(struct net_device *netdev,
  15299. + struct qman_fq *fq,
  15300. + const struct qm_fd *fd),
  15301. +
  15302. + TP_ARGS(netdev, fq, fd)
  15303. +);
  15304. +
  15305. +/* Rx fd */
  15306. +DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
  15307. +
  15308. + TP_PROTO(struct net_device *netdev,
  15309. + struct qman_fq *fq,
  15310. + const struct qm_fd *fd),
  15311. +
  15312. + TP_ARGS(netdev, fq, fd)
  15313. +);
  15314. +
  15315. +/* Tx confirmation fd */
  15316. +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
  15317. +
  15318. + TP_PROTO(struct net_device *netdev,
  15319. + struct qman_fq *fq,
  15320. + const struct qm_fd *fd),
  15321. +
  15322. + TP_ARGS(netdev, fq, fd)
  15323. +);
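
Each DEFINE_EVENT above generates a trace_<name>() helper. A hypothetical call site (the context is illustrative; it assumes the driver's struct dpa_fq embeds its qman_fq as fq_base):

	/* Hypothetical: record the FD just before enqueueing it for Tx. */
	trace_dpa_tx_fd(net_dev, &dpa_fq->fq_base, &fd);
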
  15324. +
  15325. +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
  15326. + * The syntax is the same as for DECLARE_EVENT_CLASS().
  15327. + */
  15328. +
  15329. +#endif /* _DPAA_ETH_TRACE_H */
  15330. +
  15331. +/* This must be outside ifdef _DPAA_ETH_TRACE_H */
  15332. +#undef TRACE_INCLUDE_PATH
  15333. +#define TRACE_INCLUDE_PATH .
  15334. +#undef TRACE_INCLUDE_FILE
  15335. +#define TRACE_INCLUDE_FILE dpaa_eth_trace
  15336. +#include <trace/define_trace.h>
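
Because this header feeds trace/define_trace.h, exactly one compilation unit must expand the tracepoint bodies by defining CREATE_TRACE_POINTS before including it; all other users include the header plainly:

	#define CREATE_TRACE_POINTS
	#include "dpaa_eth_trace.h"
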
  15337. --- /dev/null
  15338. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
  15339. @@ -0,0 +1,544 @@
  15340. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  15341. + *
  15342. + * Redistribution and use in source and binary forms, with or without
  15343. + * modification, are permitted provided that the following conditions are met:
  15344. + * * Redistributions of source code must retain the above copyright
  15345. + * notice, this list of conditions and the following disclaimer.
  15346. + * * Redistributions in binary form must reproduce the above copyright
  15347. + * notice, this list of conditions and the following disclaimer in the
  15348. + * documentation and/or other materials provided with the distribution.
  15349. + * * Neither the name of Freescale Semiconductor nor the
  15350. + * names of its contributors may be used to endorse or promote products
  15351. + * derived from this software without specific prior written permission.
  15352. + *
  15353. + *
  15354. + * ALTERNATIVELY, this software may be distributed under the terms of the
  15355. + * GNU General Public License ("GPL") as published by the Free Software
  15356. + * Foundation, either version 2 of that License or (at your option) any
  15357. + * later version.
  15358. + *
  15359. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  15360. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  15361. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  15362. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  15363. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  15364. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  15365. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  15366. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  15367. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  15368. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  15369. + */
  15370. +
  15371. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  15372. +#define pr_fmt(fmt) \
  15373. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  15374. + KBUILD_BASENAME".c", __LINE__, __func__
  15375. +#else
  15376. +#define pr_fmt(fmt) \
  15377. + KBUILD_MODNAME ": " fmt
  15378. +#endif
  15379. +
  15380. +#include <linux/string.h>
  15381. +
  15382. +#include "dpaa_eth.h"
  15383. +#include "mac.h" /* struct mac_device */
  15384. +#include "dpaa_eth_common.h"
  15385. +
  15386. +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
  15387. + "interrupts",
  15388. + "rx packets",
  15389. + "tx packets",
  15390. + "tx recycled",
  15391. + "tx confirm",
  15392. + "tx S/G",
  15393. + "rx S/G",
  15394. + "tx error",
  15395. + "rx error",
  15396. + "bp count"
  15397. +};
  15398. +
  15399. +static const char dpa_stats_global[][ETH_GSTRING_LEN] = {
  15400. + /* dpa rx errors */
  15401. + "rx dma error",
  15402. + "rx frame physical error",
  15403. + "rx frame size error",
  15404. + "rx header error",
  15405. + "rx csum error",
  15406. +
  15407. + /* demultiplexing errors */
  15408. + "qman cg_tdrop",
  15409. + "qman wred",
  15410. + "qman error cond",
  15411. + "qman early window",
  15412. + "qman late window",
  15413. + "qman fq tdrop",
  15414. + "qman fq retired",
  15415. + "qman orp disabled",
  15416. +
  15417. + /* congestion related stats */
  15418. + "congestion time (ms)",
  15419. + "entered congestion",
  15420. + "congested (0/1)"
  15421. +};
  15422. +
  15423. +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
  15424. +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
  15425. +
  15426. +static int __cold dpa_get_settings(struct net_device *net_dev,
  15427. + struct ethtool_cmd *et_cmd)
  15428. +{
  15429. + int _errno;
  15430. + struct dpa_priv_s *priv;
  15431. +
  15432. + priv = netdev_priv(net_dev);
  15433. +
  15434. + if (priv->mac_dev == NULL) {
  15435. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15436. + return -ENODEV;
  15437. + }
  15438. + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
  15439. + netdev_dbg(net_dev, "phy device not initialized\n");
  15440. + return 0;
  15441. + }
  15442. +
  15443. + _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
  15444. + if (unlikely(_errno < 0))
  15445. + netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
  15446. +
  15447. + return _errno;
  15448. +}
  15449. +
  15450. +static int __cold dpa_set_settings(struct net_device *net_dev,
  15451. + struct ethtool_cmd *et_cmd)
  15452. +{
  15453. + int _errno;
  15454. + struct dpa_priv_s *priv;
  15455. +
  15456. + priv = netdev_priv(net_dev);
  15457. +
  15458. + if (priv->mac_dev == NULL) {
  15459. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15460. + return -ENODEV;
  15461. + }
  15462. + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
  15463. + netdev_err(net_dev, "phy device not initialized\n");
  15464. + return -ENODEV;
  15465. + }
  15466. +
  15467. + _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
  15468. + if (unlikely(_errno < 0))
  15469. + netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
  15470. +
  15471. + return _errno;
  15472. +}
  15473. +
  15474. +static void __cold dpa_get_drvinfo(struct net_device *net_dev,
  15475. + struct ethtool_drvinfo *drvinfo)
  15476. +{
  15477. + int _errno;
  15478. +
  15479. + strncpy(drvinfo->driver, KBUILD_MODNAME,
  15480. + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
  15481. + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  15482. + "%X", 0);
  15483. +
  15484. + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
  15485. + /* Truncated output */
  15486. + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
  15487. + } else if (unlikely(_errno < 0)) {
  15488. + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
  15489. + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
  15490. + }
  15491. + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
  15492. + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
  15493. +}
  15494. +
  15495. +static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
  15496. +{
  15497. + return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
  15498. +}
  15499. +
  15500. +static void __cold dpa_set_msglevel(struct net_device *net_dev,
  15501. + uint32_t msg_enable)
  15502. +{
  15503. + ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
  15504. +}
  15505. +
  15506. +static int __cold dpa_nway_reset(struct net_device *net_dev)
  15507. +{
  15508. + int _errno;
  15509. + struct dpa_priv_s *priv;
  15510. +
  15511. + priv = netdev_priv(net_dev);
  15512. +
  15513. + if (priv->mac_dev == NULL) {
  15514. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15515. + return -ENODEV;
  15516. + }
  15517. + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
  15518. + netdev_err(net_dev, "phy device not initialized\n");
  15519. + return -ENODEV;
  15520. + }
  15521. +
  15522. + _errno = 0;
  15523. + if (priv->mac_dev->phy_dev->autoneg) {
  15524. + _errno = phy_start_aneg(priv->mac_dev->phy_dev);
  15525. + if (unlikely(_errno < 0))
  15526. + netdev_err(net_dev, "phy_start_aneg() = %d\n",
  15527. + _errno);
  15528. + }
  15529. +
  15530. + return _errno;
  15531. +}
  15532. +
  15533. +static void __cold dpa_get_pauseparam(struct net_device *net_dev,
  15534. + struct ethtool_pauseparam *epause)
  15535. +{
  15536. + struct dpa_priv_s *priv;
  15537. + struct mac_device *mac_dev;
  15538. + struct phy_device *phy_dev;
  15539. +
  15540. + priv = netdev_priv(net_dev);
  15541. + mac_dev = priv->mac_dev;
  15542. +
  15543. + if (mac_dev == NULL) {
  15544. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15545. + return;
  15546. + }
  15547. +
  15548. + phy_dev = mac_dev->phy_dev;
  15549. + if (unlikely(phy_dev == NULL)) {
  15550. + netdev_err(net_dev, "phy device not initialized\n");
  15551. + return;
  15552. + }
  15553. +
  15554. + epause->autoneg = mac_dev->autoneg_pause;
  15555. + epause->rx_pause = mac_dev->rx_pause_active;
  15556. + epause->tx_pause = mac_dev->tx_pause_active;
  15557. +}
  15558. +
  15559. +static int __cold dpa_set_pauseparam(struct net_device *net_dev,
  15560. + struct ethtool_pauseparam *epause)
  15561. +{
  15562. + struct dpa_priv_s *priv;
  15563. + struct mac_device *mac_dev;
  15564. + struct phy_device *phy_dev;
  15565. + int _errno;
  15566. + u32 newadv, oldadv;
  15567. + bool rx_pause, tx_pause;
  15568. +
  15569. + priv = netdev_priv(net_dev);
  15570. + mac_dev = priv->mac_dev;
  15571. +
  15572. + if (mac_dev == NULL) {
  15573. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15574. + return -ENODEV;
  15575. + }
  15576. +
  15577. + phy_dev = mac_dev->phy_dev;
  15578. + if (unlikely(phy_dev == NULL)) {
  15579. + netdev_err(net_dev, "phy device not initialized\n");
  15580. + return -ENODEV;
  15581. + }
  15582. +
  15583. + if (!(phy_dev->supported & SUPPORTED_Pause) ||
  15584. + (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
  15585. + (epause->rx_pause != epause->tx_pause)))
  15586. + return -EINVAL;
  15587. +
  15588. + /* The MAC should know how to handle PAUSE frame autonegotiation before
  15589. + * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
  15590. + * settings.
  15591. + */
  15592. + mac_dev->autoneg_pause = !!epause->autoneg;
  15593. + mac_dev->rx_pause_req = !!epause->rx_pause;
  15594. + mac_dev->tx_pause_req = !!epause->tx_pause;
  15595. +
  15596. + /* Determine the sym/asym advertised PAUSE capabilities from the desired
  15597. + * rx/tx pause settings.
  15598. + */
  15599. + newadv = 0;
  15600. + if (epause->rx_pause)
  15601. + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
  15602. + if (epause->tx_pause)
  15603. + newadv |= ADVERTISED_Asym_Pause;
  15604. +
  15605. + oldadv = phy_dev->advertising &
  15606. + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
  15607. +
  15608. + /* If there are differences between the old and the new advertised
  15609. + * values, restart PHY autonegotiation and advertise the new values.
  15610. + */
  15611. + if (oldadv != newadv) {
  15612. + phy_dev->advertising &= ~(ADVERTISED_Pause
  15613. + | ADVERTISED_Asym_Pause);
  15614. + phy_dev->advertising |= newadv;
  15615. + if (phy_dev->autoneg) {
  15616. + _errno = phy_start_aneg(phy_dev);
  15617. + if (unlikely(_errno < 0))
  15618. + netdev_err(net_dev, "phy_start_aneg() = %d\n",
  15619. + _errno);
  15620. + }
  15621. + }
  15622. +
  15623. + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
  15624. + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
  15625. + if (unlikely(_errno < 0))
  15626. + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
  15627. +
  15628. + return _errno;
  15629. +}
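
The rx/tx-to-advertisement mapping above collapses to a small truth table. A self-contained sketch of just that step (the ADVERTISED_* flags are the standard ethtool ones from linux/ethtool.h; the function name is illustrative):

	/* Sketch of the mapping used by dpa_set_pauseparam() above. */
	static u32 pause_to_advertising(bool rx_pause, bool tx_pause)
	{
		u32 adv = 0;

		if (rx_pause)	/* rx implies symmetric + asymmetric PAUSE */
			adv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		if (tx_pause)	/* tx alone implies asymmetric only */
			adv |= ADVERTISED_Asym_Pause;

		/*
		 * rx tx -> advertised
		 *  0  0    none
		 *  0  1    Asym_Pause
		 *  1  0    Pause | Asym_Pause
		 *  1  1    Pause | Asym_Pause
		 */
		return adv;
	}
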
  15630. +
  15631. +#ifdef CONFIG_PM
  15632. +static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
  15633. +{
  15634. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  15635. +
  15636. + wol->supported = 0;
  15637. + wol->wolopts = 0;
  15638. +
  15639. + if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
  15640. + return;
  15641. +
  15642. + if (priv->wol & DPAA_WOL_MAGIC) {
  15643. + wol->supported = WAKE_MAGIC;
  15644. + wol->wolopts = WAKE_MAGIC;
  15645. + }
  15646. +}
  15647. +
  15648. +static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
  15649. +{
  15650. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  15651. +
  15652. + if (priv->mac_dev == NULL) {
  15653. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15654. + return -ENODEV;
  15655. + }
  15656. +
  15657. + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
  15658. + netdev_dbg(net_dev, "phy device not initialized\n");
  15659. + return -ENODEV;
  15660. + }
  15661. +
  15662. + if (!device_can_wakeup(net_dev->dev.parent) ||
  15663. + (wol->wolopts & ~WAKE_MAGIC))
  15664. + return -EOPNOTSUPP;
  15665. +
  15666. + priv->wol = 0;
  15667. +
  15668. + if (wol->wolopts & WAKE_MAGIC) {
  15669. + priv->wol = DPAA_WOL_MAGIC;
  15670. + device_set_wakeup_enable(net_dev->dev.parent, 1);
  15671. + } else {
  15672. + device_set_wakeup_enable(net_dev->dev.parent, 0);
  15673. + }
  15674. +
  15675. + return 0;
  15676. +}
  15677. +#endif
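
From userspace, "ethtool -s <iface> wol g" enables magic-packet wake (mapping to WAKE_MAGIC here) and "ethtool -s <iface> wol d" clears it; both requests land in dpa_set_wol().
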
  15678. +
  15679. +static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
  15680. +{
  15681. + struct dpa_priv_s *priv;
  15682. +
  15683. + priv = netdev_priv(net_dev);
  15684. + if (priv->mac_dev == NULL) {
  15685. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15686. + return -ENODEV;
  15687. + }
  15688. +
  15689. + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
  15690. + netdev_err(net_dev, "phy device not initialized\n");
  15691. + return -ENODEV;
  15692. + }
  15693. +
  15694. + return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
  15695. +}
  15696. +
  15697. +static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
  15698. +{
  15699. + struct dpa_priv_s *priv;
  15700. +
  15701. + priv = netdev_priv(net_dev);
  15702. + if (priv->mac_dev == NULL) {
  15703. + netdev_info(net_dev, "This is a MAC-less interface\n");
  15704. + return -ENODEV;
  15705. + }
  15706. +
  15707. + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
  15708. + netdev_err(net_dev, "phy device not initialized\n");
  15709. + return -ENODEV;
  15710. + }
  15711. +
  15712. + return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
  15713. +}
  15714. +
  15715. +static int dpa_get_sset_count(struct net_device *net_dev, int type)
  15716. +{
  15717. + unsigned int total_stats, num_stats;
  15718. +
  15719. + num_stats = num_online_cpus() + 1;
  15720. + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
  15721. +
  15722. + switch (type) {
  15723. + case ETH_SS_STATS:
  15724. + return total_stats;
  15725. + default:
  15726. + return -EOPNOTSUPP;
  15727. + }
  15728. +}
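
With the tables above, this works out to (num_online_cpus() + 1) * 10 + 16 values: each per-CPU statistic occupies one slot per online CPU plus a total. On a hypothetical 4-CPU part, ETH_SS_STATS therefore reports 5 * 10 + 16 = 66 u64 counters.
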
  15729. +
  15730. +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
  15731. + int crr_cpu, u64 bp_count, u64 *data)
  15732. +{
  15733. + int num_stat_values = num_cpus + 1;
  15734. + int crr_stat = 0;
  15735. +
  15736. + /* update current CPU's stats and also add them to the total values */
  15737. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
  15738. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
  15739. +
  15740. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
  15741. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
  15742. +
  15743. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
  15744. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
  15745. +
  15746. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
  15747. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
  15748. +
  15749. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
  15750. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
  15751. +
  15752. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
  15753. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
  15754. +
  15755. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
  15756. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
  15757. +
  15758. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
  15759. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
  15760. +
  15761. + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
  15762. + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
  15763. +
  15764. + data[crr_stat * num_stat_values + crr_cpu] = bp_count;
  15765. + data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
  15766. +}
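
copy_stats() writes the per-CPU block row-major by statistic, each row holding one slot per online CPU followed by a trailing running total. A minimal illustration of the indexing (not driver code; the helper name is hypothetical):

	/*
	 * With num_cpus == 2 the per-CPU block is laid out as:
	 *   [stat0/cpu0][stat0/cpu1][stat0/total][stat1/cpu0]...
	 */
	static inline int dpa_stat_slot(int stat, int cpu, int num_cpus)
	{
		return stat * (num_cpus + 1) + cpu;	/* cpu == num_cpus -> total */
	}
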
  15767. +
  15768. +static void dpa_get_ethtool_stats(struct net_device *net_dev,
  15769. + struct ethtool_stats *stats, u64 *data)
  15770. +{
  15771. + u64 bp_count, cg_time, cg_num, cg_status;
  15772. + struct dpa_percpu_priv_s *percpu_priv;
  15773. + struct qm_mcr_querycgr query_cgr;
  15774. + struct dpa_rx_errors rx_errors;
  15775. + struct dpa_ern_cnt ern_cnt;
  15776. + struct dpa_priv_s *priv;
  15777. + unsigned int num_cpus, offset;
  15778. + struct dpa_bp *dpa_bp;
  15779. + int total_stats, i;
  15780. +
  15781. + total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
  15782. + priv = netdev_priv(net_dev);
  15783. + dpa_bp = priv->dpa_bp;
  15784. + num_cpus = num_online_cpus();
  15785. + bp_count = 0;
  15786. +
  15787. + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
  15788. + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
  15789. + memset(data, 0, total_stats * sizeof(u64));
  15790. +
  15791. + for_each_online_cpu(i) {
  15792. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  15793. +
  15794. + if (dpa_bp->percpu_count)
  15795. + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
  15796. +
  15797. + rx_errors.dme += percpu_priv->rx_errors.dme;
  15798. + rx_errors.fpe += percpu_priv->rx_errors.fpe;
  15799. + rx_errors.fse += percpu_priv->rx_errors.fse;
  15800. + rx_errors.phe += percpu_priv->rx_errors.phe;
  15801. + rx_errors.cse += percpu_priv->rx_errors.cse;
  15802. +
  15803. + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
  15804. + ern_cnt.wred += percpu_priv->ern_cnt.wred;
  15805. + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
  15806. + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
  15807. + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
  15808. + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
  15809. + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
  15810. + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
  15811. +
  15812. + copy_stats(percpu_priv, num_cpus, i, bp_count, data);
  15813. + }
  15814. +
  15815. + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
  15816. + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
  15817. +
  15818. + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
  15819. + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
  15820. +
  15821. + /* gather congestion related counters */
  15822. + cg_num = 0;
  15823. + cg_status = 0;
  15824. + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
  15825. + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
  15826. + cg_num = priv->cgr_data.cgr_congested_count;
  15827. + cg_status = query_cgr.cgr.cs;
  15828. +
  15829. + /* reset congestion stats (like the QMan API does) */
  15830. + priv->cgr_data.congested_jiffies = 0;
  15831. + priv->cgr_data.cgr_congested_count = 0;
  15832. + }
  15833. +
  15834. + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
  15835. + data[offset++] = cg_time;
  15836. + data[offset++] = cg_num;
  15837. + data[offset++] = cg_status;
  15838. +}
  15839. +
  15840. +static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
  15841. +{
  15842. + unsigned int i, j, num_cpus, size;
  15843. + char stat_string_cpu[ETH_GSTRING_LEN];
  15844. + u8 *strings;
  15845. +
  15846. + strings = data;
  15847. + num_cpus = num_online_cpus();
  15848. + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
  15849. +
  15850. + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
  15851. + for (j = 0; j < num_cpus; j++) {
  15852. + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+ dpa_stats_percpu[i], j);
  15853. + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
  15854. + strings += ETH_GSTRING_LEN;
  15855. + }
  15856. + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+ dpa_stats_percpu[i]);
  15857. + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
  15858. + strings += ETH_GSTRING_LEN;
  15859. + }
  15860. + memcpy(strings, dpa_stats_global, size);
  15861. +}
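
The string table is built in the same order copy_stats() fills the data array: for each per-CPU statistic, one "[CPU n]" label per online CPU followed by a "[TOTAL]" label, then the global strings. Keeping the two walks in lockstep is what makes each "ethtool -S" label line up with its value.
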
  15862. +
  15863. +const struct ethtool_ops dpa_ethtool_ops = {
  15864. + .get_settings = dpa_get_settings,
  15865. + .set_settings = dpa_set_settings,
  15866. + .get_drvinfo = dpa_get_drvinfo,
  15867. + .get_msglevel = dpa_get_msglevel,
  15868. + .set_msglevel = dpa_set_msglevel,
  15869. + .nway_reset = dpa_nway_reset,
  15870. + .get_pauseparam = dpa_get_pauseparam,
  15871. + .set_pauseparam = dpa_set_pauseparam,
  15872. + .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
  15873. + .get_link = ethtool_op_get_link,
  15874. + .get_eee = dpa_get_eee,
  15875. + .set_eee = dpa_set_eee,
  15876. + .get_sset_count = dpa_get_sset_count,
  15877. + .get_ethtool_stats = dpa_get_ethtool_stats,
  15878. + .get_strings = dpa_get_strings,
  15879. +#ifdef CONFIG_PM
  15880. + .get_wol = dpa_get_wol,
  15881. + .set_wol = dpa_set_wol,
  15882. +#endif
  15883. +};
  15884. --- /dev/null
  15885. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
  15886. @@ -0,0 +1,286 @@
  15887. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  15888. + *
  15889. + * Redistribution and use in source and binary forms, with or without
  15890. + * modification, are permitted provided that the following conditions are met:
  15891. + * * Redistributions of source code must retain the above copyright
  15892. + * notice, this list of conditions and the following disclaimer.
  15893. + * * Redistributions in binary form must reproduce the above copyright
  15894. + * notice, this list of conditions and the following disclaimer in the
  15895. + * documentation and/or other materials provided with the distribution.
  15896. + * * Neither the name of Freescale Semiconductor nor the
  15897. + * names of its contributors may be used to endorse or promote products
  15898. + * derived from this software without specific prior written permission.
  15899. + *
  15900. + *
  15901. + * ALTERNATIVELY, this software may be distributed under the terms of the
  15902. + * GNU General Public License ("GPL") as published by the Free Software
  15903. + * Foundation, either version 2 of that License or (at your option) any
  15904. + * later version.
  15905. + *
  15906. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  15907. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  15908. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  15909. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  15910. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  15911. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  15912. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  15913. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  15914. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  15915. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  15916. + */
  15917. +
  15918. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  15919. +#define pr_fmt(fmt) \
  15920. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  15921. + KBUILD_BASENAME".c", __LINE__, __func__
  15922. +#else
  15923. +#define pr_fmt(fmt) \
  15924. + KBUILD_MODNAME ": " fmt
  15925. +#endif
  15926. +
  15927. +#include <linux/string.h>
  15928. +
  15929. +#include "dpaa_eth.h"
  15930. +#include "dpaa_eth_common.h"
  15931. +#include "dpaa_eth_generic.h"
  15932. +
  15933. +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
  15934. + "interrupts",
  15935. + "rx packets",
  15936. + "tx packets",
  15937. + "tx recycled",
  15938. + "tx confirm",
  15939. + "tx S/G",
  15940. + "rx S/G (N/A)",
  15941. + "tx error",
  15942. + "rx error",
  15943. + "bp count",
  15944. + "bp draining count"
  15945. +};
  15946. +
  15947. +static const char dpa_stats_global[][ETH_GSTRING_LEN] = {
  15948. + /* dpa rx errors */
  15949. + "rx dma error",
  15950. + "rx frame physical error",
  15951. + "rx frame size error",
  15952. + "rx header error",
  15953. + "rx csum error",
  15954. +
  15955. + /* demultiplexing errors */
  15956. + "qman cg_tdrop",
  15957. + "qman wred",
  15958. + "qman error cond",
  15959. + "qman early window",
  15960. + "qman late window",
  15961. + "qman fq tdrop",
  15962. + "qman fq retired",
  15963. + "qman orp disabled",
  15964. +};
  15965. +
  15966. +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
  15967. +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
  15968. +
  15969. +static int __cold dpa_generic_get_settings(struct net_device *net_dev,
  15970. + struct ethtool_cmd *et_cmd)
  15971. +{
  15972. + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
  15973. + return -ENODEV;
  15974. +}
  15975. +
  15976. +static int __cold dpa_generic_set_settings(struct net_device *net_dev,
  15977. + struct ethtool_cmd *et_cmd)
  15978. +{
  15979. + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
  15980. + return -ENODEV;
  15981. +}
  15982. +
  15983. +static void __cold dpa_generic_get_drvinfo(struct net_device *net_dev,
  15984. + struct ethtool_drvinfo *drvinfo)
  15985. +{
  15986. + int _errno;
  15987. +
  15988. + strncpy(drvinfo->driver, KBUILD_MODNAME,
  15989. + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
  15990. + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  15991. + "%X", 0);
  15992. +
  15993. + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
  15994. + /* Truncated output */
  15995. + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
  15996. + } else if (unlikely(_errno < 0)) {
  15997. + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
  15998. + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
  15999. + }
  16000. + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
  16001. + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
  16002. +}
  16003. +
  16004. +static uint32_t __cold dpa_generic_get_msglevel(struct net_device *net_dev)
  16005. +{
  16006. + return ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable;
  16007. +}
  16008. +
  16009. +static void __cold dpa_generic_set_msglevel(struct net_device *net_dev,
  16010. + uint32_t msg_enable)
  16011. +{
  16012. + ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable =
  16013. + msg_enable;
  16014. +}
  16015. +
  16016. +static int __cold dpa_generic_nway_reset(struct net_device *net_dev)
  16017. +{
  16018. + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
  16019. + return -ENODEV;
  16020. +}
  16021. +
  16022. +static int dpa_generic_get_sset_count(struct net_device *net_dev, int type)
  16023. +{
  16024. + unsigned int total_stats, num_stats;
  16025. +
  16026. + num_stats = num_online_cpus() + 1;
  16027. + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
  16028. +
  16029. + switch (type) {
  16030. + case ETH_SS_STATS:
  16031. + return total_stats;
  16032. + default:
  16033. + return -EOPNOTSUPP;
  16034. + }
  16035. +}
  16036. +
  16037. +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv,
  16038. + int num_cpus, int crr_cpu, u64 bp_count,
  16039. + u64 bp_drain_count, u64 *data)
  16040. +{
  16041. + int num_values = num_cpus + 1;
  16042. + int crr = 0;
  16043. +
  16044. + /* update current CPU's stats and also add them to the total values */
  16045. + data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
  16046. + data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
  16047. +
  16048. + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
  16049. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
  16050. +
  16051. + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
  16052. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
  16053. +
  16054. + data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
  16055. + data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
  16056. +
  16057. + data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
  16058. + data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
  16059. +
  16060. + data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
  16061. + data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
  16062. +
  16063. + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
  16064. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
  16065. +
  16066. + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
  16067. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
  16068. +
  16069. + data[crr * num_values + crr_cpu] = bp_count;
  16070. + data[crr++ * num_values + num_cpus] += bp_count;
  16071. +
  16072. + data[crr * num_values + crr_cpu] = bp_drain_count;
  16073. + data[crr++ * num_values + num_cpus] += bp_drain_count;
  16074. +}
  16075. +
  16076. +static void dpa_generic_get_ethtool_stats(struct net_device *net_dev,
  16077. + struct ethtool_stats *stats,
  16078. + u64 *data)
  16079. +{
  16080. + struct dpa_percpu_priv_s *percpu_priv;
  16081. + struct dpa_bp *dpa_bp, *drain_bp;
  16082. + struct dpa_generic_priv_s *priv;
  16083. + struct dpa_rx_errors rx_errors;
  16084. + struct dpa_ern_cnt ern_cnt;
  16085. + unsigned int num_cpus, offset;
  16086. + u64 bp_cnt, drain_cnt;
  16087. + int total_stats, i;
  16088. +
  16089. + total_stats = dpa_generic_get_sset_count(net_dev, ETH_SS_STATS);
  16090. + priv = netdev_priv(net_dev);
  16091. + drain_bp = priv->draining_tx_bp;
  16092. + dpa_bp = priv->rx_bp;
  16093. + num_cpus = num_online_cpus();
  16094. + drain_cnt = 0;
  16095. + bp_cnt = 0;
  16096. +
  16097. + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
  16098. + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
  16099. + memset(data, 0, total_stats * sizeof(u64));
  16100. +
  16101. + for_each_online_cpu(i) {
  16102. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  16103. +
  16104. + if (dpa_bp->percpu_count)
  16105. + bp_cnt = *(per_cpu_ptr(dpa_bp->percpu_count, i));
  16106. +
  16107. + if (drain_bp->percpu_count)
  16108. + drain_cnt = *(per_cpu_ptr(drain_bp->percpu_count, i));
  16109. +
  16110. + rx_errors.dme += percpu_priv->rx_errors.dme;
  16111. + rx_errors.fpe += percpu_priv->rx_errors.fpe;
  16112. + rx_errors.fse += percpu_priv->rx_errors.fse;
  16113. + rx_errors.phe += percpu_priv->rx_errors.phe;
  16114. + rx_errors.cse += percpu_priv->rx_errors.cse;
  16115. +
  16116. + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
  16117. + ern_cnt.wred += percpu_priv->ern_cnt.wred;
  16118. + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
  16119. + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
  16120. + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
  16121. + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
  16122. + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
  16123. + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
  16124. +
  16125. + copy_stats(percpu_priv, num_cpus, i, bp_cnt, drain_cnt, data);
  16126. + }
  16127. +
  16128. + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
  16129. + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
  16130. +
  16131. + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
  16132. + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
  16133. +}
  16134. +
  16135. +static void dpa_generic_get_strings(struct net_device *net_dev,
  16136. + u32 stringset, u8 *data)
  16137. +{
  16138. + unsigned int i, j, num_cpus, size;
  16139. + char string_cpu[ETH_GSTRING_LEN];
  16140. + u8 *strings;
  16141. +
  16142. + strings = data;
  16143. + num_cpus = num_online_cpus();
  16144. + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
  16145. +
  16146. + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
  16147. + for (j = 0; j < num_cpus; j++) {
  16148. + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
  16149. + dpa_stats_percpu[i], j);
  16150. + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
  16151. + strings += ETH_GSTRING_LEN;
  16152. + }
  16153. + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
  16154. + dpa_stats_percpu[i]);
  16155. + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
  16156. + strings += ETH_GSTRING_LEN;
  16157. + }
  16158. + memcpy(strings, dpa_stats_global, size);
  16159. +}
  16160. +
  16161. +const struct ethtool_ops dpa_generic_ethtool_ops = {
  16162. + .get_settings = dpa_generic_get_settings,
  16163. + .set_settings = dpa_generic_set_settings,
  16164. + .get_drvinfo = dpa_generic_get_drvinfo,
  16165. + .get_msglevel = dpa_generic_get_msglevel,
  16166. + .set_msglevel = dpa_generic_set_msglevel,
  16167. + .nway_reset = dpa_generic_nway_reset,
  16168. + .get_link = ethtool_op_get_link,
  16169. + .get_sset_count = dpa_generic_get_sset_count,
  16170. + .get_ethtool_stats = dpa_generic_get_ethtool_stats,
  16171. + .get_strings = dpa_generic_get_strings,
  16172. +};
  16173. --- /dev/null
  16174. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
  16175. @@ -0,0 +1,250 @@
  16176. +/* Copyright 2015 Freescale Semiconductor, Inc.
  16177. + *
  16178. + * Redistribution and use in source and binary forms, with or without
  16179. + * modification, are permitted provided that the following conditions are met:
  16180. + * * Redistributions of source code must retain the above copyright
  16181. + * notice, this list of conditions and the following disclaimer.
  16182. + * * Redistributions in binary form must reproduce the above copyright
  16183. + * notice, this list of conditions and the following disclaimer in the
  16184. + * documentation and/or other materials provided with the distribution.
  16185. + * * Neither the name of Freescale Semiconductor nor the
  16186. + * names of its contributors may be used to endorse or promote products
  16187. + * derived from this software without specific prior written permission.
  16188. + *
  16189. + *
  16190. + * ALTERNATIVELY, this software may be distributed under the terms of the
  16191. + * GNU General Public License ("GPL") as published by the Free Software
  16192. + * Foundation, either version 2 of that License or (at your option) any
  16193. + * later version.
  16194. + *
  16195. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  16196. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  16197. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  16198. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  16199. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  16200. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  16201. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  16202. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  16203. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  16204. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  16205. + */
  16206. +
  16207. +#include <linux/string.h>
  16208. +
  16209. +#include "dpaa_eth.h"
  16210. +#include "dpaa_eth_macsec.h"
  16211. +
  16212. +static const char dpa_macsec_stats_percpu[][ETH_GSTRING_LEN] = {
  16213. + "interrupts",
  16214. + "rx packets",
  16215. + "tx packets",
  16216. + "tx recycled",
  16217. + "tx confirm",
  16218. + "tx S/G",
  16219. + "rx S/G",
  16220. + "tx error",
  16221. + "rx error",
  16222. + "bp count",
  16223. + "tx macsec",
  16224. + "rx macsec"
  16225. +};
  16226. +
  16227. +static const char dpa_macsec_stats_global[][ETH_GSTRING_LEN] = {
  16228. + /* dpa rx errors */
  16229. + "rx dma error",
  16230. + "rx frame physical error",
  16231. + "rx frame size error",
  16232. + "rx header error",
  16233. + "rx csum error",
  16234. +
  16235. + /* demultiplexing errors */
  16236. + "qman cg_tdrop",
  16237. + "qman wred",
  16238. + "qman error cond",
  16239. + "qman early window",
  16240. + "qman late window",
  16241. + "qman fq tdrop",
  16242. + "qman fq retired",
  16243. + "qman orp disabled",
  16244. +
  16245. + /* congestion related stats */
  16246. + "congestion time (ms)",
  16247. + "entered congestion",
  16248. + "congested (0/1)"
  16249. +};
  16250. +
  16251. +#define DPA_MACSEC_STATS_PERCPU_LEN ARRAY_SIZE(dpa_macsec_stats_percpu)
  16252. +#define DPA_MACSEC_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_macsec_stats_global)
  16253. +
  16254. +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
  16255. + int crr_cpu, u64 bp_count, u64 tx_macsec,
  16256. + u64 rx_macsec, u64 *data)
  16257. +{
  16258. + int num_values = num_cpus + 1;
  16259. + int crr = 0;
  16260. +
  16261. + /* update current CPU's stats and also add them to the total values */
  16262. + data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
  16263. + data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
  16264. +
  16265. + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
  16266. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
  16267. +
  16268. + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
  16269. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
  16270. +
  16271. + data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
  16272. + data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
  16273. +
  16274. + data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
  16275. + data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
  16276. +
  16277. + data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
  16278. + data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
  16279. +
  16280. + data[crr * num_values + crr_cpu] = percpu_priv->rx_sg;
  16281. + data[crr++ * num_values + num_cpus] += percpu_priv->rx_sg;
  16282. +
  16283. + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
  16284. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
  16285. +
  16286. + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
  16287. + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
  16288. +
  16289. + data[crr * num_values + crr_cpu] = bp_count;
  16290. + data[crr++ * num_values + num_cpus] += bp_count;
  16291. +
  16292. + data[crr * num_values + crr_cpu] = tx_macsec;
  16293. + data[crr++ * num_values + num_cpus] += tx_macsec;
  16294. +
  16295. + data[crr * num_values + crr_cpu] = rx_macsec;
  16296. + data[crr++ * num_values + num_cpus] += rx_macsec;
  16297. +}
  16298. +
  16299. +int dpa_macsec_get_sset_count(struct net_device *net_dev, int type)
  16300. +{
  16301. + unsigned int total_stats, num_stats;
  16302. +
  16303. + num_stats = num_online_cpus() + 1;
  16304. + total_stats = num_stats * DPA_MACSEC_STATS_PERCPU_LEN +
  16305. + DPA_MACSEC_STATS_GLOBAL_LEN;
  16306. +
  16307. + switch (type) {
  16308. + case ETH_SS_STATS:
  16309. + return total_stats;
  16310. + default:
  16311. + return -EOPNOTSUPP;
  16312. + }
  16313. +}
  16314. +
  16315. +void dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
  16316. + struct ethtool_stats *stats, u64 *data)
  16317. +{
  16318. + u64 bp_count, bp_total, cg_time, cg_num, cg_status;
  16319. + struct macsec_percpu_priv_s *percpu_priv_macsec;
  16320. + struct dpa_percpu_priv_s *percpu_priv;
  16321. + struct macsec_priv_s *macsec_priv;
  16322. + struct qm_mcr_querycgr query_cgr;
  16323. + struct dpa_rx_errors rx_errors;
  16324. + struct dpa_ern_cnt ern_cnt;
  16325. + struct dpa_priv_s *priv;
  16326. + unsigned int num_cpus, offset;
  16327. + struct dpa_bp *dpa_bp;
  16328. + int total_stats, i;
  16329. +
  16330. + macsec_priv = dpa_macsec_get_priv(net_dev);
  16331. + if (unlikely(!macsec_priv)) {
  16332. + pr_err("selected macsec_priv is NULL\n");
  16333. + return;
  16334. + }
  16335. +
  16336. + total_stats = dpa_macsec_get_sset_count(net_dev, ETH_SS_STATS);
  16337. + priv = netdev_priv(net_dev);
  16338. + dpa_bp = priv->dpa_bp;
  16339. + num_cpus = num_online_cpus();
  16340. + bp_count = 0;
  16342. +
  16343. + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
  16344. + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
  16345. + memset(data, 0, total_stats * sizeof(u64));
  16346. +
  16347. + for_each_online_cpu(i) {
  16348. + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
  16349. + percpu_priv_macsec = per_cpu_ptr(macsec_priv->percpu_priv, i);
  16350. +
  16351. + if (dpa_bp->percpu_count)
  16352. + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
  16353. +
  16354. + rx_errors.dme += percpu_priv->rx_errors.dme;
  16355. + rx_errors.fpe += percpu_priv->rx_errors.fpe;
  16356. + rx_errors.fse += percpu_priv->rx_errors.fse;
  16357. + rx_errors.phe += percpu_priv->rx_errors.phe;
  16358. + rx_errors.cse += percpu_priv->rx_errors.cse;
  16359. +
  16360. + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
  16361. + ern_cnt.wred += percpu_priv->ern_cnt.wred;
  16362. + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
  16363. + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
  16364. + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
  16365. + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
  16366. + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
  16367. + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
  16368. +
  16369. + copy_stats(percpu_priv, num_cpus, i, bp_count,
  16370. + percpu_priv_macsec->tx_macsec,
  16371. + percpu_priv_macsec->rx_macsec,
  16372. + data);
  16373. + }
  16374. +
  16375. + offset = (num_cpus + 1) * DPA_MACSEC_STATS_PERCPU_LEN;
  16376. + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
  16377. +
  16378. + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
  16379. + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
  16380. +
  16381. + /* gather congestion related counters */
  16382. + cg_num = 0;
  16383. + cg_status = 0;
  16384. + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
  16385. + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
  16386. + cg_num = priv->cgr_data.cgr_congested_count;
  16387. + cg_status = query_cgr.cgr.cs;
  16388. +
  16389. + /* reset congestion stats (like the QMan API does) */
  16390. + priv->cgr_data.congested_jiffies = 0;
  16391. + priv->cgr_data.cgr_congested_count = 0;
  16392. + }
  16393. +
  16394. + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
  16395. + data[offset++] = cg_time;
  16396. + data[offset++] = cg_num;
  16397. + data[offset++] = cg_status;
  16398. +}
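
The two memcpy() calls above flatten dpa_rx_errors and dpa_ern_cnt straight into the u64 stats array, which silently assumes both structs are padding-free arrays of u64. A hedged sketch of a compile-time guard, placed at the top of the function for instance (the field counts 5 and 8 are taken from the accumulation loop above):

	BUILD_BUG_ON(sizeof(struct dpa_rx_errors) != 5 * sizeof(u64));
	BUILD_BUG_ON(sizeof(struct dpa_ern_cnt) != 8 * sizeof(u64));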
  16399. +
  16400. +void dpa_macsec_get_strings(struct net_device *net_dev,
  16401. + u32 stringset, u8 *data)
  16402. +{
  16403. + unsigned int i, j, num_cpus, size;
  16404. + char string_cpu[ETH_GSTRING_LEN];
  16405. + u8 *strings;
  16406. +
  16407. + strings = data;
  16408. + num_cpus = num_online_cpus();
  16409. + size = DPA_MACSEC_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
  16410. +
  16411. + for (i = 0; i < DPA_MACSEC_STATS_PERCPU_LEN; i++) {
  16412. + for (j = 0; j < num_cpus; j++) {
  16413. + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
  16414. + dpa_macsec_stats_percpu[i], j);
  16415. + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
  16416. + strings += ETH_GSTRING_LEN;
  16417. + }
  16418. + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
  16419. + dpa_macsec_stats_percpu[i]);
  16420. + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
  16421. + strings += ETH_GSTRING_LEN;
  16422. + }
  16423. + memcpy(strings, dpa_macsec_stats_global, size);
  16424. +}
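
These three callbacks only take effect once wired into the interface's ethtool_ops, which happens elsewhere in this patch. A minimal sketch of the expected hookup (dpa_macsec_ethtool_ops is an illustrative name):

static const struct ethtool_ops dpa_macsec_ethtool_ops = {
	.get_sset_count		= dpa_macsec_get_sset_count,
	.get_ethtool_stats	= dpa_macsec_get_ethtool_stats,
	.get_strings		= dpa_macsec_get_strings,
};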
  16425. +
  16426. --- /dev/null
  16427. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
  16428. @@ -0,0 +1,287 @@
  16429. +/*
  16430. + * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
  16431. + *
  16432. + * Author: Yangbo Lu <yangbo.lu@freescale.com>
  16433. + *
  16434. + * Copyright 2014 Freescale Semiconductor, Inc.
  16435. + *
  16436. + * This program is free software; you can redistribute it and/or modify it
  16437. + * under the terms of the GNU General Public License as published by the
  16438. + * Free Software Foundation; either version 2 of the License, or (at your
  16439. + * option) any later version.
  16440. +*/
  16441. +
  16442. +#include <linux/device.h>
  16443. +#include <linux/hrtimer.h>
  16444. +#include <linux/init.h>
  16445. +#include <linux/interrupt.h>
  16446. +#include <linux/kernel.h>
  16447. +#include <linux/module.h>
  16448. +#include <linux/of.h>
  16449. +#include <linux/of_platform.h>
  16450. +#include <linux/timex.h>
  16451. +#include <linux/io.h>
  16452. +
  16453. +#include <linux/ptp_clock_kernel.h>
  16454. +
  16455. +#include "dpaa_eth.h"
  16456. +#include "mac.h"
  16457. +
  16458. +struct ptp_clock *clock;
  16459. +
  16460. +static struct mac_device *mac_dev;
  16461. +static u32 freqCompensation;
  16462. +
  16463. +/* Bit definitions for the TMR_CTRL register */
  16464. +#define ALM1P (1<<31) /* Alarm1 output polarity */
  16465. +#define ALM2P (1<<30) /* Alarm2 output polarity */
  16466. +#define FS (1<<28) /* FIPER start indication */
  16467. +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
  16468. +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
  16469. +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
  16470. +#define TCLK_PERIOD_MASK (0x3ff)
  16471. +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
  16472. +#define FRD (1<<14) /* FIPER Realignment Disable */
  16473. +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
  16474. +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
  16475. +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
  16476. +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
  16477. +#define COPH (1<<7) /* Generated clock output phase. */
  16478. +#define CIPH (1<<6) /* External oscillator input clock phase */
  16479. +#define TMSR (1<<5) /* Timer soft reset. */
  16480. +#define BYP (1<<3) /* Bypass drift compensated clock */
  16481. +#define TE (1<<2) /* 1588 timer enable. */
  16482. +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
  16483. +#define CKSEL_MASK (0x3)
  16484. +
  16485. +/* Bit definitions for the TMR_TEVENT register */
  16486. +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
  16487. +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
  16488. +#define ALM2 (1<<17) /* Current time = alarm time register 2 */
  16489. +#define ALM1 (1<<16) /* Current time = alarm time register 1 */
  16490. +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
  16491. +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
  16492. +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
  16493. +
  16494. +/* Bit definitions for the TMR_TEMASK register */
  16495. +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
  16496. +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
  16497. +#define ALM2EN (1<<17) /* Timer ALM2 event enable */
  16498. +#define ALM1EN (1<<16) /* Timer ALM1 event enable */
  16499. +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
  16500. +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
  16501. +
  16502. +/* Bit definitions for the TMR_PEVENT register */
  16503. +#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
  16504. +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
  16505. +#define RXP (1<<0) /* PTP frame has been received */
  16506. +
  16507. +/* Bit definitions for the TMR_PEMASK register */
  16508. +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
  16509. +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
  16510. +#define RXPEN (1<<0) /* Receive PTP packet event enable */
  16511. +
  16512. +/* Bit definitions for the TMR_STAT register */
  16513. +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
  16514. +#define STAT_VEC_MASK (0x3f)
  16515. +
  16516. +/* Bit definitions for the TMR_PRSC register */
  16517. +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
  16518. +#define PRSC_OCK_MASK (0xffff)
  16519. +
  16520. +
  16521. +#define N_EXT_TS 2
  16522. +
  16523. +static void set_alarm(void)
  16524. +{
  16525. + u64 ns;
  16526. +
  16527. + if (mac_dev->fm_rtc_get_cnt)
  16528. + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
  16529. + ns += 1500000000ULL;
  16530. + ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
  16531. + ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
  16532. + if (mac_dev->fm_rtc_set_alarm)
  16533. + mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
  16534. +}
  16535. +
  16536. +static void set_fipers(void)
  16537. +{
  16538. + u64 fiper;
  16539. +
  16540. + if (mac_dev->fm_rtc_disable)
  16541. + mac_dev->fm_rtc_disable(mac_dev->fm_dev);
  16542. +
  16543. + set_alarm();
  16544. + fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
  16545. + if (mac_dev->fm_rtc_set_fiper)
  16546. + mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
  16547. +
  16548. + if (mac_dev->fm_rtc_enable)
  16549. + mac_dev->fm_rtc_enable(mac_dev->fm_dev);
  16550. +}
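
Worked example of the alignment logic: if the 1588 counter reads 2.3 s (2300000000 ns), set_alarm() targets 3.8 s, rounds down to the 3.0 s boundary, and backs off one nominal clock period so the pulse itself lands on the whole second. With the FIPER period programmed to 1 s minus one period, every subsequent pulse then stays aligned to second boundaries.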
  16551. +
  16552. +/* PTP clock operations */
  16553. +
  16554. +static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
  16555. +{
  16556. + u64 adj;
  16557. + u32 diff, tmr_add;
  16558. + int neg_adj = 0;
  16559. +
  16560. + if (ppb < 0) {
  16561. + neg_adj = 1;
  16562. + ppb = -ppb;
  16563. + }
  16564. +
  16565. + tmr_add = freqCompensation;
  16566. + adj = tmr_add;
  16567. + adj *= ppb;
  16568. + diff = div_u64(adj, 1000000000ULL);
  16569. +
  16570. + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
  16571. +
  16572. + if (mac_dev->fm_rtc_set_drift)
  16573. + mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
  16574. +
  16575. + return 0;
  16576. +}
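
Worked example of the drift math: diff = tmr_add * |ppb| / 10^9. With a hypothetical nominal addend of 0x80000000 (2147483648) read into freqCompensation at probe time and ppb = +100, diff = 214, so 0x80000000 + 214 is written back, speeding the clock up by exactly 100 parts per billion.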
  16577. +
  16578. +static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
  16579. +{
  16580. + u64 now;
  16581. +
  16582. + if (mac_dev->fm_rtc_get_cnt)
  16583. + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
  16584. +
  16585. + now += delta;
  16586. +
  16587. + if (mac_dev->fm_rtc_set_cnt)
  16588. + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
  16589. + set_fipers();
  16590. +
  16591. + return 0;
  16592. +}
  16593. +
  16594. +static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
  16595. +{
  16596. + u64 ns;
  16597. + u32 remainder;
  16598. +
  16599. + if (mac_dev->fm_rtc_get_cnt)
  16600. + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
  16601. +
  16602. + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
  16603. + ts->tv_nsec = remainder;
  16604. + return 0;
  16605. +}
  16606. +
  16607. +static int ptp_dpa_settime(struct ptp_clock_info *ptp,
  16608. + const struct timespec *ts)
  16609. +{
  16610. + u64 ns;
  16611. +
  16612. + ns = ts->tv_sec * 1000000000ULL;
  16613. + ns += ts->tv_nsec;
  16614. +
  16615. + if (mac_dev->fm_rtc_set_cnt)
  16616. + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
  16617. + set_fipers();
  16618. + return 0;
  16619. +}
  16620. +
  16621. +static int ptp_dpa_enable(struct ptp_clock_info *ptp,
  16622. + struct ptp_clock_request *rq, int on)
  16623. +{
  16624. + u32 bit;
  16625. +
  16626. + switch (rq->type) {
  16627. + case PTP_CLK_REQ_EXTTS:
  16628. + switch (rq->extts.index) {
  16629. + case 0:
  16630. + bit = ETS1EN;
  16631. + break;
  16632. + case 1:
  16633. + bit = ETS2EN;
  16634. + break;
  16635. + default:
  16636. + return -EINVAL;
  16637. + }
  16638. + if (on) {
  16639. + if (mac_dev->fm_rtc_enable_interrupt)
  16640. + mac_dev->fm_rtc_enable_interrupt(
  16641. + mac_dev->fm_dev, bit);
  16642. + } else {
  16643. + if (mac_dev->fm_rtc_disable_interrupt)
  16644. + mac_dev->fm_rtc_disable_interrupt(
  16645. + mac_dev->fm_dev, bit);
  16646. + }
  16647. + return 0;
  16648. +
  16649. + case PTP_CLK_REQ_PPS:
  16650. + if (on) {
  16651. + if (mac_dev->fm_rtc_enable_interrupt)
  16652. + mac_dev->fm_rtc_enable_interrupt(
  16653. + mac_dev->fm_dev, PP1EN);
  16654. + } else {
  16655. + if (mac_dev->fm_rtc_disable_interrupt)
  16656. + mac_dev->fm_rtc_disable_interrupt(
  16657. + mac_dev->fm_dev, PP1EN);
  16658. + }
  16659. + return 0;
  16660. +
  16661. + default:
  16662. + break;
  16663. + }
  16664. +
  16665. + return -EOPNOTSUPP;
  16666. +}
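
A hedged userspace sketch (not part of the patch) of driving this path through the standard PTP character device, which reaches ptp_dpa_enable() with PTP_CLK_REQ_EXTTS; the /dev/ptp0 name is an assumption, since the actual index depends on registration order:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int request_extts(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.index = 0;			/* channel 0 maps to ETS1EN above */
	req.flags = PTP_ENABLE_FEATURE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req) < 0) {
		close(fd);
		return -1;
	}

	/* each trigger edge yields one event carrying a hardware timestamp */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts[%u]: %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}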
  16667. +
  16668. +static struct ptp_clock_info ptp_dpa_caps = {
  16669. + .owner = THIS_MODULE,
  16670. + .name = "dpaa clock",
  16671. + .max_adj = 512000,
  16672. + .n_alarm = 0,
  16673. + .n_ext_ts = N_EXT_TS,
  16674. + .n_per_out = 0,
  16675. + .pps = 1,
  16676. + .adjfreq = ptp_dpa_adjfreq,
  16677. + .adjtime = ptp_dpa_adjtime,
  16678. + .gettime = ptp_dpa_gettime,
  16679. + .settime = ptp_dpa_settime,
  16680. + .enable = ptp_dpa_enable,
  16681. +};
  16682. +
  16683. +static int __init __cold dpa_ptp_load(void)
  16684. +{
  16685. + struct device *ptp_dev;
  16686. + struct timespec now;
  16687. + int dpa_phc_index;
  16688. + int err;
  16689. +
  16690. + ptp_dev = &ptp_priv.of_dev->dev;
  16691. + mac_dev = ptp_priv.mac_dev;
  16692. +
  16693. + if (mac_dev->fm_rtc_get_drift)
  16694. + mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
  16695. +
  16696. + getnstimeofday(&now);
  16697. + ptp_dpa_settime(&ptp_dpa_caps, &now);
  16698. +
  16699. + clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
  16700. + if (IS_ERR(clock)) {
  16701. + err = PTR_ERR(clock);
  16702. + return err;
  16703. + }
  16704. + dpa_phc_index = ptp_clock_index(clock);
  16705. + return 0;
  16706. +}
  16707. +module_init(dpa_ptp_load);
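
dpa_phc_index is captured above but not exported from this file. The conventional way to surface it to applications is the ethtool get_ts_info callback; a hedged sketch (dpa_get_ts_info is an illustrative name, and the SOF_TIMESTAMPING capability flags are omitted):

static int dpa_get_ts_info(struct net_device *net_dev,
			   struct ethtool_ts_info *info)
{
	info->phc_index = ptp_clock_index(clock);
	return 0;
}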
  16708. +
  16709. +static void __exit __cold dpa_ptp_unload(void)
  16710. +{
  16711. + if (mac_dev->fm_rtc_disable_interrupt)
  16712. + mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
  16713. + ptp_clock_unregister(clock);
  16714. +}
  16715. +module_exit(dpa_ptp_unload);
  16716. --- /dev/null
  16717. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
  16718. @@ -0,0 +1,915 @@
  16719. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  16720. + *
  16721. + * Redistribution and use in source and binary forms, with or without
  16722. + * modification, are permitted provided that the following conditions are met:
  16723. + * * Redistributions of source code must retain the above copyright
  16724. + * notice, this list of conditions and the following disclaimer.
  16725. + * * Redistributions in binary form must reproduce the above copyright
  16726. + * notice, this list of conditions and the following disclaimer in the
  16727. + * documentation and/or other materials provided with the distribution.
  16728. + * * Neither the name of Freescale Semiconductor nor the
  16729. + * names of its contributors may be used to endorse or promote products
  16730. + * derived from this software without specific prior written permission.
  16731. + *
  16732. + *
  16733. + * ALTERNATIVELY, this software may be distributed under the terms of the
  16734. + * GNU General Public License ("GPL") as published by the Free Software
  16735. + * Foundation, either version 2 of that License or (at your option) any
  16736. + * later version.
  16737. + *
  16738. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  16739. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  16740. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  16741. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  16742. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  16743. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  16744. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  16745. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  16746. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  16747. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  16748. + */
  16749. +
  16750. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  16751. +#define pr_fmt(fmt) \
  16752. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  16753. + KBUILD_BASENAME".c", __LINE__, __func__
  16754. +#else
  16755. +#define pr_fmt(fmt) \
  16756. + KBUILD_MODNAME ": " fmt
  16757. +#endif
  16758. +
  16759. +#include <linux/init.h>
  16760. +#include <linux/module.h>
  16761. +#include <linux/io.h>
  16762. +#include <linux/of_platform.h>
  16763. +#include <linux/of_mdio.h>
  16764. +#include <linux/phy.h>
  16765. +#include <linux/netdevice.h>
  16766. +
  16767. +#include "dpaa_eth.h"
  16768. +#include "mac.h"
  16769. +#include "lnxwrp_fsl_fman.h"
  16770. +
  16771. +#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
  16772. +
  16773. +#include "fsl_fman_dtsec.h"
  16774. +#include "fsl_fman_tgec.h"
  16775. +#include "fsl_fman_memac.h"
  16776. +#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
  16777. +
  16778. +#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
  16779. +
  16780. +MODULE_LICENSE("Dual BSD/GPL");
  16781. +
  16782. +MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
  16783. +
  16784. +MODULE_DESCRIPTION(MAC_DESCRIPTION);
  16785. +
  16786. +struct mac_priv_s {
  16787. + struct fm_mac_dev *fm_mac;
  16788. +};
  16789. +
  16790. +const char *mac_driver_description __initconst = MAC_DESCRIPTION;
  16791. +const size_t mac_sizeof_priv[] = {
  16792. + [DTSEC] = sizeof(struct mac_priv_s),
  16793. + [XGMAC] = sizeof(struct mac_priv_s),
  16794. + [MEMAC] = sizeof(struct mac_priv_s)
  16795. +};
  16796. +
  16797. +static const enet_mode_t _100[] = {
  16798. + [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
  16799. + [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
  16800. +};
  16801. +
  16802. +static const enet_mode_t _1000[] = {
  16803. + [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
  16804. + [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
  16805. + [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
  16806. + [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
  16807. + [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
  16808. + [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
  16809. + [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
  16810. + [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
  16811. + [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
  16812. +};
  16813. +
  16814. +static enet_mode_t __cold __attribute__((nonnull))
  16815. +macdev2enetinterface(const struct mac_device *mac_dev)
  16816. +{
  16817. + switch (mac_dev->max_speed) {
  16818. + case SPEED_100:
  16819. + return _100[mac_dev->phy_if];
  16820. + case SPEED_1000:
  16821. + return _1000[mac_dev->phy_if];
  16822. + case SPEED_2500:
  16823. + return e_ENET_MODE_SGMII_2500;
  16824. + case SPEED_10000:
  16825. + return e_ENET_MODE_XGMII_10000;
  16826. + default:
  16827. + return e_ENET_MODE_MII_100;
  16828. + }
  16829. +}
  16830. +
  16831. +static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
  16832. +{
  16833. + struct mac_device *mac_dev;
  16834. +
  16835. + mac_dev = (struct mac_device *)_mac_dev;
  16836. +
  16837. + if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
  16838. + /* don't flag RX FIFO overflow again after the first occurrence */
  16839. + fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
  16840. + e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
  16841. + dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
  16842. + exception);
  16843. + }
  16844. +
  16845. + dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
  16846. + exception);
  16847. +}
  16848. +
  16849. +static int __cold init(struct mac_device *mac_dev)
  16850. +{
  16851. + int _errno;
  16852. + struct mac_priv_s *priv;
  16853. + t_FmMacParams param;
  16854. + uint32_t version;
  16855. +
  16856. + priv = macdev_priv(mac_dev);
  16857. +
  16858. + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
  16859. + mac_dev->dev, mac_dev->res->start, 0x2000);
  16860. + param.enetMode = macdev2enetinterface(mac_dev);
  16861. + memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
  16862. + sizeof(mac_dev->addr)));
  16863. + param.macId = mac_dev->cell_index;
  16864. + param.h_Fm = (handle_t)mac_dev->fm;
  16865. + param.mdioIrq = NO_IRQ;
  16866. + param.f_Exception = mac_exception;
  16867. + param.f_Event = mac_exception;
  16868. + param.h_App = mac_dev;
  16869. +
  16870. + priv->fm_mac = fm_mac_config(&param);
  16871. + if (unlikely(priv->fm_mac == NULL)) {
  16872. + _errno = -EINVAL;
  16873. + goto _return;
  16874. + }
  16875. +
  16876. + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
  16877. + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
  16878. + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
  16879. +
  16880. + _errno = fm_mac_config_max_frame_length(priv->fm_mac,
  16881. + fm_get_max_frm());
  16882. + if (unlikely(_errno < 0))
  16883. + goto _return_fm_mac_free;
  16884. +
  16885. + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
  16886. + /* 10G always works with pad and CRC, so only 1G needs them configured */
  16887. + _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
  16888. + if (unlikely(_errno < 0))
  16889. + goto _return_fm_mac_free;
  16890. +
  16891. + _errno = fm_mac_config_half_duplex(priv->fm_mac,
  16892. + mac_dev->half_duplex);
  16893. + if (unlikely(_errno < 0))
  16894. + goto _return_fm_mac_free;
  16895. + } else {
  16896. + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
  16897. + if (unlikely(_errno < 0))
  16898. + goto _return_fm_mac_free;
  16899. + }
  16900. +
  16901. + _errno = fm_mac_init(priv->fm_mac);
  16902. + if (unlikely(_errno < 0))
  16903. + goto _return_fm_mac_free;
  16904. +
  16905. +#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
  16906. + /* For 1G MAC, disable the MIB counters overflow interrupt by default */
  16907. + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
  16908. + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
  16909. + e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
  16910. + if (unlikely(_errno < 0))
  16911. + goto _return_fm_mac_free;
  16912. + }
  16913. +#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
  16914. +
  16915. + /* For 10G MAC, disable Tx ECC exception */
  16916. + if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
  16917. + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
  16918. + e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
  16919. + if (unlikely(_errno < 0))
  16920. + goto _return_fm_mac_free;
  16921. + }
  16922. +
  16923. + _errno = fm_mac_get_version(priv->fm_mac, &version);
  16924. + if (unlikely(_errno < 0))
  16925. + goto _return_fm_mac_free;
  16926. +
  16927. + dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
  16928. + ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
  16929. + "dTSEC" : "XGEC"), version);
  16930. +
  16931. + goto _return;
  16932. +
  16933. +
  16934. +_return_fm_mac_free:
  16935. + fm_mac_free(mac_dev->get_mac_handle(mac_dev));
  16936. +
  16937. +_return:
  16938. + return _errno;
  16939. +}
  16940. +
  16941. +static int __cold memac_init(struct mac_device *mac_dev)
  16942. +{
  16943. + int _errno;
  16944. + struct mac_priv_s *priv;
  16945. + t_FmMacParams param;
  16946. +
  16947. + priv = macdev_priv(mac_dev);
  16948. +
  16949. + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
  16950. + mac_dev->dev, mac_dev->res->start, 0x2000);
  16951. + param.enetMode = macdev2enetinterface(mac_dev);
  16952. + memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
  16953. + param.macId = mac_dev->cell_index;
  16954. + param.h_Fm = (handle_t)mac_dev->fm;
  16955. + param.mdioIrq = NO_IRQ;
  16956. + param.f_Exception = mac_exception;
  16957. + param.f_Event = mac_exception;
  16958. + param.h_App = mac_dev;
  16959. +
  16960. + priv->fm_mac = fm_mac_config(&param);
  16961. + if (unlikely(priv->fm_mac == NULL)) {
  16962. + _errno = -EINVAL;
  16963. + goto _return;
  16964. + }
  16965. +
  16966. + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
  16967. + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
  16968. + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
  16969. +
  16970. + _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
  16971. + if (unlikely(_errno < 0))
  16972. + goto _return_fm_mac_free;
  16973. +
  16974. + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
  16975. + if (unlikely(_errno < 0))
  16976. + goto _return_fm_mac_free;
  16977. +
  16978. + _errno = fm_mac_init(priv->fm_mac);
  16979. + if (unlikely(_errno < 0))
  16980. + goto _return_fm_mac_free;
  16981. +
  16982. + dev_info(mac_dev->dev, "FMan MEMAC\n");
  16983. +
  16984. + goto _return;
  16985. +
  16986. +_return_fm_mac_free:
  16987. + fm_mac_free(priv->fm_mac);
  16988. +
  16989. +_return:
  16990. + return _errno;
  16991. +}
  16992. +
  16993. +static int __cold start(struct mac_device *mac_dev)
  16994. +{
  16995. + int _errno;
  16996. + struct phy_device *phy_dev = mac_dev->phy_dev;
  16997. +
  16998. + _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
  16999. +
  17000. + if (!_errno && phy_dev) {
  17001. + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
  17002. + phy_start(phy_dev);
  17003. + else if (phy_dev->drv->read_status)
  17004. + phy_dev->drv->read_status(phy_dev);
  17005. + }
  17006. +
  17007. + return _errno;
  17008. +}
  17009. +
  17010. +static int __cold stop(struct mac_device *mac_dev)
  17011. +{
  17012. + if (mac_dev->phy_dev &&
  17013. + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
  17014. + phy_stop(mac_dev->phy_dev);
  17015. +
  17016. + return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
  17017. +}
  17018. +
  17019. +static int __cold set_multi(struct net_device *net_dev,
  17020. + struct mac_device *mac_dev)
  17021. +{
  17022. + struct mac_priv_s *mac_priv;
  17023. + struct mac_address *old_addr, *tmp;
  17024. + struct netdev_hw_addr *ha;
  17025. + int _errno;
  17026. +
  17027. + mac_priv = macdev_priv(mac_dev);
  17028. +
  17029. + /* Clear previous address list */
  17030. + list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
  17031. + _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
  17032. + (t_EnetAddr *)old_addr->addr);
  17033. + if (_errno < 0)
  17034. + return _errno;
  17035. +
  17036. + list_del(&old_addr->list);
  17037. + kfree(old_addr);
  17038. + }
  17039. +
  17040. + /* Add all the addresses from the new list */
  17041. + netdev_for_each_mc_addr(ha, net_dev) {
  17042. + _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
  17043. + (t_EnetAddr *)ha->addr);
  17044. + if (_errno < 0)
  17045. + return _errno;
  17046. +
  17047. + tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
  17048. + if (!tmp) {
  17049. + dev_err(mac_dev->dev, "Out of memory\n");
  17050. + return -ENOMEM;
  17051. + }
  17052. + memcpy(tmp->addr, ha->addr, ETH_ALEN);
  17053. + list_add(&tmp->list, &mac_dev->mc_addr_list);
  17054. + }
  17055. + return 0;
  17056. +}
  17057. +
  17058. +/* Avoid redundant calls to FMD if the MAC driver already holds the desired
  17059. + * active PAUSE settings. Otherwise, reflect the new active settings in
  17060. + * FMan.
  17061. + */
  17062. +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
  17063. +{
  17064. + struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
  17065. + int _errno = 0;
  17066. +
  17067. + if (unlikely(rx != mac_dev->rx_pause_active)) {
  17068. + _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
  17069. + if (likely(_errno == 0))
  17070. + mac_dev->rx_pause_active = rx;
  17071. + }
  17072. +
  17073. + if (unlikely(tx != mac_dev->tx_pause_active)) {
  17074. + _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
  17075. + if (likely(_errno == 0))
  17076. + mac_dev->tx_pause_active = tx;
  17077. + }
  17078. +
  17079. + return _errno;
  17080. +}
  17081. +EXPORT_SYMBOL(set_mac_active_pause);
  17082. +
  17083. +/* Determine the MAC RX/TX PAUSE frames settings based on PHY
  17084. + * autonegotiation or values set by ethtool.
  17085. + */
  17086. +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
  17087. +{
  17088. + struct phy_device *phy_dev = mac_dev->phy_dev;
  17089. + u16 lcl_adv, rmt_adv;
  17090. + u8 flowctrl;
  17091. +
  17092. + *rx_pause = *tx_pause = false;
  17093. +
  17094. + if (!phy_dev->duplex)
  17095. + return;
  17096. +
  17097. + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
  17098. + * are those set by ethtool.
  17099. + */
  17100. + if (!mac_dev->autoneg_pause) {
  17101. + *rx_pause = mac_dev->rx_pause_req;
  17102. + *tx_pause = mac_dev->tx_pause_req;
  17103. + return;
  17104. + }
  17105. +
  17106. + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
  17107. + * settings depend on the result of the link negotiation.
  17108. + */
  17109. +
  17110. + /* get local capabilities */
  17111. + lcl_adv = 0;
  17112. + if (phy_dev->advertising & ADVERTISED_Pause)
  17113. + lcl_adv |= ADVERTISE_PAUSE_CAP;
  17114. + if (phy_dev->advertising & ADVERTISED_Asym_Pause)
  17115. + lcl_adv |= ADVERTISE_PAUSE_ASYM;
  17116. +
  17117. + /* get link partner capabilities */
  17118. + rmt_adv = 0;
  17119. + if (phy_dev->pause)
  17120. + rmt_adv |= LPA_PAUSE_CAP;
  17121. + if (phy_dev->asym_pause)
  17122. + rmt_adv |= LPA_PAUSE_ASYM;
  17123. +
  17124. + /* Calculate TX/RX settings based on local and peer advertised
  17125. + * symmetric/asymmetric PAUSE capabilities.
  17126. + */
  17127. + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
  17128. + if (flowctrl & FLOW_CTRL_RX)
  17129. + *rx_pause = true;
  17130. + if (flowctrl & FLOW_CTRL_TX)
  17131. + *tx_pause = true;
  17132. +}
  17133. +EXPORT_SYMBOL(get_pause_cfg);
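
For example, if the local PHY advertises both Pause and Asym_Pause while the link partner advertises only Asym_Pause, mii_resolve_flowctrl_fdx() returns FLOW_CTRL_RX and only Rx PAUSE is enabled; if both sides advertise symmetric Pause, it returns FLOW_CTRL_RX | FLOW_CTRL_TX and PAUSE is enabled in both directions.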
  17134. +
  17135. +static void adjust_link(struct net_device *net_dev)
  17136. +{
  17137. + struct dpa_priv_s *priv = netdev_priv(net_dev);
  17138. + struct mac_device *mac_dev = priv->mac_dev;
  17139. + struct phy_device *phy_dev = mac_dev->phy_dev;
  17140. + struct fm_mac_dev *fm_mac_dev;
  17141. + bool rx_pause, tx_pause;
  17142. + int _errno;
  17143. +
  17144. + fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
  17145. + fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
  17146. + phy_dev->duplex);
  17147. +
  17148. + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
  17149. + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
  17150. + if (unlikely(_errno < 0))
  17151. + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
  17152. +}
  17153. +
  17154. +/* Initializes driver's PHY state, and attaches to the PHY.
  17155. + * Returns 0 on success.
  17156. + */
  17157. +static int dtsec_init_phy(struct net_device *net_dev,
  17158. + struct mac_device *mac_dev)
  17159. +{
  17160. + struct phy_device *phy_dev;
  17161. +
  17162. + if (!mac_dev->phy_node)
  17163. + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
  17164. + &adjust_link, mac_dev->phy_if);
  17165. + else
  17166. + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
  17167. + &adjust_link, 0, mac_dev->phy_if);
  17168. + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
  17169. + netdev_err(net_dev, "Could not connect to PHY %s\n",
  17170. + mac_dev->phy_node ?
  17171. + mac_dev->phy_node->full_name :
  17172. + mac_dev->fixed_bus_id);
  17173. + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
  17174. + }
  17175. +
  17176. + /* Remove any features not supported by the controller */
  17177. + phy_dev->supported &= mac_dev->if_support;
  17178. + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
  17179. + * as most of the PHY drivers do not enable them by default.
  17180. + */
  17181. + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
  17182. + phy_dev->advertising = phy_dev->supported;
  17183. +
  17184. + mac_dev->phy_dev = phy_dev;
  17185. +
  17186. + return 0;
  17187. +}
  17188. +
  17189. +static int xgmac_init_phy(struct net_device *net_dev,
  17190. + struct mac_device *mac_dev)
  17191. +{
  17192. + struct phy_device *phy_dev;
  17193. +
  17194. + if (!mac_dev->phy_node)
  17195. + phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id,
  17196. + mac_dev->phy_if);
  17197. + else
  17198. + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
  17199. + mac_dev->phy_if);
  17200. + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
  17201. + netdev_err(net_dev, "Could not attach to PHY %s\n",
  17202. + mac_dev->phy_node ?
  17203. + mac_dev->phy_node->full_name :
  17204. + mac_dev->fixed_bus_id);
  17205. + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
  17206. + }
  17207. +
  17208. + phy_dev->supported &= mac_dev->if_support;
  17209. + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
  17210. + * as most of the PHY drivers do not enable them by default.
  17211. + */
  17212. + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
  17213. + phy_dev->advertising = phy_dev->supported;
  17214. +
  17215. + mac_dev->phy_dev = phy_dev;
  17216. +
  17217. + return 0;
  17218. +}
  17219. +
  17220. +static int memac_init_phy(struct net_device *net_dev,
  17221. + struct mac_device *mac_dev)
  17222. +{
  17223. + struct phy_device *phy_dev;
  17224. +
  17225. + if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
  17226. + (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
  17227. + if (!mac_dev->phy_node) {
  17228. + mac_dev->phy_dev = NULL;
  17229. + return 0;
  17230. + } else
  17231. + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
  17232. + mac_dev->phy_if);
  17233. + } else {
  17234. + if (!mac_dev->phy_node)
  17235. + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
  17236. + &adjust_link, mac_dev->phy_if);
  17237. + else
  17238. + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
  17239. + &adjust_link, 0,
  17240. + mac_dev->phy_if);
  17241. + }
  17242. +
  17243. + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
  17244. + netdev_err(net_dev, "Could not connect to PHY %s\n",
  17245. + mac_dev->phy_node ?
  17246. + mac_dev->phy_node->full_name :
  17247. + mac_dev->fixed_bus_id);
  17248. + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
  17249. + }
  17250. +
  17251. + /* Remove any features not supported by the controller */
  17252. + phy_dev->supported &= mac_dev->if_support;
  17253. + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
  17254. + * as most of the PHY drivers do not enable them by default.
  17255. + */
  17256. + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
  17257. + phy_dev->advertising = phy_dev->supported;
  17258. +
  17259. + mac_dev->phy_dev = phy_dev;
  17260. +
  17261. + return 0;
  17262. +}
  17263. +
  17264. +static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
  17265. +{
  17266. + int _errno, __errno;
  17267. +
  17268. + _errno = fm_mac_disable(fm_mac_dev);
  17269. + __errno = fm_mac_free(fm_mac_dev);
  17270. +
  17271. + if (unlikely(__errno < 0))
  17272. + _errno = __errno;
  17273. +
  17274. + return _errno;
  17275. +}
  17276. +
  17277. +static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
  17278. +{
  17279. + const struct mac_priv_s *priv;
  17280. + priv = macdev_priv(mac_dev);
  17281. + return priv->fm_mac;
  17282. +}
  17283. +
  17284. +static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
  17285. +{
  17286. + struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
  17287. + int i = 0, n = nn;
  17288. +
  17289. + FM_DMP_SUBTITLE(buf, n, "\n");
  17290. +
  17291. + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
  17292. +
  17293. + FM_DMP_V32(buf, n, p_mm, tsec_id);
  17294. + FM_DMP_V32(buf, n, p_mm, tsec_id2);
  17295. + FM_DMP_V32(buf, n, p_mm, ievent);
  17296. + FM_DMP_V32(buf, n, p_mm, imask);
  17297. + FM_DMP_V32(buf, n, p_mm, ecntrl);
  17298. + FM_DMP_V32(buf, n, p_mm, ptv);
  17299. + FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
  17300. + FM_DMP_V32(buf, n, p_mm, tmr_pevent);
  17301. + FM_DMP_V32(buf, n, p_mm, tmr_pemask);
  17302. + FM_DMP_V32(buf, n, p_mm, tctrl);
  17303. + FM_DMP_V32(buf, n, p_mm, rctrl);
  17304. + FM_DMP_V32(buf, n, p_mm, maccfg1);
  17305. + FM_DMP_V32(buf, n, p_mm, maccfg2);
  17306. + FM_DMP_V32(buf, n, p_mm, ipgifg);
  17307. + FM_DMP_V32(buf, n, p_mm, hafdup);
  17308. + FM_DMP_V32(buf, n, p_mm, maxfrm);
  17309. +
  17310. + FM_DMP_V32(buf, n, p_mm, macstnaddr1);
  17311. + FM_DMP_V32(buf, n, p_mm, macstnaddr2);
  17312. +
  17313. + for (i = 0; i < 7; ++i) {
  17314. + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
  17315. + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
  17316. + }
  17317. +
  17318. + FM_DMP_V32(buf, n, p_mm, car1);
  17319. + FM_DMP_V32(buf, n, p_mm, car2);
  17320. +
  17321. + return n;
  17322. +}
  17323. +
  17324. +static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
  17325. +{
  17326. + struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
  17327. + int n = nn;
  17328. +
  17329. + FM_DMP_SUBTITLE(buf, n, "\n");
  17330. + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index);
  17331. +
  17332. + FM_DMP_V32(buf, n, p_mm, tgec_id);
  17333. + FM_DMP_V32(buf, n, p_mm, command_config);
  17334. + FM_DMP_V32(buf, n, p_mm, mac_addr_0);
  17335. + FM_DMP_V32(buf, n, p_mm, mac_addr_1);
  17336. + FM_DMP_V32(buf, n, p_mm, maxfrm);
  17337. + FM_DMP_V32(buf, n, p_mm, pause_quant);
  17338. + FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
  17339. + FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
  17340. + FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
  17341. + FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
  17342. + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
  17343. + FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
  17344. + FM_DMP_V32(buf, n, p_mm, mdio_command);
  17345. + FM_DMP_V32(buf, n, p_mm, mdio_data);
  17346. + FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
  17347. + FM_DMP_V32(buf, n, p_mm, status);
  17348. + FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
  17349. + FM_DMP_V32(buf, n, p_mm, mac_addr_2);
  17350. + FM_DMP_V32(buf, n, p_mm, mac_addr_3);
  17351. + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
  17352. + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
  17353. + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
  17354. + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
  17355. + FM_DMP_V32(buf, n, p_mm, imask);
  17356. + FM_DMP_V32(buf, n, p_mm, ievent);
  17357. +
  17358. + return n;
  17359. +}
  17360. +
  17361. +static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
  17362. +{
  17363. + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
  17364. + int i = 0, n = nn;
  17365. +
  17366. + FM_DMP_SUBTITLE(buf, n, "\n");
  17367. + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index);
  17368. +
  17369. + FM_DMP_V32(buf, n, p_mm, command_config);
  17370. + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
  17371. + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
  17372. + FM_DMP_V32(buf, n, p_mm, maxfrm);
  17373. + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
  17374. + FM_DMP_V32(buf, n, p_mm, ievent);
  17375. + FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
  17376. + FM_DMP_V32(buf, n, p_mm, imask);
  17377. +
  17378. + for (i = 0; i < 4; ++i)
  17379. + FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
  17380. +
  17381. + for (i = 0; i < 4; ++i)
  17382. + FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
  17383. +
  17384. + FM_DMP_V32(buf, n, p_mm, rx_pause_status);
  17385. +
  17386. + for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
  17387. + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
  17388. + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
  17389. + }
  17390. +
  17391. + FM_DMP_V32(buf, n, p_mm, lpwake_timer);
  17392. + FM_DMP_V32(buf, n, p_mm, sleep_timer);
  17393. + FM_DMP_V32(buf, n, p_mm, statn_config);
  17394. + FM_DMP_V32(buf, n, p_mm, if_mode);
  17395. + FM_DMP_V32(buf, n, p_mm, if_status);
  17396. + FM_DMP_V32(buf, n, p_mm, hg_config);
  17397. + FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
  17398. + FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
  17399. + FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
  17400. + FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
  17401. + FM_DMP_V32(buf, n, p_mm, rhm);
  17402. + FM_DMP_V32(buf, n, p_mm, thm);
  17403. +
  17404. + return n;
  17405. +}
  17406. +
  17407. +static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
  17408. +{
  17409. + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
  17410. + int n = nn;
  17411. +
  17412. + FM_DMP_SUBTITLE(buf, n, "\n");
  17413. + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index);
  17414. +
  17415. + /* Rx Statistics Counter */
  17416. + FM_DMP_V32(buf, n, p_mm, reoct_l);
  17417. + FM_DMP_V32(buf, n, p_mm, reoct_u);
  17418. + FM_DMP_V32(buf, n, p_mm, roct_l);
  17419. + FM_DMP_V32(buf, n, p_mm, roct_u);
  17420. + FM_DMP_V32(buf, n, p_mm, raln_l);
  17421. + FM_DMP_V32(buf, n, p_mm, raln_u);
  17422. + FM_DMP_V32(buf, n, p_mm, rxpf_l);
  17423. + FM_DMP_V32(buf, n, p_mm, rxpf_u);
  17424. + FM_DMP_V32(buf, n, p_mm, rfrm_l);
  17425. + FM_DMP_V32(buf, n, p_mm, rfrm_u);
  17426. + FM_DMP_V32(buf, n, p_mm, rfcs_l);
  17427. + FM_DMP_V32(buf, n, p_mm, rfcs_u);
  17428. + FM_DMP_V32(buf, n, p_mm, rvlan_l);
  17429. + FM_DMP_V32(buf, n, p_mm, rvlan_u);
  17430. + FM_DMP_V32(buf, n, p_mm, rerr_l);
  17431. + FM_DMP_V32(buf, n, p_mm, rerr_u);
  17432. + FM_DMP_V32(buf, n, p_mm, ruca_l);
  17433. + FM_DMP_V32(buf, n, p_mm, ruca_u);
  17434. + FM_DMP_V32(buf, n, p_mm, rmca_l);
  17435. + FM_DMP_V32(buf, n, p_mm, rmca_u);
  17436. + FM_DMP_V32(buf, n, p_mm, rbca_l);
  17437. + FM_DMP_V32(buf, n, p_mm, rbca_u);
  17438. + FM_DMP_V32(buf, n, p_mm, rdrp_l);
  17439. + FM_DMP_V32(buf, n, p_mm, rdrp_u);
  17440. + FM_DMP_V32(buf, n, p_mm, rpkt_l);
  17441. + FM_DMP_V32(buf, n, p_mm, rpkt_u);
  17442. + FM_DMP_V32(buf, n, p_mm, rund_l);
  17443. + FM_DMP_V32(buf, n, p_mm, rund_u);
  17444. + FM_DMP_V32(buf, n, p_mm, r64_l);
  17445. + FM_DMP_V32(buf, n, p_mm, r64_u);
  17446. + FM_DMP_V32(buf, n, p_mm, r127_l);
  17447. + FM_DMP_V32(buf, n, p_mm, r127_u);
  17448. + FM_DMP_V32(buf, n, p_mm, r255_l);
  17449. + FM_DMP_V32(buf, n, p_mm, r255_u);
  17450. + FM_DMP_V32(buf, n, p_mm, r511_l);
  17451. + FM_DMP_V32(buf, n, p_mm, r511_u);
  17452. + FM_DMP_V32(buf, n, p_mm, r1023_l);
  17453. + FM_DMP_V32(buf, n, p_mm, r1023_u);
  17454. + FM_DMP_V32(buf, n, p_mm, r1518_l);
  17455. + FM_DMP_V32(buf, n, p_mm, r1518_u);
  17456. + FM_DMP_V32(buf, n, p_mm, r1519x_l);
  17457. + FM_DMP_V32(buf, n, p_mm, r1519x_u);
  17458. + FM_DMP_V32(buf, n, p_mm, rovr_l);
  17459. + FM_DMP_V32(buf, n, p_mm, rovr_u);
  17460. + FM_DMP_V32(buf, n, p_mm, rjbr_l);
  17461. + FM_DMP_V32(buf, n, p_mm, rjbr_u);
  17462. + FM_DMP_V32(buf, n, p_mm, rfrg_l);
  17463. + FM_DMP_V32(buf, n, p_mm, rfrg_u);
  17464. + FM_DMP_V32(buf, n, p_mm, rcnp_l);
  17465. + FM_DMP_V32(buf, n, p_mm, rcnp_u);
  17466. + FM_DMP_V32(buf, n, p_mm, rdrntp_l);
  17467. + FM_DMP_V32(buf, n, p_mm, rdrntp_u);
  17468. +
  17469. + return n;
  17470. +}
  17471. +
  17472. +static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
  17473. +{
  17474. + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
  17475. + int n = nn;
  17476. +
  17477. + FM_DMP_SUBTITLE(buf, n, "\n");
  17478. + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index);
  17479. +
  17480. +
  17481. + /* Tx Statistics Counter */
  17482. + FM_DMP_V32(buf, n, p_mm, teoct_l);
  17483. + FM_DMP_V32(buf, n, p_mm, teoct_u);
  17484. + FM_DMP_V32(buf, n, p_mm, toct_l);
  17485. + FM_DMP_V32(buf, n, p_mm, toct_u);
  17486. + FM_DMP_V32(buf, n, p_mm, txpf_l);
  17487. + FM_DMP_V32(buf, n, p_mm, txpf_u);
  17488. + FM_DMP_V32(buf, n, p_mm, tfrm_l);
  17489. + FM_DMP_V32(buf, n, p_mm, tfrm_u);
  17490. + FM_DMP_V32(buf, n, p_mm, tfcs_l);
  17491. + FM_DMP_V32(buf, n, p_mm, tfcs_u);
  17492. + FM_DMP_V32(buf, n, p_mm, tvlan_l);
  17493. + FM_DMP_V32(buf, n, p_mm, tvlan_u);
  17494. + FM_DMP_V32(buf, n, p_mm, terr_l);
  17495. + FM_DMP_V32(buf, n, p_mm, terr_u);
  17496. + FM_DMP_V32(buf, n, p_mm, tuca_l);
  17497. + FM_DMP_V32(buf, n, p_mm, tuca_u);
  17498. + FM_DMP_V32(buf, n, p_mm, tmca_l);
  17499. + FM_DMP_V32(buf, n, p_mm, tmca_u);
  17500. + FM_DMP_V32(buf, n, p_mm, tbca_l);
  17501. + FM_DMP_V32(buf, n, p_mm, tbca_u);
  17502. + FM_DMP_V32(buf, n, p_mm, tpkt_l);
  17503. + FM_DMP_V32(buf, n, p_mm, tpkt_u);
  17504. + FM_DMP_V32(buf, n, p_mm, tund_l);
  17505. + FM_DMP_V32(buf, n, p_mm, tund_u);
  17506. + FM_DMP_V32(buf, n, p_mm, t64_l);
  17507. + FM_DMP_V32(buf, n, p_mm, t64_u);
  17508. + FM_DMP_V32(buf, n, p_mm, t127_l);
  17509. + FM_DMP_V32(buf, n, p_mm, t127_u);
  17510. + FM_DMP_V32(buf, n, p_mm, t255_l);
  17511. + FM_DMP_V32(buf, n, p_mm, t255_u);
  17512. + FM_DMP_V32(buf, n, p_mm, t511_l);
  17513. + FM_DMP_V32(buf, n, p_mm, t511_u);
  17514. + FM_DMP_V32(buf, n, p_mm, t1023_l);
  17515. + FM_DMP_V32(buf, n, p_mm, t1023_u);
  17516. + FM_DMP_V32(buf, n, p_mm, t1518_l);
  17517. + FM_DMP_V32(buf, n, p_mm, t1518_u);
  17518. + FM_DMP_V32(buf, n, p_mm, t1519x_l);
  17519. + FM_DMP_V32(buf, n, p_mm, t1519x_u);
  17520. + FM_DMP_V32(buf, n, p_mm, tcnp_l);
  17521. + FM_DMP_V32(buf, n, p_mm, tcnp_u);
  17522. +
  17523. + return n;
  17524. +}
  17525. +
  17526. +int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
  17527. +{
  17528. + int n = nn;
  17529. +
  17530. + n = h_mac->dump_mac_regs(h_mac, buf, n);
  17531. +
  17532. + return n;
  17533. +}
  17534. +EXPORT_SYMBOL(fm_mac_dump_regs);
  17535. +
  17536. +int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
  17537. +{
  17538. + int n = nn;
  17539. +
  17540. + if (h_mac->dump_mac_rx_stats)
  17541. + n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
  17542. +
  17543. + return n;
  17544. +}
  17545. +EXPORT_SYMBOL(fm_mac_dump_rx_stats);
  17546. +
  17547. +int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
  17548. +{
  17549. + int n = nn;
  17550. +
  17551. + if (h_mac->dump_mac_tx_stats)
  17552. + n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
  17553. +
  17554. + return n;
  17555. +}
  17556. +EXPORT_SYMBOL(fm_mac_dump_tx_stats);
  17557. +
  17558. +static void __cold setup_dtsec(struct mac_device *mac_dev)
  17559. +{
  17560. + mac_dev->init_phy = dtsec_init_phy;
  17561. + mac_dev->init = init;
  17562. + mac_dev->start = start;
  17563. + mac_dev->stop = stop;
  17564. + mac_dev->set_promisc = fm_mac_set_promiscuous;
  17565. + mac_dev->change_addr = fm_mac_modify_mac_addr;
  17566. + mac_dev->set_multi = set_multi;
  17567. + mac_dev->uninit = uninit;
  17568. + mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
  17569. + mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
  17570. + mac_dev->get_mac_handle = get_mac_handle;
  17571. + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
  17572. + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
  17573. + mac_dev->fm_rtc_enable = fm_rtc_enable;
  17574. + mac_dev->fm_rtc_disable = fm_rtc_disable;
  17575. + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
  17576. + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
  17577. + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
  17578. + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
  17579. + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
  17580. + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
  17581. + mac_dev->set_wol = fm_mac_set_wol;
  17582. + mac_dev->dump_mac_regs = dtsec_dump_regs;
  17583. +}
  17584. +
  17585. +static void __cold setup_xgmac(struct mac_device *mac_dev)
  17586. +{
  17587. + mac_dev->init_phy = xgmac_init_phy;
  17588. + mac_dev->init = init;
  17589. + mac_dev->start = start;
  17590. + mac_dev->stop = stop;
  17591. + mac_dev->set_promisc = fm_mac_set_promiscuous;
  17592. + mac_dev->change_addr = fm_mac_modify_mac_addr;
  17593. + mac_dev->set_multi = set_multi;
  17594. + mac_dev->uninit = uninit;
  17595. + mac_dev->get_mac_handle = get_mac_handle;
  17596. + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
  17597. + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
  17598. + mac_dev->set_wol = fm_mac_set_wol;
  17599. + mac_dev->dump_mac_regs = xgmac_dump_regs;
  17600. +}
  17601. +
  17602. +static void __cold setup_memac(struct mac_device *mac_dev)
  17603. +{
  17604. + mac_dev->init_phy = memac_init_phy;
  17605. + mac_dev->init = memac_init;
  17606. + mac_dev->start = start;
  17607. + mac_dev->stop = stop;
  17608. + mac_dev->set_promisc = fm_mac_set_promiscuous;
  17609. + mac_dev->change_addr = fm_mac_modify_mac_addr;
  17610. + mac_dev->set_multi = set_multi;
  17611. + mac_dev->uninit = uninit;
  17612. + mac_dev->get_mac_handle = get_mac_handle;
  17613. + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
  17614. + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
  17615. + mac_dev->fm_rtc_enable = fm_rtc_enable;
  17616. + mac_dev->fm_rtc_disable = fm_rtc_disable;
  17617. + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
  17618. + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
  17619. + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
  17620. + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
  17621. + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
  17622. + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
  17623. + mac_dev->set_wol = fm_mac_set_wol;
  17624. + mac_dev->dump_mac_regs = memac_dump_regs;
  17625. + mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
  17626. + mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
  17627. +}
  17628. +
  17629. +void (*const mac_setup[])(struct mac_device *mac_dev) = {
  17630. + [DTSEC] = setup_dtsec,
  17631. + [XGMAC] = setup_xgmac,
  17632. + [MEMAC] = setup_memac
  17633. +};
  17634. --- /dev/null
  17635. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
  17636. @@ -0,0 +1,470 @@
  17637. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  17638. + *
  17639. + * Redistribution and use in source and binary forms, with or without
  17640. + * modification, are permitted provided that the following conditions are met:
  17641. + * * Redistributions of source code must retain the above copyright
  17642. + * notice, this list of conditions and the following disclaimer.
  17643. + * * Redistributions in binary form must reproduce the above copyright
  17644. + * notice, this list of conditions and the following disclaimer in the
  17645. + * documentation and/or other materials provided with the distribution.
  17646. + * * Neither the name of Freescale Semiconductor nor the
  17647. + * names of its contributors may be used to endorse or promote products
  17648. + * derived from this software without specific prior written permission.
  17649. + *
  17650. + *
  17651. + * ALTERNATIVELY, this software may be distributed under the terms of the
  17652. + * GNU General Public License ("GPL") as published by the Free Software
  17653. + * Foundation, either version 2 of that License or (at your option) any
  17654. + * later version.
  17655. + *
  17656. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  17657. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  17658. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  17659. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  17660. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  17661. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  17662. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  17663. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  17664. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  17665. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  17666. + */
  17667. +
  17668. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  17669. +#define pr_fmt(fmt) \
  17670. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  17671. + KBUILD_BASENAME".c", __LINE__, __func__
  17672. +#else
  17673. +#define pr_fmt(fmt) \
  17674. + KBUILD_MODNAME ": " fmt
  17675. +#endif
  17676. +
  17677. +#include <linux/init.h>
  17678. +#include <linux/module.h>
  17679. +#include <linux/of_address.h>
  17680. +#include <linux/of_platform.h>
  17681. +#include <linux/of_net.h>
  17682. +#include <linux/device.h>
  17683. +#include <linux/phy.h>
  17684. +#include <linux/io.h>
  17685. +
  17686. +#include "lnxwrp_fm_ext.h"
  17687. +
  17688. +#include "mac.h"
  17689. +
  17690. +#define DTSEC_SUPPORTED \
  17691. + (SUPPORTED_10baseT_Half \
  17692. + | SUPPORTED_10baseT_Full \
  17693. + | SUPPORTED_100baseT_Half \
  17694. + | SUPPORTED_100baseT_Full \
  17695. + | SUPPORTED_Autoneg \
  17696. + | SUPPORTED_Pause \
  17697. + | SUPPORTED_Asym_Pause \
  17698. + | SUPPORTED_MII)
  17699. +
  17700. +static const char phy_str[][11] = {
  17701. + [PHY_INTERFACE_MODE_MII] = "mii",
  17702. + [PHY_INTERFACE_MODE_GMII] = "gmii",
  17703. + [PHY_INTERFACE_MODE_SGMII] = "sgmii",
  17704. + [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
  17705. + [PHY_INTERFACE_MODE_TBI] = "tbi",
  17706. + [PHY_INTERFACE_MODE_RMII] = "rmii",
  17707. + [PHY_INTERFACE_MODE_RGMII] = "rgmii",
  17708. + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
  17709. + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
  17710. + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
  17711. + [PHY_INTERFACE_MODE_RTBI] = "rtbi",
  17712. + [PHY_INTERFACE_MODE_XGMII] = "xgmii",
  17713. + [PHY_INTERFACE_MODE_QSGMII] = "sgmii-2500"
  17714. +};
  17715. +
  17716. +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
  17717. +{
  17718. + int i;
  17719. +
  17720. + for (i = 0; i < ARRAY_SIZE(phy_str); i++)
  17721. + if (strcmp(str, phy_str[i]) == 0)
  17722. + return (phy_interface_t)i;
  17723. +
  17724. + return PHY_INTERFACE_MODE_MII;
  17725. +}
  17726. +
  17727. +static const uint16_t phy2speed[] = {
  17728. + [PHY_INTERFACE_MODE_MII] = SPEED_100,
  17729. + [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
  17730. + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
  17731. + [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
  17732. + [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
  17733. + [PHY_INTERFACE_MODE_RMII] = SPEED_100,
  17734. + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
  17735. + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
  17736. + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
  17737. + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
  17738. + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
  17739. + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
  17740. + [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500
  17741. +};
  17742. +
  17743. +static struct mac_device * __cold
  17744. +alloc_macdev(struct device *dev, size_t sizeof_priv,
  17745. + void (*setup)(struct mac_device *mac_dev))
  17746. +{
  17747. + struct mac_device *mac_dev;
  17748. +
  17749. + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
  17750. + if (unlikely(mac_dev == NULL))
  17751. + mac_dev = ERR_PTR(-ENOMEM);
  17752. + else {
  17753. + mac_dev->dev = dev;
  17754. + dev_set_drvdata(dev, mac_dev);
  17755. + setup(mac_dev);
  17756. + }
  17757. +
  17758. + return mac_dev;
  17759. +}
  17760. +
  17761. +static int __cold free_macdev(struct mac_device *mac_dev)
  17762. +{
  17763. + dev_set_drvdata(mac_dev->dev, NULL);
  17764. +
  17765. + return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
  17766. +}
  17767. +
  17768. +static const struct of_device_id mac_match[] = {
  17769. + [DTSEC] = {
  17770. + .compatible = "fsl,fman-1g-mac"
  17771. + },
  17772. + [XGMAC] = {
  17773. + .compatible = "fsl,fman-10g-mac"
  17774. + },
  17775. + [MEMAC] = {
  17776. + .compatible = "fsl,fman-memac"
  17777. + },
  17778. + {}
  17779. +};
  17780. +MODULE_DEVICE_TABLE(of, mac_match);
  17781. +
  17782. +static int __cold mac_probe(struct platform_device *_of_dev)
  17783. +{
  17784. + int _errno, i;
  17785. + struct device *dev;
  17786. + struct device_node *mac_node, *dev_node;
  17787. + struct mac_device *mac_dev;
  17788. + struct platform_device *of_dev;
  17789. + struct resource res;
  17790. + const uint8_t *mac_addr;
  17791. + const char *char_prop = NULL;
  17792. + int nph;
  17793. + u32 cell_index;
  17794. + const struct of_device_id *match;
  17795. +
  17796. + dev = &_of_dev->dev;
  17797. + mac_node = dev->of_node;
  17798. +
  17799. + match = of_match_device(mac_match, dev);
  17800. + if (!match)
  17801. + return -EINVAL;
  17802. +
  17803. + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
  17804. + i++)
  17805. + ;
  17806. + BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
  17807. +
  17808. + mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
  17809. + if (IS_ERR(mac_dev)) {
  17810. + _errno = PTR_ERR(mac_dev);
  17811. + dev_err(dev, "alloc_macdev() = %d\n", _errno);
  17812. + goto _return;
  17813. + }
  17814. +
  17815. + INIT_LIST_HEAD(&mac_dev->mc_addr_list);
  17816. +
  17817. + /* Get the FM node */
  17818. + dev_node = of_get_parent(mac_node);
  17819. + if (unlikely(dev_node == NULL)) {
  17820. + dev_err(dev, "of_get_parent(%s) failed\n",
  17821. + mac_node->full_name);
  17822. + _errno = -EINVAL;
  17823. + goto _return_dev_set_drvdata;
  17824. + }
  17825. +
  17826. + of_dev = of_find_device_by_node(dev_node);
  17827. + if (unlikely(of_dev == NULL)) {
  17828. + dev_err(dev, "of_find_device_by_node(%s) failed\n",
  17829. + dev_node->full_name);
  17830. + _errno = -EINVAL;
  17831. + goto _return_of_node_put;
  17832. + }
  17833. +
  17834. + mac_dev->fm_dev = fm_bind(&of_dev->dev);
  17835. + if (unlikely(mac_dev->fm_dev == NULL)) {
  17836. + dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
  17837. + _errno = -ENODEV;
  17838. + goto _return_of_node_put;
  17839. + }
  17840. +
  17841. + mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
  17842. + of_node_put(dev_node);
  17843. +
  17844. + /* Get the address of the memory mapped registers */
  17845. + _errno = of_address_to_resource(mac_node, 0, &res);
  17846. + if (unlikely(_errno < 0)) {
  17847. + dev_err(dev, "of_address_to_resource(%s) = %d\n",
  17848. + mac_node->full_name, _errno);
  17849. + goto _return_dev_set_drvdata;
  17850. + }
  17851. +
  17852. + mac_dev->res = __devm_request_region(
  17853. + dev,
  17854. + fm_get_mem_region(mac_dev->fm_dev),
  17855. + res.start, res.end + 1 - res.start, "mac");
  17856. + if (unlikely(mac_dev->res == NULL)) {
  17857. + dev_err(dev, "__devm_request_region(mac) failed\n");
  17858. + _errno = -EBUSY;
  17859. + goto _return_dev_set_drvdata;
  17860. + }
  17861. +
  17862. + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
  17863. + mac_dev->res->end + 1
  17864. + - mac_dev->res->start);
  17865. + if (unlikely(mac_dev->vaddr == NULL)) {
  17866. + dev_err(dev, "devm_ioremap() failed\n");
  17867. + _errno = -EIO;
  17868. + goto _return_dev_set_drvdata;
  17869. + }
  17870. +
  17871. +#define TBIPA_OFFSET 0x1c
  17872. +#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
  17873. + mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
  17874. + if (mac_dev->tbi_node) {
  17875. + u32 tbiaddr = TBIPA_DEFAULT_ADDR;
  17876. + const __be32 *tbi_reg;
  17877. + void __iomem *addr;
  17878. +
  17879. + tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
  17880. + if (tbi_reg)
  17881. + tbiaddr = be32_to_cpup(tbi_reg);
  17882. + addr = mac_dev->vaddr + TBIPA_OFFSET;
  17883. + /* iowrite32be() is portable; out_be32() does not exist on ARM */
  17884. + iowrite32be(tbiaddr, addr);
  17885. + }
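
[Editor's note] The TBIPA write programs the address the on-chip TBI PHY answers to on the MDIO bus; the register is big-endian regardless of CPU endianness, hence iowrite32be(). A user-space sketch of what a big-endian 32-bit store looks like on a little-endian host (store_be32 is an illustrative stand-in, ignoring the barriers the real MMIO accessor adds):

    #include <stdint.h>
    #include <stdio.h>

    static void store_be32(volatile uint32_t *addr, uint32_t val)
    {
        *addr = __builtin_bswap32(val); /* byte-swap on a LE host */
    }

    int main(void)
    {
        uint32_t reg;
        uint8_t *b = (uint8_t *)&reg;

        store_be32(&reg, 5); /* TBIPA_DEFAULT_ADDR */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        /* prints "00 00 00 05": most significant byte first */
        return 0;
    }
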
  17886. +
  17887. + if (!of_device_is_available(mac_node)) {
  17888. + devm_iounmap(dev, mac_dev->vaddr);
  17889. + __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
  17890. + res.start, res.end + 1 - res.start);
  17891. + fm_unbind(mac_dev->fm_dev);
  17892. + devm_kfree(dev, mac_dev);
  17893. + dev_set_drvdata(dev, NULL);
  17894. + return -ENODEV;
  17895. + }
  17896. +
  17897. + /* Get the cell-index */
  17898. + _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
  17899. + if (unlikely(_errno)) {
  17900. + dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
  17901. + mac_node->full_name);
  17902. + goto _return_dev_set_drvdata;
  17903. + }
  17904. + mac_dev->cell_index = (uint8_t)cell_index;
  17905. +
  17906. + /* Get the MAC address */
  17907. + mac_addr = of_get_mac_address(mac_node);
  17908. + if (unlikely(mac_addr == NULL)) {
  17909. + dev_err(dev, "of_get_mac_address(%s) failed\n",
  17910. + mac_node->full_name);
  17911. + _errno = -EINVAL;
  17912. + goto _return_dev_set_drvdata;
  17913. + }
  17914. + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
  17915. +
  17916. + /* Verify the number of port handles */
  17917. + nph = of_count_phandle_with_args(mac_node, "fsl,port-handles", NULL);
  17918. + if (unlikely(nph < 0)) {
  17919. + dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
  17920. + mac_node->full_name);
  17921. + _errno = nph;
  17922. + goto _return_dev_set_drvdata;
  17923. + }
  17924. +
  17925. + if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
  17926. + dev_err(dev, "Unsupported number of port handles in mac node %s\n",
  17927. + mac_node->full_name);
  17928. + _errno = -EINVAL;
  17929. + goto _return_dev_set_drvdata;
  17930. + }
  17931. +
  17932. + for_each_port_device(i, mac_dev->port_dev) {
  17933. + dev_node = of_parse_phandle(mac_node, "fsl,port-handles", i);
  17934. + if (unlikely(dev_node == NULL)) {
  17935. + dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
  17936. + mac_node->full_name);
  17937. + _errno = -EINVAL;
  17938. + goto _return_of_node_put;
  17939. + }
  17940. +
  17941. + of_dev = of_find_device_by_node(dev_node);
  17942. + if (unlikely(of_dev == NULL)) {
  17943. + dev_err(dev, "of_find_device_by_node(%s) failed\n",
  17944. + dev_node->full_name);
  17945. + _errno = -EINVAL;
  17946. + goto _return_of_node_put;
  17947. + }
  17948. +
  17949. + mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
  17950. + if (unlikely(mac_dev->port_dev[i] == NULL)) {
  17951. + dev_err(dev, "fm_port_bind(%s) failed\n",
  17952. + dev_node->full_name);
  17953. + _errno = -EINVAL;
  17954. + goto _return_of_node_put;
  17955. + }
  17956. + of_node_put(dev_node);
  17957. + }
  17958. +
  17959. + /* Get the PHY connection type */
  17960. + _errno = of_property_read_string(mac_node, "phy-connection-type",
  17961. + &char_prop);
  17962. + if (unlikely(_errno)) {
  17963. + dev_warn(dev,
  17964. + "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
  17965. + mac_node->full_name);
  17966. + mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
  17967. + } else
  17968. + mac_dev->phy_if = str2phy(char_prop);
  17969. +
  17970. + mac_dev->link = false;
  17971. + mac_dev->half_duplex = false;
  17972. + mac_dev->speed = phy2speed[mac_dev->phy_if];
  17973. + mac_dev->max_speed = mac_dev->speed;
  17974. + mac_dev->if_support = DTSEC_SUPPORTED;
  17975. + /* We don't support half-duplex in the SGMII-based modes;
  17976. + * strstr() matches "sgmii", "qsgmii" and "sgmii-2500" alike.
  17977. + * char_prop may be NULL if the property was absent above.
  17978. + */
  17979. + if (char_prop && strstr(char_prop, "sgmii"))
  17980. + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
  17981. + SUPPORTED_100baseT_Half);
  17983. +
  17984. + /* Gigabit support (no half-duplex) */
  17985. + if (mac_dev->max_speed == 1000)
  17986. + mac_dev->if_support |= SUPPORTED_1000baseT_Full;
  17987. +
  17988. + /* The 10G interface only supports one mode */
  17989. + if (char_prop && strstr(char_prop, "xgmii"))
  17990. + mac_dev->if_support = SUPPORTED_10000baseT_Full;
  17991. +
  17992. + /* Get the rest of the PHY information */
  17993. + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
  17994. + if (mac_dev->phy_node == NULL) {
  17995. + u32 phy_id;
  17996. +
  17997. + _errno = of_property_read_u32(mac_node, "fixed-link", &phy_id);
  17998. + if (_errno) {
  17999. + dev_err(dev, "No PHY (or fixed link) found\n");
  18000. + _errno = -EINVAL;
  18001. + goto _return_dev_set_drvdata;
  18002. + }
  18003. +
  18004. + sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "fixed-0",
  18005. + phy_id);
  18006. + }
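
[Editor's note] For the fixed-link case, fixed_bus_id is formatted with PHY_ID_FMT ("%s:%02x" in include/linux/phy.h): the fixed MDIO bus name plus the PHY address. A quick user-space check of the resulting string (the address value is an example):

    #include <stdio.h>

    #define PHY_ID_FMT "%s:%02x" /* as in include/linux/phy.h */

    int main(void)
    {
        char bus_id[20];

        snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0", 1u);
        printf("%s\n", bus_id); /* prints "fixed-0:01" */
        return 0;
    }
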
  18007. +
  18008. + _errno = mac_dev->init(mac_dev);
  18009. + if (unlikely(_errno < 0)) {
  18010. + dev_err(dev, "mac_dev->init() = %d\n", _errno);
  18011. + goto _return_dev_set_drvdata;
  18012. + }
  18013. +
  18014. + /* pause frame autonegotiation enabled */
  18015. + mac_dev->autoneg_pause = true;
  18016. +
  18017. + /* the requested values are true while the active values start out
  18018. + * false, forcing FMD to enable PAUSE frames on both RX and TX
  18019. + */
  18020. + mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
  18021. + mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
  18022. + _errno = set_mac_active_pause(mac_dev, true, true);
  18023. + if (unlikely(_errno < 0))
  18024. + dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
  18025. +
  18026. + dev_info(dev,
  18027. + "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
  18028. + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
  18029. + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
  18030. +
  18031. + goto _return;
  18032. +
  18033. +_return_of_node_put:
  18034. + of_node_put(dev_node);
  18035. +_return_dev_set_drvdata:
  18036. + dev_set_drvdata(dev, NULL);
  18037. +_return:
  18038. + return _errno;
  18039. +}
  18040. +
  18041. +static int __cold mac_remove(struct platform_device *of_dev)
  18042. +{
  18043. + int i, _errno;
  18044. + struct device *dev;
  18045. + struct mac_device *mac_dev;
  18046. +
  18047. + dev = &of_dev->dev;
  18048. + mac_dev = (struct mac_device *)dev_get_drvdata(dev);
  18049. +
  18050. + for_each_port_device(i, mac_dev->port_dev)
  18051. + fm_port_unbind(mac_dev->port_dev[i]);
  18052. +
  18053. + fm_unbind(mac_dev->fm_dev);
  18054. +
  18055. + _errno = free_macdev(mac_dev);
  18056. +
  18057. + return _errno;
  18058. +}
  18059. +
  18060. +static struct platform_driver mac_driver = {
  18061. + .driver = {
  18062. + .name = KBUILD_MODNAME,
  18063. + .of_match_table = mac_match,
  18064. + .owner = THIS_MODULE,
  18065. + },
  18066. + .probe = mac_probe,
  18067. + .remove = mac_remove
  18068. +};
  18069. +
  18070. +static int __init __cold mac_load(void)
  18071. +{
  18072. + int _errno;
  18073. +
  18074. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  18075. + KBUILD_BASENAME".c", __func__);
  18076. +
  18077. + pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
  18078. +
  18079. + _errno = platform_driver_register(&mac_driver);
  18080. + if (unlikely(_errno < 0)) {
  18081. + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
  18082. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  18083. + goto _return;
  18084. + }
  18085. +
  18088. +_return:
  18089. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  18090. + KBUILD_BASENAME".c", __func__);
  18091. +
  18092. + return _errno;
  18093. +}
  18094. +module_init(mac_load);
  18095. +
  18096. +static void __exit __cold mac_unload(void)
  18097. +{
  18098. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  18099. + KBUILD_BASENAME".c", __func__);
  18100. +
  18101. + platform_driver_unregister(&mac_driver);
  18102. +
  18103. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  18104. + KBUILD_BASENAME".c", __func__);
  18105. +}
  18106. +module_exit(mac_unload);
  18107. --- /dev/null
  18108. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
  18109. @@ -0,0 +1,134 @@
  18110. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  18111. + *
  18112. + * Redistribution and use in source and binary forms, with or without
  18113. + * modification, are permitted provided that the following conditions are met:
  18114. + * * Redistributions of source code must retain the above copyright
  18115. + * notice, this list of conditions and the following disclaimer.
  18116. + * * Redistributions in binary form must reproduce the above copyright
  18117. + * notice, this list of conditions and the following disclaimer in the
  18118. + * documentation and/or other materials provided with the distribution.
  18119. + * * Neither the name of Freescale Semiconductor nor the
  18120. + * names of its contributors may be used to endorse or promote products
  18121. + * derived from this software without specific prior written permission.
  18122. + *
  18123. + *
  18124. + * ALTERNATIVELY, this software may be distributed under the terms of the
  18125. + * GNU General Public License ("GPL") as published by the Free Software
  18126. + * Foundation, either version 2 of that License or (at your option) any
  18127. + * later version.
  18128. + *
  18129. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  18130. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  18131. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18132. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  18133. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  18134. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  18135. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  18136. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  18137. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  18138. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  18139. + */
  18140. +
  18141. +#ifndef __MAC_H
  18142. +#define __MAC_H
  18143. +
  18144. +#include <linux/device.h> /* struct device, BUS_ID_SIZE */
  18145. +#include <linux/if_ether.h> /* ETH_ALEN */
  18146. +#include <linux/phy.h> /* phy_interface_t, struct phy_device */
  18147. +#include <linux/list.h>
  18148. +
  18149. +#include "lnxwrp_fsl_fman.h" /* struct port_device */
  18150. +
  18151. +enum {DTSEC, XGMAC, MEMAC};
  18152. +
  18153. +struct mac_device {
  18154. + struct device *dev;
  18155. + void *priv;
  18156. + uint8_t cell_index;
  18157. + struct resource *res;
  18158. + void __iomem *vaddr;
  18159. + uint8_t addr[ETH_ALEN];
  18160. + bool promisc;
  18161. +
  18162. + struct fm *fm_dev;
  18163. + struct fm_port *port_dev[2];
  18164. +
  18165. + phy_interface_t phy_if;
  18166. + u32 if_support;
  18167. + bool link;
  18168. + bool half_duplex;
  18169. + uint16_t speed;
  18170. + uint16_t max_speed;
  18171. + struct device_node *phy_node;
  18172. + char fixed_bus_id[MII_BUS_ID_SIZE + 3];
  18173. + struct device_node *tbi_node;
  18174. + struct phy_device *phy_dev;
  18175. + void *fm;
  18176. + /* List of multicast addresses */
  18177. + struct list_head mc_addr_list;
  18178. +
  18179. + bool autoneg_pause;
  18180. + bool rx_pause_req;
  18181. + bool tx_pause_req;
  18182. + bool rx_pause_active;
  18183. + bool tx_pause_active;
  18184. +
  18185. + struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
  18186. + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
  18187. + int (*init)(struct mac_device *mac_dev);
  18188. + int (*start)(struct mac_device *mac_dev);
  18189. + int (*stop)(struct mac_device *mac_dev);
  18190. + int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
  18191. + int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
  18192. + int (*set_multi)(struct net_device *net_dev,
  18193. + struct mac_device *mac_dev);
  18194. + int (*uninit)(struct fm_mac_dev *fm_mac_dev);
  18195. + int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
  18196. + int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
  18197. + int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
  18198. + int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
  18199. + int (*fm_rtc_enable)(struct fm *fm_dev);
  18200. + int (*fm_rtc_disable)(struct fm *fm_dev);
  18201. + int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
  18202. + int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
  18203. + int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
  18204. + int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
  18205. + int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
  18206. + int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
  18207. + uint64_t fiper);
  18208. +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
  18209. + int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
  18210. + int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
  18211. +#endif
  18212. + int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
  18213. + bool en);
  18214. + int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
  18215. + int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
  18216. + int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
  18217. +};
  18218. +
  18219. +struct mac_address {
  18220. + uint8_t addr[ETH_ALEN];
  18221. + struct list_head list;
  18222. +};
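
[Editor's note] A mac_address node wraps one Ethernet address so it can sit on mac_dev->mc_addr_list (initialized in mac_probe()). A hedged sketch of a helper that queues an address on that list (mac_add_mc_addr_sketch is hypothetical, not part of this patch; assumes linux/slab.h for kmalloc):

    /* Hypothetical helper, not part of the driver */
    static int mac_add_mc_addr_sketch(struct mac_device *mac_dev,
                                      const uint8_t *addr)
    {
        struct mac_address *ma;

        ma = kmalloc(sizeof(*ma), GFP_ATOMIC);
        if (!ma)
            return -ENOMEM;
        memcpy(ma->addr, addr, ETH_ALEN);
        list_add(&ma->list, &mac_dev->mc_addr_list);
        return 0;
    }
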
  18223. +
  18224. +#define get_fm_handle(net_dev) \
  18225. + (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
  18226. +
  18227. +#define for_each_port_device(i, port_dev) \
  18228. + for (i = 0; i < ARRAY_SIZE(port_dev); i++)
  18229. +
  18230. +static inline __attribute__((nonnull)) void *macdev_priv(
  18231. + const struct mac_device *mac_dev)
  18232. +{
  18233. + return (void *)mac_dev + sizeof(*mac_dev);
  18234. +}
  18235. +
  18236. +extern const char *mac_driver_description;
  18237. +extern const size_t mac_sizeof_priv[];
  18238. +extern void (*const mac_setup[])(struct mac_device *mac_dev);
  18239. +
  18240. +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
  18241. +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
  18242. +
  18243. +#endif /* __MAC_H */
  18244. --- /dev/null
  18245. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
  18246. @@ -0,0 +1,848 @@
  18247. +/* Copyright 2011-2012 Freescale Semiconductor Inc.
  18248. + *
  18249. + * Redistribution and use in source and binary forms, with or without
  18250. + * modification, are permitted provided that the following conditions are met:
  18251. + * * Redistributions of source code must retain the above copyright
  18252. + * notice, this list of conditions and the following disclaimer.
  18253. + * * Redistributions in binary form must reproduce the above copyright
  18254. + * notice, this list of conditions and the following disclaimer in the
  18255. + * documentation and/or other materials provided with the distribution.
  18256. + * * Neither the name of Freescale Semiconductor nor the
  18257. + * names of its contributors may be used to endorse or promote products
  18258. + * derived from this software without specific prior written permission.
  18259. + *
  18260. + *
  18261. + * ALTERNATIVELY, this software may be distributed under the terms of the
  18262. + * GNU General Public License ("GPL") as published by the Free Software
  18263. + * Foundation, either version 2 of that License or (at your option) any
  18264. + * later version.
  18265. + *
  18266. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  18267. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  18268. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18269. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  18270. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  18271. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  18272. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  18273. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  18274. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  18275. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  18276. + */
  18277. +
  18278. +/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
  18279. + * Validates device-tree configuration and sets up the offline ports.
  18280. + */
  18281. +
  18282. +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
  18283. +#define pr_fmt(fmt) \
  18284. + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
  18285. + KBUILD_BASENAME".c", __LINE__, __func__
  18286. +#else
  18287. +#define pr_fmt(fmt) \
  18288. + KBUILD_MODNAME ": " fmt
  18289. +#endif
  18290. +
  18291. +
  18292. +#include <linux/init.h>
  18293. +#include <linux/module.h>
  18294. +#include <linux/of_platform.h>
  18295. +#include <linux/fsl_qman.h>
  18296. +
  18297. +#include "offline_port.h"
  18298. +#include "dpaa_eth.h"
  18299. +#include "dpaa_eth_common.h"
  18300. +
  18301. +#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
  18302. +/* Manip extra space and data alignment for fragmentation */
  18303. +#define FRAG_MANIP_SPACE 128
  18304. +#define FRAG_DATA_ALIGN 64
  18305. +
  18306. +
  18307. +MODULE_LICENSE("Dual BSD/GPL");
  18308. +MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
  18309. +MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
  18310. +
  18311. +
  18312. +static const struct of_device_id oh_port_match_table[] = {
  18313. + {
  18314. + .compatible = "fsl,dpa-oh"
  18315. + },
  18316. + {
  18317. + .compatible = "fsl,dpa-oh-shared"
  18318. + },
  18319. + {}
  18320. +};
  18321. +MODULE_DEVICE_TABLE(of, oh_port_match_table);
  18322. +
  18323. +#ifdef CONFIG_PM
  18324. +
  18325. +static int oh_suspend(struct device *dev)
  18326. +{
  18327. + struct dpa_oh_config_s *oh_config;
  18328. +
  18329. + oh_config = dev_get_drvdata(dev);
  18330. + return fm_port_suspend(oh_config->oh_port);
  18331. +}
  18332. +
  18333. +static int oh_resume(struct device *dev)
  18334. +{
  18335. + struct dpa_oh_config_s *oh_config;
  18336. +
  18337. + oh_config = dev_get_drvdata(dev);
  18338. + return fm_port_resume(oh_config->oh_port);
  18339. +}
  18340. +
  18341. +static const struct dev_pm_ops oh_pm_ops = {
  18342. + .suspend = oh_suspend,
  18343. + .resume = oh_resume,
  18344. +};
  18345. +
  18346. +#define OH_PM_OPS (&oh_pm_ops)
  18347. +
  18348. +#else /* CONFIG_PM */
  18349. +
  18350. +#define OH_PM_OPS NULL
  18351. +
  18352. +#endif /* CONFIG_PM */
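
[Editor's note] The CONFIG_PM block above hand-rolls what the SIMPLE_DEV_PM_OPS() helper expresses in one line; a sketch of the near-equivalent declaration (note SIMPLE_DEV_PM_OPS keys off CONFIG_PM_SLEEP rather than bare CONFIG_PM, so it is a close but not exact match):

    /* Near-equivalent one-liner, were the driver to use the helper */
    static SIMPLE_DEV_PM_OPS(oh_pm_ops, oh_suspend, oh_resume);
    #define OH_PM_OPS (&oh_pm_ops)
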
  18353. +
  18354. +/* Creates Frame Queues */
  18355. +static uint32_t oh_fq_create(struct qman_fq *fq,
  18356. + uint32_t fq_id, uint16_t channel,
  18357. + uint16_t wq_id)
  18358. +{
  18359. + struct qm_mcc_initfq fq_opts;
  18360. + uint32_t create_flags, init_flags;
  18361. + uint32_t ret = 0;
  18362. +
  18363. + if (fq == NULL)
  18364. + return 1;
  18365. +
  18366. + /* Set flags for FQ create */
  18367. + create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
  18368. +
  18369. + /* Create frame queue */
  18370. + ret = qman_create_fq(fq_id, create_flags, fq);
  18371. + if (ret != 0)
  18372. + return 1;
  18373. +
  18374. + /* Set flags for FQ init */
  18375. + init_flags = QMAN_INITFQ_FLAG_SCHED;
  18376. +
  18377. + /* Set FQ init options. Specify destination WQ ID and channel */
  18378. + fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
  18379. + fq_opts.fqd.dest.wq = wq_id;
  18380. + fq_opts.fqd.dest.channel = channel;
  18381. +
  18382. + /* Initialize frame queue */
  18383. + ret = qman_init_fq(fq, init_flags, &fq_opts);
  18384. + if (ret != 0) {
  18385. + qman_destroy_fq(fq, 0);
  18386. + return 1;
  18387. + }
  18388. +
  18389. + return 0;
  18390. +}
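
[Editor's note] A minimal usage sketch for oh_fq_create() above (the FQID, channel and work-queue values are made up for illustration):

    struct qman_fq fq;

    /* FQID 0x2000 on channel 0x42, work queue 3 -- example values only */
    if (oh_fq_create(&fq, 0x2000, 0x42, 3) != 0)
        pr_err("cannot create FQ 0x2000\n");
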
  18391. +
  18392. +static void dump_fq(struct device *dev, int fqid, uint16_t channel)
  18393. +{
  18394. + if (channel) {
  18395. + /* display fqs with a valid (!= 0) destination channel */
  18396. + dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
  18397. + }
  18398. +}
  18399. +
  18400. +static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
  18401. + int fqs_count, uint16_t channel_id)
  18402. +{
  18403. + int i;
  18404. + for (i = 0; i < fqs_count; i++)
  18405. + dump_fq(dev, (fqs + i)->fqid, channel_id);
  18406. +}
  18407. +
  18408. +static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
  18409. +{
  18410. + struct list_head *fq_list;
  18411. + struct fq_duple *fqd;
  18412. + int i;
  18413. +
  18414. + dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
  18415. + dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
  18416. +
  18417. + /* TX queues (old initialization) */
  18418. + dev_info(dev, "Initialized queues:\n");
  18419. + for (i = 0; i < conf->egress_cnt; i++)
  18420. + dump_fq(dev, (conf->egress_fqs + i)->fqid,
  18421. + conf->channel);
  18422. +
  18423. + /* initialized ingress queues */
  18424. + list_for_each(fq_list, &conf->fqs_ingress_list) {
  18425. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  18426. + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
  18427. + }
  18428. +
  18429. + /* initialized egress queues */
  18430. + list_for_each(fq_list, &conf->fqs_egress_list) {
  18431. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  18432. + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
  18433. + }
  18434. +}
  18435. +
  18436. +/* Destroys Frame Queues */
  18437. +static void oh_fq_destroy(struct qman_fq *fq)
  18438. +{
  18439. + int _errno = 0;
  18440. +
  18441. + _errno = qman_retire_fq(fq, NULL);
  18442. + if (unlikely(_errno < 0))
  18443. + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
  18444. + KBUILD_BASENAME".c", __LINE__, __func__,
  18445. + qman_fq_fqid(fq), _errno);
  18446. +
  18447. + _errno = qman_oos_fq(fq);
  18448. + if (unlikely(_errno < 0)) {
  18449. + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
  18450. + KBUILD_BASENAME".c", __LINE__, __func__,
  18451. + qman_fq_fqid(fq), _errno);
  18452. + }
  18453. +
  18454. + qman_destroy_fq(fq, 0);
  18455. +}
  18456. +
  18457. +/* Allocation code for the OH port's PCD frame queues */
  18458. +static int __cold oh_alloc_pcd_fqids(struct device *dev,
  18459. + uint32_t num,
  18460. + uint8_t alignment,
  18461. + uint32_t *base_fqid)
  18462. +{
  18463. + dev_crit(dev, "callback not implemented!\n");
  18464. + BUG();
  18465. +
  18466. + return 0;
  18467. +}
  18468. +
  18469. +static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
  18470. +{
  18471. + dev_crit(dev, "callback not implemented!\n");
  18472. + BUG();
  18473. +
  18474. + return 0;
  18475. +}
  18476. +
  18477. +static void oh_set_buffer_layout(struct fm_port *port,
  18478. + struct dpa_buffer_layout_s *layout)
  18479. +{
  18480. + struct fm_port_params params;
  18481. +
  18482. + layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
  18483. + layout->parse_results = true;
  18484. + layout->hash_results = true;
  18485. + layout->time_stamp = false;
  18486. +
  18487. + fm_port_get_buff_layout_ext_params(port, &params);
  18488. + layout->manip_extra_space = params.manip_extra_space;
  18489. + layout->data_align = params.data_align;
  18490. +}
  18491. +
  18492. +static int
  18493. +oh_port_probe(struct platform_device *_of_dev)
  18494. +{
  18495. + struct device *dpa_oh_dev;
  18496. + struct device_node *dpa_oh_node;
  18497. + int lenp, _errno = 0, fq_idx, duple_idx;
  18498. + int n_size, i, j, ret, duples_count;
  18499. + struct platform_device *oh_of_dev;
  18500. + struct device_node *oh_node, *bpool_node = NULL, *root_node;
  18501. + struct device *oh_dev;
  18502. + struct dpa_oh_config_s *oh_config = NULL;
  18503. + const __be32 *oh_all_queues;
  18504. + const __be32 *channel_ids;
  18505. + const __be32 *oh_tx_queues;
  18506. + uint32_t queues_count;
  18507. + uint32_t crt_fqid_base;
  18508. + uint32_t crt_fq_count;
  18509. + bool frag_enabled = false;
  18510. + struct fm_port_params oh_port_tx_params;
  18511. + struct fm_port_pcd_param oh_port_pcd_params;
  18512. + struct dpa_buffer_layout_s buf_layout;
  18513. +
  18514. + /* True if the current partition owns the OH port. */
  18515. + bool init_oh_port;
  18516. +
  18517. + const struct of_device_id *match;
  18518. + int crt_ext_pools_count;
  18519. + u32 ext_pool_size;
  18520. + u32 port_id;
  18521. + u32 channel_id;
  18522. +
  18523. + int channel_ids_count;
  18524. + int channel_idx;
  18525. + struct fq_duple *fqd;
  18526. + struct list_head *fq_list, *fq_list_tmp;
  18527. +
  18528. + const __be32 *bpool_cfg;
  18529. + uint32_t bpid;
  18530. +
  18531. + memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
  18532. + dpa_oh_dev = &_of_dev->dev;
  18533. + dpa_oh_node = dpa_oh_dev->of_node;
  18534. + BUG_ON(dpa_oh_node == NULL);
  18535. +
  18536. + match = of_match_device(oh_port_match_table, dpa_oh_dev);
  18537. + if (!match)
  18538. + return -EINVAL;
  18539. +
  18540. + dev_dbg(dpa_oh_dev, "Probing OH port...\n");
  18541. +
  18542. + /* Find the referenced OH node */
  18543. + oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
  18544. + if (oh_node == NULL) {
  18545. + dev_err(dpa_oh_dev,
  18546. + "Can't find OH node referenced from node %s\n",
  18547. + dpa_oh_node->full_name);
  18548. + return -EINVAL;
  18549. + }
  18550. + dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
  18551. + match->compatible);
  18552. +
  18553. + _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
  18554. + if (_errno) {
  18555. + dev_err(dpa_oh_dev, "No port id found in node %s\n",
  18556. + dpa_oh_node->full_name);
  18557. + goto return_kfree;
  18558. + }
  18559. +
  18560. + _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
  18561. + &channel_id);
  18562. + if (_errno) {
  18563. + dev_err(dpa_oh_dev, "No channel id found in node %s\n",
  18564. + dpa_oh_node->full_name);
  18565. + goto return_kfree;
  18566. + }
  18567. +
  18568. + oh_of_dev = of_find_device_by_node(oh_node);
  18569. + BUG_ON(oh_of_dev == NULL);
  18570. + oh_dev = &oh_of_dev->dev;
  18571. +
  18572. + /* The OH port must be initialized exactly once.
  18573. + * The following scenarios are of interest:
  18574. + * - the node is Linux-private (will always initialize it);
  18575. + * - the node is shared between two Linux partitions
  18576. + * (only one of them will initialize it);
  18577. + * - the node is shared between a Linux and a LWE partition
  18578. + * (Linux will initialize it) - "fsl,dpa-oh-shared"
  18579. + */
  18580. +
  18581. + /* Check if the current partition owns the OH port
  18582. + * and ought to initialize it. It may be the case that we leave this
  18583. + * to another (also Linux) partition.
  18584. + */
  18585. + init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
  18586. +
  18587. + /* If we aren't the "owner" of the OH node, we're done here. */
  18588. + if (!init_oh_port) {
  18589. + dev_dbg(dpa_oh_dev,
  18590. + "Not owning the shared OH port %s, will not initialize it.\n",
  18591. + oh_node->full_name);
  18592. + of_node_put(oh_node);
  18593. + return 0;
  18594. + }
  18595. +
  18596. + /* Allocate OH dev private data */
  18597. + oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
  18598. + if (oh_config == NULL) {
  18599. + dev_err(dpa_oh_dev,
  18600. + "Can't allocate private data for OH node %s referenced from node %s!\n",
  18601. + oh_node->full_name, dpa_oh_node->full_name);
  18602. + _errno = -ENOMEM;
  18603. + goto return_kfree;
  18604. + }
  18605. +
  18606. + INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
  18607. + INIT_LIST_HEAD(&oh_config->fqs_egress_list);
  18608. +
  18609. + /* FQs that enter OH port */
  18610. + lenp = 0;
  18611. + oh_all_queues = of_get_property(dpa_oh_node,
  18612. + "fsl,qman-frame-queues-ingress", &lenp);
  18613. + if (lenp % (2 * sizeof(*oh_all_queues))) {
  18614. + dev_warn(dpa_oh_dev,
  18615. + "Wrong ingress queues format for OH node %s referenced from node %s!\n",
  18616. + oh_node->full_name, dpa_oh_node->full_name);
  18617. + /* just ignore the last unpaired value */
  18618. + }
  18619. +
  18620. + duples_count = lenp / (2 * sizeof(*oh_all_queues));
  18621. + dev_dbg(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
  18622. + duples_count);
  18623. + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
  18624. + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
  18625. + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
  18626. +
  18627. + fqd = devm_kzalloc(dpa_oh_dev,
  18628. + sizeof(struct fq_duple), GFP_KERNEL);
  18629. + if (!fqd) {
  18630. + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
  18631. + oh_node->full_name,
  18632. + dpa_oh_node->full_name);
  18633. + _errno = -ENOMEM;
  18634. + goto return_kfree;
  18635. + }
  18636. +
  18637. + fqd->fqs = devm_kzalloc(dpa_oh_dev,
  18638. + crt_fq_count * sizeof(struct qman_fq),
  18639. + GFP_KERNEL);
  18640. + if (!fqd->fqs) {
  18641. + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
  18642. + oh_node->full_name,
  18643. + dpa_oh_node->full_name);
  18644. + _errno = -ENOMEM;
  18645. + goto return_kfree;
  18646. + }
  18647. +
  18648. + for (j = 0; j < crt_fq_count; j++)
  18649. + (fqd->fqs + j)->fqid = crt_fqid_base + j;
  18650. + fqd->fqs_count = crt_fq_count;
  18651. + fqd->channel_id = (uint16_t)channel_id;
  18652. + list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
  18653. + }
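
[Editor's note] The fsl,qman-frame-queues-ingress property is a flat array of big-endian cells read as <base count> duples, so lenp / (2 * sizeof(cell)) yields the duple count. A user-space sketch of the same arithmetic (property contents invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t be32(uint32_t x) { return __builtin_bswap32(x); } /* LE host */

    int main(void)
    {
        /* e.g. DT: fsl,qman-frame-queues-ingress = <0x3e80 2 0x3f00 1>; */
        uint32_t cells[] = { be32(0x3e80), be32(2), be32(0x3f00), be32(1) };
        int lenp = (int)sizeof(cells);
        int duples = lenp / (2 * (int)sizeof(cells[0]));
        int d;

        for (d = 0; d < duples; d++)
            printf("base 0x%x count %u\n",
                   be32(cells[2 * d]), be32(cells[2 * d + 1]));
        return 0;
    }
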
  18654. +
  18655. + /* create the ingress queues */
  18656. + list_for_each(fq_list, &oh_config->fqs_ingress_list) {
  18657. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  18658. +
  18659. + for (j = 0; j < fqd->fqs_count; j++) {
  18660. + ret = oh_fq_create(fqd->fqs + j,
  18661. + (fqd->fqs + j)->fqid,
  18662. + fqd->channel_id, 3);
  18663. + if (ret != 0) {
  18664. + dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
  18665. + (fqd->fqs + j)->fqid,
  18666. + oh_node->full_name,
  18667. + dpa_oh_node->full_name);
  18668. + _errno = -EINVAL;
  18669. + goto return_kfree;
  18670. + }
  18671. + }
  18672. + }
  18673. +
  18674. + /* FQs that exit OH port */
  18675. + lenp = 0;
  18676. + oh_all_queues = of_get_property(dpa_oh_node,
  18677. + "fsl,qman-frame-queues-egress", &lenp);
  18678. + if (lenp % (2 * sizeof(*oh_all_queues))) {
  18679. + dev_warn(dpa_oh_dev,
  18680. + "Wrong egress queues format for OH node %s referenced from node %s!\n",
  18681. + oh_node->full_name, dpa_oh_node->full_name);
  18682. + /* just ignore the last unpaired value */
  18683. + }
  18684. +
  18685. + duples_count = lenp / (2 * sizeof(*oh_all_queues));
  18686. + dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
  18687. + duples_count);
  18688. + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
  18689. + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
  18690. + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
  18691. +
  18692. + fqd = devm_kzalloc(dpa_oh_dev,
  18693. + sizeof(struct fq_duple), GFP_KERNEL);
  18694. + if (!fqd) {
  18695. + dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
  18696. + oh_node->full_name,
  18697. + dpa_oh_node->full_name);
  18698. + _errno = -ENOMEM;
  18699. + goto return_kfree;
  18700. + }
  18701. +
  18702. + fqd->fqs = devm_kzalloc(dpa_oh_dev,
  18703. + crt_fq_count * sizeof(struct qman_fq),
  18704. + GFP_KERNEL);
  18705. + if (!fqd->fqs) {
  18706. + dev_err(dpa_oh_dev,
  18707. + "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
  18708. + oh_node->full_name,
  18709. + dpa_oh_node->full_name);
  18710. + _errno = -ENOMEM;
  18711. + goto return_kfree;
  18712. + }
  18713. +
  18714. + for (j = 0; j < crt_fq_count; j++)
  18715. + (fqd->fqs + j)->fqid = crt_fqid_base + j;
  18716. + fqd->fqs_count = crt_fq_count;
  18717. + /* channel ID is specified in another attribute */
  18718. + fqd->channel_id = 0;
  18719. + list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
  18720. +
  18724. +
  18725. + /* channel_ids for FQs that exit OH port */
  18726. + lenp = 0;
  18727. + channel_ids = of_get_property(dpa_oh_node,
  18728. + "fsl,qman-channel-ids-egress", &lenp);
  18729. +
  18730. + channel_ids_count = lenp / (sizeof(*channel_ids));
  18731. + if (channel_ids_count != duples_count) {
  18732. + dev_warn(dpa_oh_dev,
  18733. + "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
  18734. + oh_node->full_name, dpa_oh_node->full_name);
  18735. + /* just ignore the queues that do not have a Channel ID */
  18736. + }
  18737. +
  18738. + channel_idx = 0;
  18739. + list_for_each(fq_list, &oh_config->fqs_egress_list) {
  18740. + if (channel_idx + 1 > channel_ids_count)
  18741. + break;
  18742. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  18743. + fqd->channel_id =
  18744. + (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
  18745. + }
  18746. +
  18747. + /* create egress queues */
  18748. + list_for_each(fq_list, &oh_config->fqs_egress_list) {
  18749. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  18750. +
  18751. + if (fqd->channel_id == 0) {
  18752. + /* missing channel id in dts */
  18753. + continue;
  18754. + }
  18755. +
  18756. + for (j = 0; j < fqd->fqs_count; j++) {
  18757. + ret = oh_fq_create(fqd->fqs + j,
  18758. + (fqd->fqs + j)->fqid,
  18759. + fqd->channel_id, 3);
  18760. + if (ret != 0) {
  18761. + dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
  18762. + (fqd->fqs + j)->fqid,
  18763. + oh_node->full_name,
  18764. + dpa_oh_node->full_name);
  18765. + _errno = -EINVAL;
  18766. + goto return_kfree;
  18767. + }
  18768. + }
  18769. + }
  18770. +
  18771. + /* Read FQ ids/nums for the DPA OH node */
  18772. + oh_all_queues = of_get_property(dpa_oh_node,
  18773. + "fsl,qman-frame-queues-oh", &lenp);
  18774. + if (oh_all_queues == NULL) {
  18775. + dev_err(dpa_oh_dev,
  18776. + "No frame queues have been defined for OH node %s referenced from node %s\n",
  18777. + oh_node->full_name, dpa_oh_node->full_name);
  18778. + _errno = -EINVAL;
  18779. + goto return_kfree;
  18780. + }
  18781. +
  18782. + /* Check that the OH error and default FQs are there */
  18783. + BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
  18784. + queues_count = lenp / (2 * sizeof(*oh_all_queues));
  18785. + if (queues_count != 2) {
  18786. + dev_err(dpa_oh_dev,
  18787. + "Error and Default queues must be defined for OH node %s referenced from node %s\n",
  18788. + oh_node->full_name, dpa_oh_node->full_name);
  18789. + _errno = -EINVAL;
  18790. + goto return_kfree;
  18791. + }
  18792. +
  18793. + /* Read the FQIDs defined for this OH port */
  18794. + dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
  18795. + fq_idx = 0;
  18796. +
  18797. + /* Error FQID - must be present */
  18798. + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
  18799. + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
  18800. + if (crt_fq_count != 1) {
  18801. + dev_err(dpa_oh_dev,
  18802. + "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
  18803. + oh_node->full_name, dpa_oh_node->full_name,
  18804. + crt_fq_count);
  18805. + _errno = -EINVAL;
  18806. + goto return_kfree;
  18807. + }
  18808. + oh_config->error_fqid = crt_fqid_base;
  18809. + dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
  18810. + oh_config->error_fqid, oh_node->full_name);
  18811. +
  18812. + /* Default FQID - must be present */
  18813. + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
  18814. + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
  18815. + if (crt_fq_count != 1) {
  18816. + dev_err(dpa_oh_dev,
  18817. + "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
  18818. + oh_node->full_name, dpa_oh_node->full_name,
  18819. + crt_fq_count);
  18820. + _errno = -EINVAL;
  18821. + goto return_kfree;
  18822. + }
  18823. + oh_config->default_fqid = crt_fqid_base;
  18824. + dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
  18825. + oh_config->default_fqid, oh_node->full_name);
  18826. +
  18827. + /* TX FQID - presence is optional */
  18828. + oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
  18829. + &lenp);
  18830. + if (oh_tx_queues == NULL) {
  18831. + dev_dbg(dpa_oh_dev,
  18832. + "No tx queues have been defined for OH node %s referenced from node %s\n",
  18833. + oh_node->full_name, dpa_oh_node->full_name);
  18834. + goto config_port;
  18835. + }
  18836. +
  18837. + /* Check that queues-tx has only a base and a count defined */
  18838. + BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
  18839. + queues_count = lenp / (2 * sizeof(*oh_tx_queues));
  18840. + if (queues_count != 1) {
  18841. + dev_err(dpa_oh_dev,
  18842. + "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
  18843. + oh_node->full_name, dpa_oh_node->full_name);
  18844. + _errno = -EINVAL;
  18845. + goto return_kfree;
  18846. + }
  18847. +
  18848. + fq_idx = 0;
  18849. + crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
  18850. + crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
  18851. + oh_config->egress_cnt = crt_fq_count;
  18852. +
  18853. + /* Allocate TX queues */
  18854. + dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
  18855. + oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
  18856. + crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
  18857. + if (oh_config->egress_fqs == NULL) {
  18858. + dev_err(dpa_oh_dev,
  18859. + "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
  18860. + oh_node->full_name, dpa_oh_node->full_name);
  18861. + _errno = -ENOMEM;
  18862. + goto return_kfree;
  18863. + }
  18864. +
  18865. + /* Create TX queues */
  18866. + for (i = 0; i < crt_fq_count; i++) {
  18867. + ret = oh_fq_create(oh_config->egress_fqs + i,
  18868. + crt_fqid_base + i, (uint16_t)channel_id, 3);
  18869. + if (ret != 0) {
  18870. + dev_err(dpa_oh_dev,
  18871. + "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
  18872. + crt_fqid_base + i, oh_node->full_name,
  18873. + dpa_oh_node->full_name);
  18874. + _errno = -EINVAL;
  18875. + goto return_kfree;
  18876. + }
  18877. + }
  18878. +
  18879. +config_port:
  18880. + /* Get a handle to the fm_port so we can set
  18881. + * its configuration params
  18882. + */
  18883. + oh_config->oh_port = fm_port_bind(oh_dev);
  18884. + if (oh_config->oh_port == NULL) {
  18885. + dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
  18886. + oh_node->full_name);
  18887. + _errno = -EINVAL;
  18888. + goto return_kfree;
  18889. + }
  18890. +
  18891. + oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
  18892. +
  18893. + /* read the pool handlers */
  18894. + crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
  18895. + "fsl,bman-buffer-pools", NULL);
  18896. + if (crt_ext_pools_count <= 0) {
  18897. + dev_info(dpa_oh_dev,
  18898. + "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
  18899. + oh_node->full_name);
  18900. + goto init_port;
  18901. + }
  18902. +
  18903. + /* used for reading ext_pool_size*/
  18904. + root_node = of_find_node_by_path("/");
  18905. + if (root_node == NULL) {
  18906. + dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
  18907. + _errno = -EINVAL;
  18908. + goto return_kfree;
  18909. + }
  18910. +
  18911. + n_size = of_n_size_cells(root_node);
  18912. + of_node_put(root_node);
  18913. +
  18914. + dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
  18915. + crt_ext_pools_count);
  18916. +
  18917. + oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
  18918. +
  18919. + for (i = 0; i < crt_ext_pools_count; i++) {
  18920. + bpool_node = of_parse_phandle(dpa_oh_node,
  18921. + "fsl,bman-buffer-pools", i);
  18922. + if (bpool_node == NULL) {
  18923. + dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
  18924. + _errno = -EINVAL;
  18925. + goto return_kfree;
  18926. + }
  18927. +
  18928. + _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
  18929. + if (_errno) {
  18930. + dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
  18931. + _errno = -EINVAL;
  18932. + goto return_kfree;
  18933. + }
  18934. +
  18935. + oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
  18936. + dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
  18937. +
  18938. + bpool_cfg = of_get_property(bpool_node,
  18939. + "fsl,bpool-ethernet-cfg", &lenp);
  18940. + if (bpool_cfg == NULL) {
  18941. + dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
  18942. + _errno = -EINVAL;
  18943. + goto return_kfree;
  18944. + }
  18945. +
  18946. + ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
  18947. + oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
  18948. + dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
  18949. + ext_pool_size);
  18950. + of_node_put(bpool_node);
  18951. +
  18952. + }
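
[Editor's note] ext_pool_size is pulled out of fsl,bpool-ethernet-cfg with of_read_number(), which folds #size-cells consecutive big-endian cells into one u64; the property holds <count size ...> per pool, so the size starts n_size cells in. A user-space re-implementation of that fold (cfg contents are example values, assuming two cells per number):

    #include <stdint.h>
    #include <stdio.h>

    /* Same fold as the kernel's of_read_number() */
    static uint64_t read_number(const uint32_t *cell, int size)
    {
        uint64_t r = 0;

        while (size--)
            r = (r << 32) | __builtin_bswap32(*cell++); /* BE cells, LE host */
        return r;
    }

    int main(void)
    {
        /* <count_hi count_lo size_hi size_lo> = <0 1024 0 1728> */
        uint32_t cfg[] = { 0, __builtin_bswap32(1024),
                           0, __builtin_bswap32(1728) };

        printf("count=%llu size=%llu\n",
               (unsigned long long)read_number(cfg, 2),
               (unsigned long long)read_number(cfg + 2, 2));
        return 0;
    }
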
  18953. +
  18954. + if (buf_layout.data_align != FRAG_DATA_ALIGN ||
  18955. + buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
  18956. + goto init_port;
  18957. +
  18958. + frag_enabled = true;
  18959. + dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d\n",
  18960. + port_id);
  18961. +
  18962. +init_port:
  18963. + of_node_put(oh_node);
  18964. + /* Set Tx params */
  18965. + dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
  18966. + oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
  18967. + frag_enabled);
  18968. + /* Set PCD params */
  18969. + oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
  18970. + oh_port_pcd_params.cbf = oh_free_pcd_fqids;
  18971. + oh_port_pcd_params.dev = dpa_oh_dev;
  18972. + fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
  18973. +
  18974. + dev_set_drvdata(dpa_oh_dev, oh_config);
  18975. +
  18976. + /* Enable the OH port */
  18977. + _errno = fm_port_enable(oh_config->oh_port);
  18978. + if (_errno)
  18979. + goto return_kfree;
  18980. +
  18981. + dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
  18982. +
  18983. + /* print of all referenced & created queues */
  18984. + dump_oh_config(dpa_oh_dev, oh_config);
  18985. +
  18986. + return 0;
  18987. +
  18988. +return_kfree:
  18989. + if (bpool_node)
  18990. + of_node_put(bpool_node);
  18991. + if (oh_node)
  18992. + of_node_put(oh_node);
  18993. + if (oh_config == NULL)
  18994. + return _errno; /* early error paths arrive before allocation */
  18995. +
  18996. + if (oh_config->egress_fqs)
  18997. + devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
  18998. +
  18996. + list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
  18997. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  18998. + list_del(fq_list);
  18999. + devm_kfree(dpa_oh_dev, fqd->fqs);
  19000. + devm_kfree(dpa_oh_dev, fqd);
  19001. + }
  19002. +
  19003. + list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
  19004. + fqd = list_entry(fq_list, struct fq_duple, fq_list);
  19005. + list_del(fq_list);
  19006. + devm_kfree(dpa_oh_dev, fqd->fqs);
  19007. + devm_kfree(dpa_oh_dev, fqd);
  19008. + }
  19009. +
  19010. + devm_kfree(dpa_oh_dev, oh_config);
  19011. + return _errno;
  19012. +}
  19013. +
  19014. +static int __cold oh_port_remove(struct platform_device *_of_dev)
  19015. +{
  19016. + int _errno = 0, i;
  19017. + struct dpa_oh_config_s *oh_config;
  19018. +
  19019. + pr_info("Removing OH port...\n");
  19020. +
  19021. + oh_config = dev_get_drvdata(&_of_dev->dev);
  19022. + if (oh_config == NULL) {
  19023. + pr_err(KBUILD_MODNAME
  19024. + ": %s:%hu:%s(): No OH config in device private data!\n",
  19025. + KBUILD_BASENAME".c", __LINE__, __func__);
  19026. + _errno = -ENODEV;
  19027. + goto return_error;
  19028. + }
  19029. +
  19030. + if (oh_config->egress_fqs)
  19031. + for (i = 0; i < oh_config->egress_cnt; i++)
  19032. + oh_fq_destroy(oh_config->egress_fqs + i);
  19033. +
  19034. + if (oh_config->oh_port == NULL) {
  19035. + pr_err(KBUILD_MODNAME
  19036. + ": %s:%hu:%s(): No fm port in device private data!\n",
  19037. + KBUILD_BASENAME".c", __LINE__, __func__);
  19038. + _errno = -EINVAL;
  19039. + goto free_egress_fqs;
  19040. + }
  19041. +
  19042. + _errno = fm_port_disable(oh_config->oh_port);
  19043. +
  19044. +free_egress_fqs:
  19045. + if (oh_config->egress_fqs)
  19046. + devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
  19047. + devm_kfree(&_of_dev->dev, oh_config);
  19048. + dev_set_drvdata(&_of_dev->dev, NULL);
  19049. +
  19050. +return_error:
  19051. + return _errno;
  19052. +}
  19053. +
  19054. +static struct platform_driver oh_port_driver = {
  19055. + .driver = {
  19056. + .name = KBUILD_MODNAME,
  19057. + .of_match_table = oh_port_match_table,
  19058. + .owner = THIS_MODULE,
  19059. + .pm = OH_PM_OPS,
  19060. + },
  19061. + .probe = oh_port_probe,
  19062. + .remove = oh_port_remove
  19063. +};
  19064. +
  19065. +static int __init __cold oh_port_load(void)
  19066. +{
  19067. + int _errno;
  19068. +
  19069. + pr_info(OH_MOD_DESCRIPTION "\n");
  19070. +
  19071. + _errno = platform_driver_register(&oh_port_driver);
  19072. + if (_errno < 0) {
  19073. + pr_err(KBUILD_MODNAME
  19074. + ": %s:%hu:%s(): platform_driver_register() = %d\n",
  19075. + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
  19076. + }
  19077. +
  19078. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  19079. + KBUILD_BASENAME".c", __func__);
  19080. + return _errno;
  19081. +}
  19082. +module_init(oh_port_load);
  19083. +
  19084. +static void __exit __cold oh_port_unload(void)
  19085. +{
  19086. + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
  19087. + KBUILD_BASENAME".c", __func__);
  19088. +
  19089. + platform_driver_unregister(&oh_port_driver);
  19090. +
  19091. + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
  19092. + KBUILD_BASENAME".c", __func__);
  19093. +}
  19094. +module_exit(oh_port_unload);
  19095. --- /dev/null
  19096. +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
  19097. @@ -0,0 +1,59 @@
  19098. +/* Copyright 2011 Freescale Semiconductor Inc.
  19099. + *
  19100. + * Redistribution and use in source and binary forms, with or without
  19101. + * modification, are permitted provided that the following conditions are met:
  19102. + * * Redistributions of source code must retain the above copyright
  19103. + * notice, this list of conditions and the following disclaimer.
  19104. + * * Redistributions in binary form must reproduce the above copyright
  19105. + * notice, this list of conditions and the following disclaimer in the
  19106. + * documentation and/or other materials provided with the distribution.
  19107. + * * Neither the name of Freescale Semiconductor nor the
  19108. + * names of its contributors may be used to endorse or promote products
  19109. + * derived from this software without specific prior written permission.
  19110. + *
  19111. + *
  19112. + * ALTERNATIVELY, this software may be distributed under the terms of the
  19113. + * GNU General Public License ("GPL") as published by the Free Software
  19114. + * Foundation, either version 2 of that License or (at your option) any
  19115. + * later version.
  19116. + *
  19117. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  19118. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  19119. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19120. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  19121. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  19122. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19123. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  19124. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  19125. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  19126. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  19127. + */
  19128. +
  19129. +#ifndef __OFFLINE_PORT_H
  19130. +#define __OFFLINE_PORT_H
  19131. +
  19132. +struct fm_port;
  19133. +struct qman_fq;
  19134. +
  19135. +/* fqs are defined in duples (base_fq, fq_count) */
  19136. +struct fq_duple {
  19137. + struct qman_fq *fqs;
  19138. + int fqs_count;
  19139. + uint16_t channel_id;
  19140. + struct list_head fq_list;
  19141. +};
  19142. +
  19143. +/* OH port configuration */
  19144. +struct dpa_oh_config_s {
  19145. + uint32_t error_fqid;
  19146. + uint32_t default_fqid;
  19147. + struct fm_port *oh_port;
  19148. + uint32_t egress_cnt;
  19149. + struct qman_fq *egress_fqs;
  19150. + uint16_t channel;
  19151. +
  19152. + struct list_head fqs_ingress_list;
  19153. + struct list_head fqs_egress_list;
  19154. +};
  19155. +
  19156. +#endif /* __OFFLINE_PORT_H */