12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
42221422314224142251422614227142281422914230142311423214233142341423514236142371423814239142401424114242142431424414245142461424714248142491425014251142521425314254142551425614257142581425914260142611426214263142641426514266142671426814269142701427114272142731427414275142761427714278142791428014281142821428314284142851428614287142881428914290142911429214293142941429514296142971429814299143001430114302143031430414305143061430714308143091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211
46221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501915020150211
50221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211
54221542315424154251542615427154281542915430154311543215433154341543515436154371543815439154401544115442154431544415445154461544715448154491545015451154521545315454154551545615457154581545915460154611546215463154641546515466154671546815469154701547115472154731547415475154761547715478154791548015481154821548315484154851548615487154881548915490154911549215493154941549515496154971549815499155001550115502155031550415505155061550715508155091551015511155121551315514155151551615517155181551915520155211552215523155241552515526155271552815529155301553115532155331553415535155361553715538155391554015541155421554315544155451554615547155481554915550155511555215553155541555515556155571555815559155601556115562155631556415565155661556715568155691557015571155721557315574155751557615577155781557915580155811558215583155841558515586155871558815589155901559115592155931559415595155961559715598155991560015601156021560315604156051560615607156081560915610156111561215613156141561515616156171561815619156201562115622156231562415625156261562715628156291563015631156321563315634156351563615637156381563915640156411564215643156441564515646156471564815649156501565115652156531565415655156561565715658156591566015661156621566315664156651566615667156681566915670156711567215673156741567515676156771567815679156801568115682156831568415685156861568715688156891569015691156921569315694156951569615697156981569915700157011570215703157041570515706157071570815709157101571115712157131571415715157161571715718157191572015721157221572315724157251572615727157281572915730157311573215733157341573515736157371573815739157401574115742157431574415745157461574715748157491575015751157521575315754157551575615757157581575915760157611576215763157641576515766157671576815769157701577115772157731577415775157761577715778157791578015781157821578315784157851578615787157881578915790157911579215793157941579515796157971579815799158001580115802158031580415805158061580715808158091581015811158121581315814158151581615817158181581915820158211
58221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211
62221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211
66221662316624166251662616627166281662916630166311663216633166341663516636166371663816639166401664116642166431664416645166461664716648166491665016651166521665316654166551665616657166581665916660166611666216663166641666516666166671666816669166701667116672166731667416675166761667716678166791668016681166821668316684166851668616687166881668916690166911669216693166941669516696166971669816699167001670116702167031670416705167061670716708167091671016711167121671316714167151671616717167181671916720167211672216723167241672516726167271672816729167301673116732167331673416735167361673716738167391674016741167421674316744167451674616747167481674916750167511675216753167541675516756167571675816759167601676116762167631676416765167661676716768167691677016771167721677316774167751677616777167781677916780167811678216783167841678516786167871678816789167901679116792167931679416795167961679716798167991680016801168021680316804168051680616807168081680916810168111681216813168141681516816168171681816819168201682116822168231682416825168261682716828168291683016831168321683316834168351683616837168381683916840168411684216843168441684516846168471684816849168501685116852168531685416855168561685716858168591686016861168621686316864168651686616867168681686916870168711687216873168741687516876168771687816879168801688116882168831688416885168861688716888168891689016891168921689316894168951689616897168981689916900169011690216903169041690516906169071690816909169101691116912169131691416915169161691716918169191692016921169221692316924169251692616927169281692916930169311693216933169341693516936169371693816939169401694116942169431694416945169461694716948169491695016951169521695316954169551695616957169581695916960169611696216963169641696516966169671696816969169701697116972169731697416975169761697716978169791698016981169821698316984169851698616987169881698916990169911699216993169941699516996169971699816999170001700117002170031700417005170061700717008170091701017011170121701317014170151701617017170181701917020170211
70221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211
74221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211
78221782317824178251782617827178281782917830178311783217833178341783517836178371783817839178401784117842178431784417845178461784717848178491785017851178521785317854178551785617857178581785917860178611786217863178641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211
82221822318224182251822618227182281822918230182311823218233182341823518236182371823818239182401824118242182431824418245182461824718248182491825018251182521825318254182551825618257182581825918260182611826218263182641826518266182671826818269182701827118272182731827418275182761827718278182791828018281182821828318284182851828618287182881828918290182911829218293182941829518296182971829818299183001830118302183031830418305183061830718308183091831018311183121831318314183151831618317183181831918320183211832218323183241832518326183271832818329183301833118332183331833418335183361833718338183391834018341183421834318344183451834618347183481834918350183511835218353183541835518356183571835818359183601836118362183631836418365183661836718368183691837018371183721837318374183751837618377183781837918380183811838218383183841838518386183871838818389183901839118392183931839418395183961839718398183991840018401184021840318404184051840618407184081840918410184111841218413184141841518416184171841818419184201842118422184231842418425184261842718428184291843018431184321843318434184351843618437184381843918440184411844218443184441844518446184471844818449184501845118452184531845418455184561845718458184591846018461184621846318464184651846618467184681846918470184711847218473184741847518476184771847818479184801848118482184831848418485184861848718488184891849018491184921849318494184951849618497184981849918500185011850218503185041850518506185071850818509185101851118512185131851418515185161851718518185191852018521185221852318524185251852618527185281852918530185311853218533185341853518536185371853818539185401854118542185431854418545185461854718548185491855018551185521855318554185551855618557185581855918560185611856218563185641856518566185671856818569185701857118572185731857418575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211
86221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211
9022190231902419025190261902719028190291903019031190321903319034190351903619037190381903919040190411904219043190441904519046190471904819049190501905119052190531905419055190561905719058190591906019061190621906319064190651906619067190681906919070190711907219073190741907519076190771907819079190801908119082190831908419085190861908719088190891909019091190921909319094190951909619097190981909919100191011910219103191041910519106191071910819109191101911119112191131911419115191161911719118191191912019121191221912319124191251912619127191281912919130191311913219133191341913519136191371913819139191401914119142191431914419145191461914719148191491915019151191521915319154191551915619157191581915919160 |
- From 2af9b49c7e6bad2dee75960ddf61fd52a4d3748f Mon Sep 17 00:00:00 2001
- From: Zhao Qiang <qiang.zhao@nxp.com>
- Date: Wed, 16 Dec 2015 22:00:36 +0200
- Subject: [PATCH 16/70] dpa: add dpaa_eth driver
- Dpaa is Datapath Acceleration Architecture; this architecture provides
- the infrastructure to support simplified sharing of networking
- interfaces and accelerators by multiple CPUs.
- Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
- Signed-off-by: Camelia Groza <camelia.groza@freescale.com>
- Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
- Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
- Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
- Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
- ---
- drivers/net/ethernet/freescale/Kconfig | 2 +
- drivers/net/ethernet/freescale/Makefile | 1 +
- drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 187 ++
- drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 59 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1183 +++++++++++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 695 +++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1719 ++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 230 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1787 ++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 227 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c | 1735 ++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h | 90 +
- .../freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c | 201 ++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c | 499 +++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c | 2156 ++++++++++++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h | 294 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1128 ++++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c | 914 +++++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 +++++
- .../freescale/sdk_dpaa/dpaa_generic_ethtool.c | 286 +++
- .../freescale/sdk_dpaa/dpaa_macsec_ethtool.c | 250 +++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 287 +++
- drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 915 +++++++++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 470 +++++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 ++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 ++++++++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
- 36 files changed, 18957 insertions(+)
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
- --- a/drivers/net/ethernet/freescale/Kconfig
- +++ b/drivers/net/ethernet/freescale/Kconfig
- @@ -93,4 +93,6 @@ config GIANFAR
- on the 8540.
-
- source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
- +source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
- +
- endif # NET_VENDOR_FREESCALE
- --- a/drivers/net/ethernet/freescale/Makefile
- +++ b/drivers/net/ethernet/freescale/Makefile
- @@ -18,3 +18,4 @@ gianfar_driver-objs := gianfar.o \
- obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
- ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
- obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
- +obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
- @@ -0,0 +1,187 @@
- +menuconfig FSL_SDK_DPAA_ETH
- + tristate "DPAA Ethernet"
- + depends on (FSL_SOC || ARM64 || ARM) && FSL_BMAN && FSL_QMAN && FSL_SDK_FMAN
- + select PHYLIB
- + ---help---
- + Data Path Acceleration Architecture Ethernet driver,
- + supporting the Freescale QorIQ chips.
- + Depends on Freescale Buffer Manager and Queue Manager
- + driver and Frame Manager Driver.
- +
- +if FSL_SDK_DPAA_ETH
- +
- +config FSL_DPAA_HOOKS
- + bool "DPAA Ethernet driver hooks"
- +
- +config FSL_DPAA_MACSEC
- + tristate "DPAA MACSEC"
- + select FSL_DPAA_HOOKS
- + ---help---
- + Enable MACSEC support in DPAA.
- +
- +config FSL_DPAA_CEETM
- + bool "DPAA CEETM QoS"
- + select NET_SCHED
- + default n
- + ---help---
- + Enable QoS offloading support through the CEETM hardware block.
- +
- +config FSL_DPAA_OFFLINE_PORTS
- + bool "Offline Ports support"
- + depends on FSL_SDK_DPAA_ETH
- + default y
- + ---help---
- + The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
- + most of the functionality of the regular, online ports, except they receive their
- + frames from a core or an accelerator on the SoC, via QMan frame queues,
- + rather than directly from the network.
- + Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
- + any online FMan port. They deliver the processed frames to frame queues, according
- + to the applied PCD configurations.
- +
- + Choosing this feature will not impact the functionality and/or performance of the system,
- + so it is safe to have it.
- +
- +config FSL_DPAA_ADVANCED_DRIVERS
- + bool "Advanced DPAA Ethernet drivers"
- + depends on FSL_SDK_DPAA_ETH
- + default y
- + ---help---
- + Besides the standard DPAA Ethernet driver there are available other flavours
- + of DPAA drivers that support advanced scenarios:
- + - DPAA Shared MAC driver
- + - DPAA MAC-less driver
- + - DPAA Proxy initialization driver (for USDPAA)
- + Select this to also build the advanced drivers.
- +
- +config FSL_DPAA_GENERIC_DRIVER
- + bool "Generic DPAA Ethernet driver"
- + depends on FSL_SDK_DPAA_ETH
- + default y
- + ---help---
- + This enables the DPAA Generic driver (oNIC).
- +
- +config FSL_DPAA_ETH_JUMBO_FRAME
- + bool "Optimize for jumbo frames"
- + depends on !ARM64 && !ARM
- + default n
- + ---help---
- + Optimize the DPAA Ethernet driver throughput for large frames
- + termination traffic (e.g. 4K and above).
- + NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
- + is set to 9600 bytes.
- + Using this option in combination with small frames increases
- + significantly the driver's memory footprint and may even deplete
- + the system memory.
- + This option is not available on LS1043.
- +
- +config FSL_DPAA_TS
- + bool "Linux compliant timestamping"
- + depends on FSL_SDK_DPAA_ETH
- + default n
- + ---help---
- + Enable Linux API compliant timestamping support.
- +
- +config FSL_DPAA_1588
- + bool "IEEE 1588-compliant timestamping"
- + depends on FSL_SDK_DPAA_ETH
- + select FSL_DPAA_TS
- + default n
- + ---help---
- + Enable IEEE1588 support code.
- +
- +config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- + bool "Use driver's Tx queue selection mechanism"
- + default y
- + depends on FSL_SDK_DPAA_ETH
- + ---help---
- + The DPAA-Ethernet driver defines a ndo_select_queue() callback for optimal selection
- + of the egress FQ. That will override the XPS support for this netdevice.
- + If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
- + or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
- + and use the standard XPS support instead.
- +
- +config FSL_DPAA_ETH_MAX_BUF_COUNT
- + int "Maximum number of buffers in private bpool"
- + depends on FSL_SDK_DPAA_ETH
- + range 64 2048
- + default "128"
- + ---help---
- + The maximum number of buffers to be by default allocated in the DPAA-Ethernet private port's
- + buffer pool. One needn't normally modify this, as it has probably been tuned for performance
- + already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
- +
- +config FSL_DPAA_ETH_REFILL_THRESHOLD
- + int "Private bpool refill threshold"
- + depends on FSL_SDK_DPAA_ETH
- + range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
- + default "80"
- + ---help---
- + The DPAA-Ethernet driver will start replenishing buffer pools whose count
- + falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
- + modify this value unless one has very specific performance reasons.
- +
- +config FSL_DPAA_CS_THRESHOLD_1G
- + hex "Egress congestion threshold on 1G ports"
- + depends on FSL_SDK_DPAA_ETH
- + range 0x1000 0x10000000
- + default "0x06000000"
- + ---help---
- + The size in bytes of the egress Congestion State notification threshold on 1G ports.
- + The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
- + (e.g. by sending UDP datagrams at "while(1) speed"),
- + and the larger the frame size, the more acute the problem.
- + So we have to find a balance between these factors:
- + - avoiding the device staying congested for a prolonged time (risking
- + that the netdev watchdog fires - see also the tx_timeout module param);
- + - affecting performance of protocols such as TCP, which otherwise
- + behave well under the congestion notification mechanism;
- + - preventing the Tx cores from tightly-looping (as if the congestion
- + threshold was too low to be effective);
- + - running out of memory if the CS threshold is set too high.
- +
- +config FSL_DPAA_CS_THRESHOLD_10G
- + hex "Egress congestion threshold on 10G ports"
- + depends on FSL_SDK_DPAA_ETH
- + range 0x1000 0x20000000
- + default "0x10000000"
- +
- +config FSL_DPAA_INGRESS_CS_THRESHOLD
- + hex "Ingress congestion threshold on FMan ports"
- + depends on FSL_SDK_DPAA_ETH
- + default "0x10000000"
- + ---help---
- + The size in bytes of the ingress tail-drop threshold on FMan ports.
- + Traffic piling up above this value will be rejected by QMan and discarded by FMan.
- +
- +config FSL_DPAA_ETH_DEBUGFS
- + bool "DPAA Ethernet debugfs interface"
- + depends on DEBUG_FS && FSL_SDK_DPAA_ETH
- + default y
- + ---help---
- + This option compiles debugfs code for the DPAA Ethernet driver.
- +
- +config FSL_DPAA_ETH_DEBUG
- + bool "DPAA Ethernet Debug Support"
- + depends on FSL_SDK_DPAA_ETH
- + default n
- + ---help---
- + This option compiles debug code for the DPAA Ethernet driver.
- +
- +config FSL_DPAA_DBG_LOOP
- + bool "DPAA Ethernet Debug loopback"
- + depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- + default n
- + ---help---
- + This option allows to divert all received traffic on a certain interface A towards a
- + selected interface B. This option is used to benchmark the HW + Ethernet driver in
- + isolation from the Linux networking stack. The loops are controlled by debugfs entries,
- + one for each interface. By default all loops are disabled (target value is -1). I.e. to
- + change the loop setting for interface 4 and divert all received traffic to interface 5
- + write Tx interface number in the receive interface debugfs file:
- + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
- + 4->-1
- + # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
- + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
- + 4->5
- +endif # FSL_SDK_DPAA_ETH
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
- @@ -0,0 +1,59 @@
- +#
- +# Makefile for the Freescale Ethernet controllers
- +#
- +ccflags-y += -DVERSION=\"\"
- +#
- +# Include netcomm SW specific definitions
- +include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
- +
- +ccflags-y += -I$(NET_DPA)
- +
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
- +obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
- +
- +fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
- +ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
- +fsl_dpa-objs += dpaa_debugfs.o
- +endif
- +ifeq ($(CONFIG_FSL_DPAA_1588),y)
- +fsl_dpa-objs += dpaa_1588.o
- +endif
- +ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
- +ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
- +fsl_dpa-objs += dpaa_eth_ceetm.o
- +endif
- +
- +fsl_mac-objs += mac.o mac-api.o
- +
- +# Advanced drivers
- +ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_shared.o
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_macless.o
- +obj-$(CONFIG_FSL_DPAA_MACSEC) += fsl_dpa_macsec.o
- +
- +fsl_advanced-objs += dpaa_eth_base.o
- +# support for multiple drivers per kernel module comes in kernel 3.14
- +# so we are forced to generate several modules for the advanced drivers
- +fsl_proxy-objs += dpaa_eth_proxy.o
- +fsl_dpa_shared-objs += dpaa_eth_shared.o
- +fsl_dpa_macless-objs += dpaa_eth_macless.o
- +fsl_dpa_macsec-objs += dpaa_eth_macsec.o dpaa_macsec_ethtool.o
- +
- +ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
- +
- +fsl_oh-objs += offline_port.o
- +endif
- +endif
- +
- +# Generic driver
- +ifeq ($(CONFIG_FSL_DPAA_GENERIC_DRIVER),y)
- +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_generic.o
- +
- +fsl_generic-objs += dpaa_eth_generic.o dpaa_eth_generic_sysfs.o dpaa_generic_ethtool.o
- +endif
- +
- +# Needed by the tracing framework
- +CFLAGS_dpaa_eth.o := -I$(src)
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
- @@ -0,0 +1,580 @@
- +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
- + * Copyright (C) 2009 IXXAT Automation, GmbH
- + *
- + * DPAA Ethernet Driver -- IEEE 1588 interface functionality
- + *
- + * This program is free software; you can redistribute it and/or modify
- + * it under the terms of the GNU General Public License as published by
- + * the Free Software Foundation; either version 2 of the License, or
- + * (at your option) any later version.
- + *
- + * This program is distributed in the hope that it will be useful,
- + * but WITHOUT ANY WARRANTY; without even the implied warranty of
- + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + * GNU General Public License for more details.
- + *
- + * You should have received a copy of the GNU General Public License along
- + * with this program; if not, write to the Free Software Foundation, Inc.,
- + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- + *
- + */
- +#include <linux/io.h>
- +#include <linux/device.h>
- +#include <linux/fs.h>
- +#include <linux/vmalloc.h>
- +#include <linux/spinlock.h>
- +#include <linux/ip.h>
- +#include <linux/ipv6.h>
- +#include <linux/udp.h>
- +#include <asm/div64.h>
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_1588.h"
- +#include "mac.h"
- +
- +static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
- +{
- + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
- +
- + circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
- + if (!circ_buf->buf)
- + return 1;
- +
- + circ_buf->head = 0;
- + circ_buf->tail = 0;
- + ptp_buf->size = size;
- + spin_lock_init(&ptp_buf->ptp_lock);
- +
- + return 0;
- +}
- +
- +static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
- +{
- + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
- +
- + circ_buf->head = 0;
- + circ_buf->tail = 0;
- + ptp_buf->size = size;
- +}
- +
- +static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
- + struct dpa_ptp_data *data)
- +{
- + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
- + int size = ptp_buf->size;
- + struct dpa_ptp_data *tmp;
- + unsigned long flags;
- + int head, tail;
- +
- + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
- +
- + head = circ_buf->head;
- + tail = circ_buf->tail;
- +
- + if (CIRC_SPACE(head, tail, size) <= 0)
- + circ_buf->tail = (tail + 1) & (size - 1);
- +
- + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
- + memcpy(tmp, data, sizeof(struct dpa_ptp_data));
- +
- + circ_buf->head = (head + 1) & (size - 1);
- +
- + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
- +
- + return 0;
- +}
- +
- +static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
- + struct dpa_ptp_ident *src)
- +{
- + int ret;
- +
- + if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
- + return 0;
- +
- + if ((dst->netw_prot == src->netw_prot)
- + || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
- + if (dst->seq_id != src->seq_id)
- + return 0;
- +
- + ret = memcmp(dst->snd_port_id, src->snd_port_id,
- + DPA_PTP_SOURCE_PORT_LENGTH);
- + if (ret)
- + return 0;
- + else
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
- + struct dpa_ptp_ident *ident,
- + struct dpa_ptp_time *ts)
- +{
- + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
- + int size = ptp_buf->size;
- + int head, tail, idx;
- + unsigned long flags;
- + struct dpa_ptp_data *tmp, *tmp2;
- + struct dpa_ptp_ident *tmp_ident;
- +
- + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
- +
- + head = circ_buf->head;
- + tail = idx = circ_buf->tail;
- +
- + if (CIRC_CNT(head, tail, size) == 0) {
- + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
- + return 1;
- + }
- +
- + while (idx != head) {
- + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
- + tmp_ident = &tmp->ident;
- + if (dpa_ptp_is_ident_match(tmp_ident, ident))
- + break;
- + idx = (idx + 1) & (size - 1);
- + }
- +
- + if (idx == head) {
- + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
- + return 1;
- + }
- +
- + ts->sec = tmp->ts.sec;
- + ts->nsec = tmp->ts.nsec;
- +
- + if (idx != tail) {
- + if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
- + tail = circ_buf->tail =
- + (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
- + }
- +
- + while (CIRC_CNT(idx, tail, size) > 0) {
- + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
- + idx = (idx - 1) & (size - 1);
- + tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
- + *tmp = *tmp2;
- + }
- + }
- + circ_buf->tail = (tail + 1) & (size - 1);
- +
- + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
- +
- + return 0;
- +}
- +
- +/* Parse the PTP packets
- + *
- + * The PTP header can be found in an IPv4 packet, IPv6 packet or in
- + * an IEEE802.3 ethernet frame. This function returns the position of
- + * the PTP packet or NULL if no PTP found
- + */
- +static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
- +{
- + u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
- + u8 *ptp_loc = NULL;
- + u8 msg_type;
- + u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
- + struct iphdr *iph;
- + struct udphdr *udph;
- + struct ipv6hdr *ipv6h;
- +
- + /* when we can receive S/G frames we need to check the data we want to
- + * access is in the linear skb buffer
- + */
- + if (!pskb_may_pull(skb, access_len))
- + return NULL;
- +
- + *eth_type = *((u16 *)pos);
- +
- + /* Check if inner tag is here */
- + if (*eth_type == ETH_P_8021Q) {
- + access_len += DPA_VLAN_TAG_LEN;
- +
- + if (!pskb_may_pull(skb, access_len))
- + return NULL;
- +
- + pos += DPA_VLAN_TAG_LEN;
- + *eth_type = *((u16 *)pos);
- + }
- +
- + pos += DPA_ETYPE_LEN;
- +
- + switch (*eth_type) {
- + /* Transport of PTP over Ethernet */
- + case ETH_P_1588:
- + ptp_loc = pos;
- +
- + if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
- + return NULL;
- +
- + msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
- + if ((msg_type == PTP_MSGTYPE_SYNC)
- + || (msg_type == PTP_MSGTYPE_DELREQ)
- + || (msg_type == PTP_MSGTYPE_PDELREQ)
- + || (msg_type == PTP_MSGTYPE_PDELRESP))
- + return ptp_loc;
- + break;
- + /* Transport of PTP over IPv4 */
- + case ETH_P_IP:
- + iph = (struct iphdr *)pos;
- + access_len += sizeof(struct iphdr);
- +
- + if (!pskb_may_pull(skb, access_len))
- + return NULL;
- +
- + if (ntohs(iph->protocol) != IPPROTO_UDP)
- + return NULL;
- +
- + access_len += iph->ihl * 4 - sizeof(struct iphdr) +
- + sizeof(struct udphdr);
- +
- + if (!pskb_may_pull(skb, access_len))
- + return NULL;
- +
- + pos += iph->ihl * 4;
- + udph = (struct udphdr *)pos;
- + if (ntohs(udph->dest) != 319)
- + return NULL;
- + ptp_loc = pos + sizeof(struct udphdr);
- + break;
- + /* Transport of PTP over IPv6 */
- + case ETH_P_IPV6:
- + ipv6h = (struct ipv6hdr *)pos;
- +
- + access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
- +
- + if (ntohs(ipv6h->nexthdr) != IPPROTO_UDP)
- + return NULL;
- +
- + pos += sizeof(struct ipv6hdr);
- + udph = (struct udphdr *)pos;
- + if (ntohs(udph->dest) != 319)
- + return NULL;
- + ptp_loc = pos + sizeof(struct udphdr);
- + break;
- + default:
- + break;
- + }
- +
- + return ptp_loc;
- +}
- +
- +static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
- + struct sk_buff *skb, void *data, enum port_type rx_tx,
- + struct dpa_ptp_data *ptp_data)
- +{
- + u64 nsec;
- + u32 mod;
- + u8 *ptp_loc;
- + u16 eth_type;
- +
- + ptp_loc = dpa_ptp_parse_packet(skb, ð_type);
- + if (!ptp_loc)
- + return -EINVAL;
- +
- + switch (eth_type) {
- + case ETH_P_IP:
- + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
- + break;
- + case ETH_P_IPV6:
- + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
- + break;
- + case ETH_P_1588:
- + ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
- + break;
- + default:
- + return -EINVAL;
- + }
- +
- + if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
- + return -EINVAL;
- +
- + ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
- + ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
- + ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
- + memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
- + DPA_PTP_SOURCE_PORT_LENGTH);
- +
- + nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
- + mod = do_div(nsec, NANOSEC_PER_SECOND);
- + ptp_data->ts.sec = nsec;
- + ptp_data->ts.nsec = mod;
- +
- + return 0;
- +}
- +
- +void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
- + struct sk_buff *skb, void *data)
- +{
- + struct dpa_ptp_tsu *tsu = priv->tsu;
- + struct dpa_ptp_data ptp_tx_data;
- +
- + if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
- + return;
- +
- + dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
- +}
- +
- +void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
- + struct sk_buff *skb, void *data)
- +{
- + struct dpa_ptp_tsu *tsu = priv->tsu;
- + struct dpa_ptp_data ptp_rx_data;
- +
- + if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
- + return;
- +
- + dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
- +}
- +
- +static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
- + struct dpa_ptp_ident *ident,
- + struct dpa_ptp_time *ts)
- +{
- + struct dpa_ptp_tsu *tsu = ptp_tsu;
- + struct dpa_ptp_time tmp;
- + int flag;
- +
- + flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
- + if (!flag) {
- + ts->sec = tmp.sec;
- + ts->nsec = tmp.nsec;
- + return 0;
- + }
- +
- + return -1;
- +}
- +
- +static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
- + struct dpa_ptp_ident *ident,
- + struct dpa_ptp_time *ts)
- +{
- + struct dpa_ptp_tsu *tsu = ptp_tsu;
- + struct dpa_ptp_time tmp;
- + int flag;
- +
- + flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
- + if (!flag) {
- + ts->sec = tmp.sec;
- + ts->nsec = tmp.nsec;
- + return 0;
- + }
- +
- + return -1;
- +}
- +
- +static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
- + struct dpa_ptp_time *cnt_time)
- +{
- + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
- + u64 tmp, fiper;
- +
- + if (mac_dev->fm_rtc_disable)
- + mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
- +
- + /* TMR_FIPER1 will pulse every second after ALARM1 expired */
- + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
- + fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
- + if (mac_dev->fm_rtc_set_alarm)
- + mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
- + 0, tmp);
- + if (mac_dev->fm_rtc_set_fiper)
- + mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
- + 0, fiper);
- +
- + if (mac_dev->fm_rtc_enable)
- + mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
- +}
- +
- +static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
- + struct dpa_ptp_time *curr_time)
- +{
- + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
- + u64 tmp;
- + u32 mod;
- +
- + if (mac_dev->fm_rtc_get_cnt)
- + mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
- + &tmp);
- +
- + mod = do_div(tmp, NANOSEC_PER_SECOND);
- + curr_time->sec = (u32)tmp;
- + curr_time->nsec = mod;
- +}
- +
- +static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
- + struct dpa_ptp_time *cnt_time)
- +{
- + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
- + u64 tmp;
- +
- + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
- +
- + if (mac_dev->fm_rtc_set_cnt)
- + mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
- + tmp);
- +
- + /* Restart fiper two seconds later */
- + cnt_time->sec += 2;
- + cnt_time->nsec = 0;
- + dpa_set_fiper_alarm(tsu, cnt_time);
- +}
- +
- +static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
- +{
- + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
- + u32 drift;
- +
- + if (mac_dev->fm_rtc_get_drift)
- + mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
- + &drift);
- +
- + *addend = drift;
- +}
- +
- +static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
- +{
- + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
- +
- + if (mac_dev->fm_rtc_set_drift)
- + mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
- + addend);
- +}
- +
- +static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
- +{
- + dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
- + dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
- +}
- +
- +int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- + struct dpa_ptp_tsu *tsu = priv->tsu;
- + struct mac_device *mac_dev = priv->mac_dev;
- + struct dpa_ptp_data ptp_data;
- + struct dpa_ptp_data *ptp_data_user;
- + struct dpa_ptp_time act_time;
- + u32 addend;
- + int retval = 0;
- +
- + if (!tsu || !tsu->valid)
- + return -ENODEV;
- +
- + switch (cmd) {
- + case PTP_ENBL_TXTS_IOCTL:
- + tsu->hwts_tx_en_ioctl = 1;
- + if (mac_dev->fm_rtc_enable)
- + mac_dev->fm_rtc_enable(get_fm_handle(dev));
- + if (mac_dev->ptp_enable)
- + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
- + break;
- + case PTP_DSBL_TXTS_IOCTL:
- + tsu->hwts_tx_en_ioctl = 0;
- + if (mac_dev->fm_rtc_disable)
- + mac_dev->fm_rtc_disable(get_fm_handle(dev));
- + if (mac_dev->ptp_disable)
- + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
- + break;
- + case PTP_ENBL_RXTS_IOCTL:
- + tsu->hwts_rx_en_ioctl = 1;
- + break;
- + case PTP_DSBL_RXTS_IOCTL:
- + tsu->hwts_rx_en_ioctl = 0;
- + break;
- + case PTP_GET_RX_TIMESTAMP:
- + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
- + if (copy_from_user(&ptp_data.ident,
- + &ptp_data_user->ident, sizeof(ptp_data.ident)))
- + return -EINVAL;
- +
- + if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
- + return -EAGAIN;
- +
- + if (copy_to_user((void __user *)&ptp_data_user->ts,
- + &ptp_data.ts, sizeof(ptp_data.ts)))
- + return -EFAULT;
- + break;
- + case PTP_GET_TX_TIMESTAMP:
- + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
- + if (copy_from_user(&ptp_data.ident,
- + &ptp_data_user->ident, sizeof(ptp_data.ident)))
- + return -EINVAL;
- +
- + if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
- + return -EAGAIN;
- +
- + if (copy_to_user((void __user *)&ptp_data_user->ts,
- + &ptp_data.ts, sizeof(ptp_data.ts)))
- + return -EFAULT;
- + break;
- + case PTP_GET_TIME:
- + dpa_get_curr_cnt(tsu, &act_time);
- + if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
- + return -EFAULT;
- + break;
- + case PTP_SET_TIME:
- + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
- + return -EINVAL;
- + dpa_set_1588cnt(tsu, &act_time);
- + break;
- + case PTP_GET_ADJ:
- + dpa_get_drift(tsu, &addend);
- + if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
- + return -EFAULT;
- + break;
- + case PTP_SET_ADJ:
- + if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
- + return -EINVAL;
- + dpa_set_drift(tsu, addend);
- + break;
- + case PTP_SET_FIPER_ALARM:
- + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
- + return -EINVAL;
- + dpa_set_fiper_alarm(tsu, &act_time);
- + break;
- + case PTP_CLEANUP_TS:
- + dpa_flush_timestamp(tsu);
- + break;
- + default:
- + return -EINVAL;
- + }
- +
- + return retval;
- +}
- +
- +int dpa_ptp_init(struct dpa_priv_s *priv)
- +{
- + struct dpa_ptp_tsu *tsu;
- +
- + /* Allocate memory for PTP structure */
- + tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
- + if (!tsu)
- + return -ENOMEM;
- +
- + tsu->valid = TRUE;
- + tsu->dpa_priv = priv;
- +
- + dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
- + dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
- +
- + priv->tsu = tsu;
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_ptp_init);
- +
- +void dpa_ptp_cleanup(struct dpa_priv_s *priv)
- +{
- + struct dpa_ptp_tsu *tsu = priv->tsu;
- +
- + tsu->valid = FALSE;
- + vfree(tsu->rx_timestamps.circ_buf.buf);
- + vfree(tsu->tx_timestamps.circ_buf.buf);
- +
- + kfree(tsu);
- +}
- +EXPORT_SYMBOL(dpa_ptp_cleanup);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
- @@ -0,0 +1,138 @@
- +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
- + *
- + * This program is free software; you can redistribute it and/or modify
- + * it under the terms of the GNU General Public License as published by
- + * the Free Software Foundation; either version 2 of the License, or
- + * (at your option) any later version.
- + *
- + * This program is distributed in the hope that it will be useful,
- + * but WITHOUT ANY WARRANTY; without even the implied warranty of
- + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + * GNU General Public License for more details.
- + *
- + * You should have received a copy of the GNU General Public License along
- + * with this program; if not, write to the Free Software Foundation, Inc.,
- + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- + *
- + */
- +#ifndef __DPAA_1588_H__
- +#define __DPAA_1588_H__
- +
- +#include <linux/netdevice.h>
- +#include <linux/etherdevice.h>
- +#include <linux/circ_buf.h>
- +#include <linux/fsl_qman.h>
- +
- +#define DEFAULT_PTP_RX_BUF_SZ 256
- +#define DEFAULT_PTP_TX_BUF_SZ 256
- +
- +/* 1588 private ioctl calls */
- +#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
- +#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
- +#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
- +#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
- +#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
- +#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
- +#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
- +#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
- +#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
- +#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
- +#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
- +#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
- +
- +/* PTP V2 message type */
- +enum {
- + PTP_MSGTYPE_SYNC = 0x0,
- + PTP_MSGTYPE_DELREQ = 0x1,
- + PTP_MSGTYPE_PDELREQ = 0x2,
- + PTP_MSGTYPE_PDELRESP = 0x3,
- + PTP_MSGTYPE_FLWUP = 0x8,
- + PTP_MSGTYPE_DELRESP = 0x9,
- + PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
- + PTP_MSGTYPE_ANNOUNCE = 0xB,
- + PTP_MSGTYPE_SGNLNG = 0xC,
- + PTP_MSGTYPE_MNGMNT = 0xD,
- +};
- +
- +/* Byte offset of data in the PTP V2 headers */
- +#define PTP_OFFS_MSG_TYPE 0
- +#define PTP_OFFS_VER_PTP 1
- +#define PTP_OFFS_MSG_LEN 2
- +#define PTP_OFFS_DOM_NMB 4
- +#define PTP_OFFS_FLAGS 6
- +#define PTP_OFFS_CORFIELD 8
- +#define PTP_OFFS_SRCPRTID 20
- +#define PTP_OFFS_SEQ_ID 30
- +#define PTP_OFFS_CTRL 32
- +#define PTP_OFFS_LOGMEAN 33
- +
- +#define PTP_IP_OFFS 14
- +#define PTP_UDP_OFFS 34
- +#define PTP_HEADER_OFFS 42
- +#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
- +#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
- +#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
- +#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
- +
- +/* 1588-2008 network protocol enumeration values */
- +#define DPA_PTP_PROT_IPV4 1
- +#define DPA_PTP_PROT_IPV6 2
- +#define DPA_PTP_PROT_802_3 3
- +#define DPA_PTP_PROT_DONTCARE 0xFFFF
- +
- +#define DPA_PTP_SOURCE_PORT_LENGTH 10
- +#define DPA_PTP_HEADER_SZE 34
- +#define DPA_ETYPE_LEN 2
- +#define DPA_VLAN_TAG_LEN 4
- +#define NANOSEC_PER_SECOND 1000000000
- +
- +/* The threshold between the current found one and the oldest one */
- +#define TS_ACCUMULATION_THRESHOLD 50
- +
- +/* Struct needed to identify a timestamp */
- +struct dpa_ptp_ident {
- + u8 version;
- + u8 msg_type;
- + u16 netw_prot;
- + u16 seq_id;
- + u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
- +};
- +
- +/* Timestamp format in 1588-2008 */
- +struct dpa_ptp_time {
- + u64 sec; /* just 48 bit used */
- + u32 nsec;
- +};
- +
- +/* needed for timestamp data over ioctl */
- +struct dpa_ptp_data {
- + struct dpa_ptp_ident ident;
- + struct dpa_ptp_time ts;
- +};
- +
- +struct dpa_ptp_circ_buf {
- + struct circ_buf circ_buf;
- + u32 size;
- + spinlock_t ptp_lock;
- +};
- +
- +/* PTP TSU control structure */
- +struct dpa_ptp_tsu {
- + struct dpa_priv_s *dpa_priv;
- + bool valid;
- + struct dpa_ptp_circ_buf rx_timestamps;
- + struct dpa_ptp_circ_buf tx_timestamps;
- +
- + /* HW timestamping over ioctl enabled flag */
- + int hwts_tx_en_ioctl;
- + int hwts_rx_en_ioctl;
- +};
- +
- +extern int dpa_ptp_init(struct dpa_priv_s *priv);
- +extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
- +extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
- + struct sk_buff *skb, void *data);
- +extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
- + struct sk_buff *skb, void *data);
- +extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
- +#endif
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
- @@ -0,0 +1,180 @@
- +/* Copyright 2008-2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/module.h>
- +#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
- +#include <linux/debugfs.h>
- +#include "dpaa_debugfs.h"
- +#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
- +
- +#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
- +#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
- +
- +static struct dentry *dpa_debugfs_root;
- +
- +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
- +static ssize_t dpa_loop_write(struct file *f,
- + const char __user *buf, size_t count, loff_t *off);
- +
- +static const struct file_operations dpa_debugfs_lp_fops = {
- + .open = dpa_debugfs_loop_open,
- + .write = dpa_loop_write,
- + .read = seq_read,
- + .llseek = seq_lseek,
- + .release = single_release,
- +};
- +
- +static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
- +{
- + struct dpa_priv_s *priv;
- +
- + BUG_ON(offset == NULL);
- +
- + priv = netdev_priv((struct net_device *)file->private);
- + seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
- +
- + return 0;
- +}
- +
- +static int user_input_convert(const char __user *user_buf, size_t count,
- + long *val)
- +{
- + char buf[12];
- +
- + if (count > sizeof(buf) - 1)
- + return -EINVAL;
- + if (copy_from_user(buf, user_buf, count))
- + return -EFAULT;
- + buf[count] = '\0';
- + if (kstrtol(buf, 0, val))
- + return -EINVAL;
- + return 0;
- +}
- +
- +static ssize_t dpa_loop_write(struct file *f,
- + const char __user *buf, size_t count, loff_t *off)
- +{
- + struct dpa_priv_s *priv;
- + struct net_device *netdev;
- + struct seq_file *sf;
- + int ret;
- + long val;
- +
- + ret = user_input_convert(buf, count, &val);
- + if (ret)
- + return ret;
- +
- + sf = (struct seq_file *)f->private_data;
- + netdev = (struct net_device *)sf->private;
- + priv = netdev_priv(netdev);
- +
- + priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
- +
- + return count;
- +}
- +
- +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
- +{
- + int _errno;
- + const struct net_device *net_dev;
- +
- + _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
- + if (unlikely(_errno < 0)) {
- + net_dev = (struct net_device *)inode->i_private;
- +
- + if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
- + netdev_err(net_dev, "single_open() = %d\n",
- + _errno);
- + }
- +
- + return _errno;
- +}
- +
- +
- +int dpa_netdev_debugfs_create(struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + static int cnt;
- + char loop_file_name[100];
- +
- + if (unlikely(dpa_debugfs_root == NULL)) {
- + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
- + KBUILD_BASENAME".c", __LINE__, __func__,
- + "root debugfs missing, possible module ordering issue");
- + return -ENOMEM;
- + }
- +
- + sprintf(loop_file_name, "eth%d_loop", ++cnt);
- + priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
- + S_IRUGO,
- + dpa_debugfs_root,
- + net_dev,
- + &dpa_debugfs_lp_fops);
- + if (unlikely(priv->debugfs_loop_file == NULL)) {
- + netdev_err(net_dev, "debugfs_create_file(%s/%s)",
- + dpa_debugfs_root->d_iname,
- + loop_file_name);
- +
- + return -ENOMEM;
- + }
- + return 0;
- +}
- +
- +void dpa_netdev_debugfs_remove(struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- +
- + debugfs_remove(priv->debugfs_loop_file);
- +}
- +
- +int __init dpa_debugfs_module_init(void)
- +{
- + int _errno = 0;
- +
- + pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
- +
- + dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
- +
- + if (unlikely(dpa_debugfs_root == NULL)) {
- + _errno = -ENOMEM;
- + pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
- + KBUILD_BASENAME".c", __LINE__, __func__);
- + pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
- + DPA_ETH_DEBUGFS_ROOT, _errno);
- + }
- +
- + return _errno;
- +}
- +
- +void __exit dpa_debugfs_module_exit(void)
- +{
- + debugfs_remove(dpa_debugfs_root);
- +}
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
- @@ -0,0 +1,43 @@
- +/* Copyright 2008-2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef DPAA_DEBUGFS_H_
- +#define DPAA_DEBUGFS_H_
- +
- +#include <linux/netdevice.h>
- +#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
- +
- +int dpa_netdev_debugfs_create(struct net_device *net_dev);
- +void dpa_netdev_debugfs_remove(struct net_device *net_dev);
- +int __init dpa_debugfs_module_init(void);
- +void __exit dpa_debugfs_module_exit(void);
- +
- +#endif /* DPAA_DEBUGFS_H_ */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
- @@ -0,0 +1,1183 @@
- +/* Copyright 2008-2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_mdio.h>
- +#include <linux/of_net.h>
- +#include <linux/kthread.h>
- +#include <linux/io.h>
- +#include <linux/if_arp.h> /* arp_hdr_len() */
- +#include <linux/if_vlan.h> /* VLAN_HLEN */
- +#include <linux/icmp.h> /* struct icmphdr */
- +#include <linux/ip.h> /* struct iphdr */
- +#include <linux/ipv6.h> /* struct ipv6hdr */
- +#include <linux/udp.h> /* struct udphdr */
- +#include <linux/tcp.h> /* struct tcphdr */
- +#include <linux/net.h> /* net_ratelimit() */
- +#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
- +#include <linux/highmem.h>
- +#include <linux/percpu.h>
- +#include <linux/dma-mapping.h>
- +#include <linux/fsl_bman.h>
- +
- +#include "fsl_fman.h"
- +#include "fm_ext.h"
- +#include "fm_port_ext.h"
- +
- +#include "mac.h"
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- +#include "dpaa_debugfs.h"
- +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
- +
- +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
- + * using trace events only need to #include <trace/events/sched.h>
- + */
- +#define CREATE_TRACE_POINTS
- +#include "dpaa_eth_trace.h"
- +
- +#define DPA_NAPI_WEIGHT 64
- +
- +/* Valid checksum indication */
- +#define DPA_CSUM_VALID 0xFFFF
- +
- +#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +
- +MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
- +
- +MODULE_DESCRIPTION(DPA_DESCRIPTION);
- +
- +static uint8_t debug = -1;
- +module_param(debug, byte, S_IRUGO);
- +MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
- +
- +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
- +static uint16_t tx_timeout = 1000;
- +module_param(tx_timeout, ushort, S_IRUGO);
- +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
- +
- +static const char rtx[][3] = {
- + [RX] = "RX",
- + [TX] = "TX"
- +};
- +
- +/* BM */
- +
- +#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
- +
- +static uint8_t dpa_priv_common_bpid;
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- +struct net_device *dpa_loop_netdevs[20];
- +#endif
- +
- +#ifdef CONFIG_PM
- +
- +static int dpaa_suspend(struct device *dev)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- + int err = 0;
- +
- + net_dev = dev_get_drvdata(dev);
- +
- + if (net_dev->flags & IFF_UP) {
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + if (priv->wol & DPAA_WOL_MAGIC) {
- + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
- + priv->mac_dev->get_mac_handle(mac_dev), true);
- + if (err) {
- + netdev_err(net_dev, "set_wol() = %d\n", err);
- + goto set_wol_failed;
- + }
- + }
- +
- + err = fm_port_suspend(mac_dev->port_dev[RX]);
- + if (err) {
- + netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
- + goto rx_port_suspend_failed;
- + }
- +
- + err = fm_port_suspend(mac_dev->port_dev[TX]);
- + if (err) {
- + netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
- + goto tx_port_suspend_failed;
- + }
- + }
- +
- + return 0;
- +
- +tx_port_suspend_failed:
- + fm_port_resume(mac_dev->port_dev[RX]);
- +rx_port_suspend_failed:
- + if (priv->wol & DPAA_WOL_MAGIC) {
- + priv->mac_dev->set_wol(mac_dev->port_dev[RX],
- + priv->mac_dev->get_mac_handle(mac_dev), false);
- + }
- +set_wol_failed:
- + return err;
- +}
- +
- +static int dpaa_resume(struct device *dev)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- + int err = 0;
- +
- + net_dev = dev_get_drvdata(dev);
- +
- + if (net_dev->flags & IFF_UP) {
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + err = fm_port_resume(mac_dev->port_dev[TX]);
- + if (err) {
- + netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
- + goto resume_failed;
- + }
- +
- + err = fm_port_resume(mac_dev->port_dev[RX]);
- + if (err) {
- + netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
- + goto resume_failed;
- + }
- +
- + if (priv->wol & DPAA_WOL_MAGIC) {
- + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
- + priv->mac_dev->get_mac_handle(mac_dev), false);
- + if (err) {
- + netdev_err(net_dev, "set_wol() = %d\n", err);
- + goto resume_failed;
- + }
- + }
- + }
- +
- + return 0;
- +
- +resume_failed:
- + return err;
- +}
- +
- +static const struct dev_pm_ops dpaa_pm_ops = {
- + .suspend = dpaa_suspend,
- + .resume = dpaa_resume,
- +};
- +
- +#define DPAA_PM_OPS (&dpaa_pm_ops)
- +
- +#else /* CONFIG_PM */
- +
- +#define DPAA_PM_OPS NULL
- +
- +#endif /* CONFIG_PM */
- +
- +/* Checks whether the checksum field in Parse Results array is valid
- + * (equals 0xFFFF) and increments the .cse counter otherwise
- + */
- +static inline void
- +dpa_csum_validation(const struct dpa_priv_s *priv,
- + struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_fd *fd)
- +{
- + dma_addr_t addr = qm_fd_addr(fd);
- + struct dpa_bp *dpa_bp = priv->dpa_bp;
- + void *frm = phys_to_virt(addr);
- + fm_prs_result_t *parse_result;
- +
- + if (unlikely(!frm))
- + return;
- +
- + dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
- + DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
- +
- + parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
- +
- + if (parse_result->cksum != DPA_CSUM_VALID)
- + percpu_priv->rx_errors.cse++;
- +}
- +
- +static void _dpa_rx_error(struct net_device *net_dev,
- + const struct dpa_priv_s *priv,
- + struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_fd *fd,
- + u32 fqid)
- +{
- + /* limit common, possibly innocuous Rx FIFO Overflow errors'
- + * interference with zero-loss convergence benchmark results.
- + */
- + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
- + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
- + else
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_RX_ERRORS);
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- + if (dpaa_eth_hooks.rx_error &&
- + dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
- + /* it's up to the hook to perform resource cleanup */
- + return;
- +#endif
- + percpu_priv->stats.rx_errors++;
- +
- + if (fd->status & FM_PORT_FRM_ERR_DMA)
- + percpu_priv->rx_errors.dme++;
- + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
- + percpu_priv->rx_errors.fpe++;
- + if (fd->status & FM_PORT_FRM_ERR_SIZE)
- + percpu_priv->rx_errors.fse++;
- + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
- + percpu_priv->rx_errors.phe++;
- + if (fd->status & FM_FD_STAT_L4CV)
- + dpa_csum_validation(priv, percpu_priv, fd);
- +
- + dpa_fd_release(net_dev, fd);
- +}
- +
- +static void _dpa_tx_error(struct net_device *net_dev,
- + const struct dpa_priv_s *priv,
- + struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_fd *fd,
- + u32 fqid)
- +{
- + struct sk_buff *skb;
- +
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_TX_ERRORS);
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- + if (dpaa_eth_hooks.tx_error &&
- + dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
- + /* now the hook must ensure proper cleanup */
- + return;
- +#endif
- + percpu_priv->stats.tx_errors++;
- +
- + /* If we intended the buffers from this frame to go into the bpools
- + * when the FMan transmit was done, we need to put it in manually.
- + */
- + if (fd->bpid != 0xff) {
- + dpa_fd_release(net_dev, fd);
- + return;
- + }
- +
- + skb = _dpa_cleanup_tx_fd(priv, fd);
- + dev_kfree_skb(skb);
- +}
- +
- +/* Helper function to factor out frame validation logic on all Rx paths. Its
- + * purpose is to extract from the Parse Results structure information about
- + * the integrity of the frame, its checksum, the length of the parsed headers
- + * and whether the frame is suitable for GRO.
- + *
- + * Assumes no parser errors, since any error frame is dropped before this
- + * function is called.
- + *
- + * @skb will have its ip_summed field overwritten;
- + * @use_gro will only be written with 0, if the frame is definitely not
- + * GRO-able; otherwise, it will be left unchanged;
- + * @hdr_size will be written with a safe value, at least the size of the
- + * headers' length.
- + */
- +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
- + const struct qm_fd *fd,
- + struct sk_buff *skb, int *use_gro)
- +{
- + if (fd->status & FM_FD_STAT_L4CV) {
- + /* The parser has run and performed L4 checksum validation.
- + * We know there were no parser errors (and implicitly no
- + * L4 csum error), otherwise we wouldn't be here.
- + */
- + skb->ip_summed = CHECKSUM_UNNECESSARY;
- +
- + /* Don't go through GRO for certain types of traffic that
- + * we know are not GRO-able, such as dgram-based protocols.
- + * In the worst-case scenarios, such as small-pkt terminating
- + * UDP, the extra GRO processing would be overkill.
- + *
- + * The only protocol the Parser supports that is also GRO-able
- + * is currently TCP.
- + */
- + if (!fm_l4_frame_is_tcp(parse_results))
- + *use_gro = 0;
- +
- + return;
- + }
- +
- + /* We're here because either the parser didn't run or the L4 checksum
- + * was not verified. This may include the case of a UDP frame with
- + * checksum zero or an L4 proto other than TCP/UDP
- + */
- + skb->ip_summed = CHECKSUM_NONE;
- +
- + /* Bypass GRO for unknown traffic or if no PCDs are applied */
- + *use_gro = 0;
- +}
- +
- +int dpaa_eth_poll(struct napi_struct *napi, int budget)
- +{
- + struct dpa_napi_portal *np =
- + container_of(napi, struct dpa_napi_portal, napi);
- +
- + int cleaned = qman_p_poll_dqrr(np->p, budget);
- +
- + if (cleaned < budget) {
- + int tmp;
- + napi_complete(napi);
- + tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
- + DPA_BUG_ON(tmp);
- + }
- +
- + return cleaned;
- +}
- +EXPORT_SYMBOL(dpaa_eth_poll);
- +
- +static void __hot _dpa_tx_conf(struct net_device *net_dev,
- + const struct dpa_priv_s *priv,
- + struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_fd *fd,
- + u32 fqid)
- +{
- + struct sk_buff *skb;
- +
- + /* do we need the timestamp for the error frames? */
- +
- + if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_TX_ERRORS);
- +
- + percpu_priv->stats.tx_errors++;
- + }
- +
- + /* hopefully we need not get the timestamp before the hook */
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- + if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
- + fd, fqid) == DPAA_ETH_STOLEN)
- + /* it's the hook that must now perform cleanup */
- + return;
- +#endif
- + /* This might not perfectly reflect the reality, if the core dequeuing
- + * the Tx confirmation is different from the one that did the enqueue,
- + * but at least it'll show up in the total count.
- + */
- + percpu_priv->tx_confirm++;
- +
- + skb = _dpa_cleanup_tx_fd(priv, fd);
- +
- + dev_kfree_skb(skb);
- +}
- +
- +enum qman_cb_dqrr_result
- +priv_rx_error_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + int *count_ptr;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- +
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- + count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
- +
- + if (dpaa_eth_napi_schedule(percpu_priv, portal))
- + return qman_cb_dqrr_stop;
- +
- + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
- + /* Unable to refill the buffer pool due to insufficient
- + * system memory. Just release the frame back into the pool,
- + * otherwise we'll soon end up with an empty buffer pool.
- + */
- + dpa_fd_release(net_dev, &dq->fd);
- + else
- + _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +
- +enum qman_cb_dqrr_result __hot
- +priv_rx_default_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + int *count_ptr;
- + struct dpa_bp *dpa_bp;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- + dpa_bp = priv->dpa_bp;
- +
- + /* Trace the Rx fd */
- + trace_dpa_rx_fd(net_dev, fq, &dq->fd);
- +
- + /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- + count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
- +
- + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
- + return qman_cb_dqrr_stop;
- +
- + /* Vale of plenty: make sure we didn't run out of buffers */
- +
- + if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
- + /* Unable to refill the buffer pool due to insufficient
- + * system memory. Just release the frame back into the pool,
- + * otherwise we'll soon end up with an empty buffer pool.
- + */
- + dpa_fd_release(net_dev, &dq->fd);
- + else
- + _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
- + count_ptr);
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +enum qman_cb_dqrr_result
- +priv_tx_conf_error_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- +
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + if (dpaa_eth_napi_schedule(percpu_priv, portal))
- + return qman_cb_dqrr_stop;
- +
- + _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +enum qman_cb_dqrr_result __hot
- +priv_tx_conf_default_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- +
- + /* Trace the fd */
- + trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
- +
- + /* Non-migratable context, safe to use raw_cpu_ptr */
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + if (dpaa_eth_napi_schedule(percpu_priv, portal))
- + return qman_cb_dqrr_stop;
- +
- + _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +void priv_ern(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_mr_entry *msg)
- +{
- + struct net_device *net_dev;
- + const struct dpa_priv_s *priv;
- + struct sk_buff *skb;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct qm_fd fd = msg->ern.fd;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- + /* Non-migratable context, safe to use raw_cpu_ptr */
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + percpu_priv->stats.tx_dropped++;
- + percpu_priv->stats.tx_fifo_errors++;
- + count_ern(percpu_priv, msg);
- +
- + /* If we intended this buffer to go into the pool
- + * when the FM was done, we need to put it in
- + * manually.
- + */
- + if (msg->ern.fd.bpid != 0xff) {
- + dpa_fd_release(net_dev, &fd);
- + return;
- + }
- +
- + skb = _dpa_cleanup_tx_fd(priv, &fd);
- + dev_kfree_skb_any(skb);
- +}
- +
- +const struct dpa_fq_cbs_t private_fq_cbs = {
- + .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
- + .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
- + .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
- + .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
- + .egress_ern = { .cb = { .ern = priv_ern } }
- +};
- +EXPORT_SYMBOL(private_fq_cbs);
- +
- +static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
- +{
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, j;
- +
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + for (j = 0; j < qman_portal_max; j++)
- + napi_enable(&percpu_priv->np[j].napi);
- + }
- +}
- +
- +static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
- +{
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, j;
- +
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + for (j = 0; j < qman_portal_max; j++)
- + napi_disable(&percpu_priv->np[j].napi);
- + }
- +}
- +
- +static int __cold dpa_eth_priv_start(struct net_device *net_dev)
- +{
- + int err;
- + struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- +
- + dpaa_eth_napi_enable(priv);
- +
- + err = dpa_start(net_dev);
- + if (err < 0)
- + dpaa_eth_napi_disable(priv);
- +
- + return err;
- +}
- +
- +
- +
- +static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
- +{
- + int _errno;
- + struct dpa_priv_s *priv;
- +
- + _errno = dpa_stop(net_dev);
- + /* Allow NAPI to consume any frame still in the Rx/TxConfirm
- + * ingress queues. This is to avoid a race between the current
- + * context and ksoftirqd which could leave NAPI disabled while
- + * in fact there's still Rx traffic to be processed.
- + */
- + usleep_range(5000, 10000);
- +
- + priv = netdev_priv(net_dev);
- + dpaa_eth_napi_disable(priv);
- +
- + return _errno;
- +}
- +
- +#ifdef CONFIG_NET_POLL_CONTROLLER
- +static void dpaa_eth_poll_controller(struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct dpa_percpu_priv_s *percpu_priv =
- + raw_cpu_ptr(priv->percpu_priv);
- + struct qman_portal *p;
- + const struct qman_portal_config *pc;
- + struct dpa_napi_portal *np;
- +
- + p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
- + pc = qman_p_get_portal_config(p);
- + np = &percpu_priv->np[pc->index];
- +
- + qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
- + qman_p_poll_dqrr(np->p, np->napi.weight);
- + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
- +}
- +#endif
- +
- +static const struct net_device_ops dpa_private_ops = {
- + .ndo_open = dpa_eth_priv_start,
- + .ndo_start_xmit = dpa_tx,
- + .ndo_stop = dpa_eth_priv_stop,
- + .ndo_tx_timeout = dpa_timeout,
- + .ndo_get_stats64 = dpa_get_stats64,
- + .ndo_set_mac_address = dpa_set_mac_address,
- + .ndo_validate_addr = eth_validate_addr,
- +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- + .ndo_select_queue = dpa_select_queue,
- +#endif
- + .ndo_change_mtu = dpa_change_mtu,
- + .ndo_set_rx_mode = dpa_set_rx_mode,
- + .ndo_init = dpa_ndo_init,
- + .ndo_set_features = dpa_set_features,
- + .ndo_fix_features = dpa_fix_features,
- + .ndo_do_ioctl = dpa_ioctl,
- +#ifdef CONFIG_NET_POLL_CONTROLLER
- + .ndo_poll_controller = dpaa_eth_poll_controller,
- +#endif
- +};
- +
- +static int dpa_private_napi_add(struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, cpu;
- +
- + for_each_possible_cpu(cpu) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- +
- + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
- + qman_portal_max * sizeof(struct dpa_napi_portal),
- + GFP_KERNEL);
- +
- + if (unlikely(percpu_priv->np == NULL)) {
- + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + for (i = 0; i < qman_portal_max; i++)
- + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
- + dpaa_eth_poll, DPA_NAPI_WEIGHT);
- + }
- +
- + return 0;
- +}
- +
- +void dpa_private_napi_del(struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, cpu;
- +
- + for_each_possible_cpu(cpu) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- +
- + if (percpu_priv->np) {
- + for (i = 0; i < qman_portal_max; i++)
- + netif_napi_del(&percpu_priv->np[i].napi);
- +
- + devm_kfree(net_dev->dev.parent, percpu_priv->np);
- + }
- + }
- +}
- +EXPORT_SYMBOL(dpa_private_napi_del);
- +
- +static int dpa_private_netdev_init(struct net_device *net_dev)
- +{
- + int i;
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct dpa_percpu_priv_s *percpu_priv;
- + const uint8_t *mac_addr;
- +
- + /* Although we access another CPU's private data here
- + * we do it at initialization so it is safe
- + */
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- + percpu_priv->net_dev = net_dev;
- + }
- +
- + net_dev->netdev_ops = &dpa_private_ops;
- + mac_addr = priv->mac_dev->addr;
- +
- + net_dev->mem_start = priv->mac_dev->res->start;
- + net_dev->mem_end = priv->mac_dev->res->end;
- +
- + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- + NETIF_F_LLTX);
- +
- + /* Advertise S/G and HIGHDMA support for private interfaces */
- + net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
- + /* Recent kernels enable GSO automatically, if
- + * we declare NETIF_F_SG. For conformity, we'll
- + * still declare GSO explicitly.
- + */
- + net_dev->features |= NETIF_F_GSO;
- +
- + /* Advertise GRO support */
- + net_dev->features |= NETIF_F_GRO;
- +
- + return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
- +}
- +
- +static struct dpa_bp * __cold
- +dpa_priv_bp_probe(struct device *dev)
- +{
- + struct dpa_bp *dpa_bp;
- +
- + dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
- + if (unlikely(dpa_bp == NULL)) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
- + dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
- +
- + dpa_bp->seed_cb = dpa_bp_priv_seed;
- + dpa_bp->free_buf_cb = _dpa_bp_free_pf;
- +
- + return dpa_bp;
- +}
- +
- +/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
- + * We won't be sending congestion notifications to FMan; for now, we just use
- + * this CGR to generate enqueue rejections to FMan in order to drop the frames
- + * before they reach our ingress queues and eat up memory.
- + */
- +static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
- +{
- + struct qm_mcc_initcgr initcgr;
- + u32 cs_th;
- + int err;
- +
- + err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
- + if (err < 0) {
- + pr_err("Error %d allocating CGR ID\n", err);
- + goto out_error;
- + }
- +
- + /* Enable CS TD, but disable Congestion State Change Notifications. */
- + initcgr.we_mask = QM_CGR_WE_CS_THRES;
- + initcgr.cgr.cscn_en = QM_CGR_EN;
- + cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
- + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- +
- + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
- + initcgr.cgr.cstd_en = QM_CGR_EN;
- +
- + /* This is actually a hack, because this CGR will be associated with
- + * our affine SWP. However, we'll place our ingress FQs in it.
- + */
- + err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
- + &initcgr);
- + if (err < 0) {
- + pr_err("Error %d creating ingress CGR with ID %d\n", err,
- + priv->ingress_cgr.cgrid);
- + qman_release_cgrid(priv->ingress_cgr.cgrid);
- + goto out_error;
- + }
- + pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
- + priv->ingress_cgr.cgrid, priv->mac_dev->addr);
- +
- + /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
- + * range), but we have no common initialization path between the
- + * different variants of the DPAA Eth driver, so we do it here rather
- + * than modifying every other variant than "private Eth".
- + */
- + priv->use_ingress_cgr = true;
- +
- +out_error:
- + return err;
- +}
- +
- +static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
- + size_t count)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + int i;
- +
- + if (netif_msg_probe(priv))
- + dev_dbg(net_dev->dev.parent,
- + "Using private BM buffer pools\n");
- +
- + priv->bp_count = count;
- +
- + for (i = 0; i < count; i++) {
- + int err;
- + err = dpa_bp_alloc(&dpa_bp[i]);
- + if (err < 0) {
- + dpa_bp_free(priv);
- + priv->dpa_bp = NULL;
- + return err;
- + }
- +
- + priv->dpa_bp = &dpa_bp[i];
- + }
- +
- + dpa_priv_common_bpid = priv->dpa_bp->bpid;
- + return 0;
- +}
- +
- +static const struct of_device_id dpa_match[];
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- +static int dpa_new_loop_id(void)
- +{
- + static int if_id;
- +
- + return if_id++;
- +}
- +#endif
- +
- +static int
- +dpaa_eth_priv_probe(struct platform_device *_of_dev)
- +{
- + int err = 0, i, channel;
- + struct device *dev;
- + struct device_node *dpa_node;
- + struct dpa_bp *dpa_bp;
- + struct dpa_fq *dpa_fq, *tmp;
- + size_t count = 1;
- + struct net_device *net_dev = NULL;
- + struct dpa_priv_s *priv = NULL;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct fm_port_fqs port_fqs;
- + struct dpa_buffer_layout_s *buf_layout = NULL;
- + struct mac_device *mac_dev;
- + struct task_struct *kth;
- +
- + dev = &_of_dev->dev;
- +
- + dpa_node = dev->of_node;
- +
- + if (!of_device_is_available(dpa_node))
- + return -ENODEV;
- +
- + /* Get the buffer pools assigned to this interface;
- + * run only once the default pool probing code
- + */
- + dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
- + dpa_priv_bp_probe(dev);
- + if (IS_ERR(dpa_bp))
- + return PTR_ERR(dpa_bp);
- +
- + /* Allocate this early, so we can store relevant information in
- + * the private area (needed by 1588 code in dpa_mac_probe)
- + */
- + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
- + if (!net_dev) {
- + dev_err(dev, "alloc_etherdev_mq() failed\n");
- + goto alloc_etherdev_mq_failed;
- + }
- +
- + /* Do this here, so we can be verbose early */
- + SET_NETDEV_DEV(net_dev, dev);
- + dev_set_drvdata(dev, net_dev);
- +
- + priv = netdev_priv(net_dev);
- + priv->net_dev = net_dev;
- + strcpy(priv->if_type, "private");
- +
- + priv->msg_enable = netif_msg_init(debug, -1);
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + priv->loop_id = dpa_new_loop_id();
- + priv->loop_to = -1; /* disabled by default */
- + dpa_loop_netdevs[priv->loop_id] = net_dev;
- +#endif
- +
- + mac_dev = dpa_mac_probe(_of_dev);
- + if (IS_ERR(mac_dev) || !mac_dev) {
- + err = PTR_ERR(mac_dev);
- + goto mac_probe_failed;
- + }
- +
- + /* We have physical ports, so we need to establish
- + * the buffer layout.
- + */
- + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
- + GFP_KERNEL);
- + if (!buf_layout) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + goto alloc_failed;
- + }
- + dpa_set_buffers_layout(mac_dev, buf_layout);
- +
- + /* For private ports, need to compute the size of the default
- + * buffer pool, based on FMan port buffer layout;also update
- + * the maximum buffer size for private ports if necessary
- + */
- + dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
- + /* We only want to use jumbo frame optimization if we actually have
- + * L2 MAX FRM set for jumbo frames as well.
- + */
- + if (fm_get_max_frm() < 9600)
- + dev_warn(dev,
- + "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
- +#endif
- +
- + INIT_LIST_HEAD(&priv->dpa_fq_list);
- +
- + memset(&port_fqs, 0, sizeof(port_fqs));
- +
- + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
- + if (!err)
- + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
- + &port_fqs, true, TX);
- +
- + if (err < 0)
- + goto fq_probe_failed;
- +
- + /* bp init */
- +
- + err = dpa_priv_bp_create(net_dev, dpa_bp, count);
- +
- + if (err < 0)
- + goto bp_create_failed;
- +
- + priv->mac_dev = mac_dev;
- +
- + channel = dpa_get_channel();
- +
- + if (channel < 0) {
- + err = channel;
- + goto get_channel_failed;
- + }
- +
- + priv->channel = (uint16_t)channel;
- +
- + /* Start a thread that will walk the cpus with affine portals
- + * and add this pool channel to each's dequeue mask.
- + */
- + kth = kthread_run(dpaa_eth_add_channel,
- + (void *)(unsigned long)priv->channel,
- + "dpaa_%p:%d", net_dev, priv->channel);
- + if (!kth) {
- + err = -ENOMEM;
- + goto add_channel_failed;
- + }
- +
- + dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
- +
- + /* Create a congestion group for this netdev, with
- + * dynamically-allocated CGR ID.
- + * Must be executed after probing the MAC, but before
- + * assigning the egress FQs to the CGRs.
- + */
- + err = dpaa_eth_cgr_init(priv);
- + if (err < 0) {
- + dev_err(dev, "Error initializing CGR\n");
- + goto tx_cgr_init_failed;
- + }
- + err = dpaa_eth_priv_ingress_cgr_init(priv);
- + if (err < 0) {
- + dev_err(dev, "Error initializing ingress CGR\n");
- + goto rx_cgr_init_failed;
- + }
- +
- + /* Add the FQs to the interface, and make them active */
- + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
- + err = dpa_fq_init(dpa_fq, false);
- + if (err < 0)
- + goto fq_alloc_failed;
- + }
- +
- + priv->buf_layout = buf_layout;
- + priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
- + priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
- +
- + /* All real interfaces need their ports initialized */
- + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
- + buf_layout, dev);
- +
- +#ifdef CONFIG_FMAN_PFC
- + for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
- + err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
- + mac_dev->port_dev[TX], i, i);
- + if (unlikely(err != 0)) {
- + dev_err(dev, "Error maping PFC %u to WQ %u\n", i, i);
- + goto pfc_mapping_failed;
- + }
- + }
- +#endif
- +
- + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
- +
- + if (priv->percpu_priv == NULL) {
- + dev_err(dev, "devm_alloc_percpu() failed\n");
- + err = -ENOMEM;
- + goto alloc_percpu_failed;
- + }
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- + memset(percpu_priv, 0, sizeof(*percpu_priv));
- + }
- +
- + /* Initialize NAPI */
- + err = dpa_private_napi_add(net_dev);
- +
- + if (err < 0)
- + goto napi_add_failed;
- +
- + err = dpa_private_netdev_init(net_dev);
- +
- + if (err < 0)
- + goto netdev_init_failed;
- +
- + dpaa_eth_sysfs_init(&net_dev->dev);
- +
- +#ifdef CONFIG_PM
- + device_set_wakeup_capable(dev, true);
- +#endif
- +
- + pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
- +
- + return 0;
- +
- +netdev_init_failed:
- +napi_add_failed:
- + dpa_private_napi_del(net_dev);
- +alloc_percpu_failed:
- +#ifdef CONFIG_FMAN_PFC
- +pfc_mapping_failed:
- +#endif
- + dpa_fq_free(dev, &priv->dpa_fq_list);
- +fq_alloc_failed:
- + qman_delete_cgr_safe(&priv->ingress_cgr);
- + qman_release_cgrid(priv->ingress_cgr.cgrid);
- +rx_cgr_init_failed:
- + qman_delete_cgr_safe(&priv->cgr_data.cgr);
- + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- +tx_cgr_init_failed:
- +add_channel_failed:
- +get_channel_failed:
- + dpa_bp_free(priv);
- +bp_create_failed:
- +fq_probe_failed:
- +alloc_failed:
- +mac_probe_failed:
- + dev_set_drvdata(dev, NULL);
- + free_netdev(net_dev);
- +alloc_etherdev_mq_failed:
- + if (atomic_read(&dpa_bp->refs) == 0)
- + devm_kfree(dev, dpa_bp);
- +
- + return err;
- +}
- +
- +static const struct of_device_id dpa_match[] = {
- + {
- + .compatible = "fsl,dpa-ethernet"
- + },
- + {}
- +};
- +MODULE_DEVICE_TABLE(of, dpa_match);
- +
- +static struct platform_driver dpa_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME,
- + .of_match_table = dpa_match,
- + .owner = THIS_MODULE,
- + .pm = DPAA_PM_OPS,
- + },
- + .probe = dpaa_eth_priv_probe,
- + .remove = dpa_remove
- +};
- +
- +static int __init __cold dpa_load(void)
- +{
- + int _errno;
- +
- + pr_info(DPA_DESCRIPTION "\n");
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + dpa_debugfs_module_init();
- +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
- +
- + /* initialise dpaa_eth mirror values */
- + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
- + dpa_max_frm = fm_get_max_frm();
- + dpa_num_cpus = num_possible_cpus();
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
- +#endif
- +
- + _errno = platform_driver_register(&dpa_driver);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + }
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + return _errno;
- +}
- +module_init(dpa_load);
- +
- +static void __exit __cold dpa_unload(void)
- +{
- + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + platform_driver_unregister(&dpa_driver);
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + dpa_debugfs_module_exit();
- +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
- +
- + /* Only one channel is used and needs to be relased after all
- + * interfaces are removed
- + */
- + dpa_release_channel();
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +}
- +module_exit(dpa_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
- @@ -0,0 +1,695 @@
- +/* Copyright 2008-2012 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __DPA_H
- +#define __DPA_H
- +
- +#include <linux/netdevice.h>
- +#include <linux/fsl_qman.h> /* struct qman_fq */
- +
- +#include "fm_ext.h"
- +#include "dpaa_eth_trace.h"
- +
- +extern int dpa_rx_extra_headroom;
- +extern int dpa_max_frm;
- +extern int dpa_num_cpus;
- +
- +#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
- +#define dpa_get_max_frm() dpa_max_frm
- +
- +#define dpa_get_max_mtu() \
- + (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
- +
- +#define __hot
- +
- +/* Simple enum of FQ types - used for array indexing */
- +enum port_type {RX, TX};
- +
- +/* TODO: This structure should be renamed & moved to the FMD wrapper */
- +struct dpa_buffer_layout_s {
- + uint16_t priv_data_size;
- + bool parse_results;
- + bool time_stamp;
- + bool hash_results;
- + uint8_t manip_extra_space;
- + uint16_t data_align;
- +};
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define DPA_BUG_ON(cond) BUG_ON(cond)
- +#else
- +#define DPA_BUG_ON(cond)
- +#endif
- +
- +#define DPA_TX_PRIV_DATA_SIZE 16
- +#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
- +#define DPA_TIME_STAMP_SIZE 8
- +#define DPA_HASH_RESULTS_SIZE 8
- +#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
- + dpa_get_rx_extra_headroom())
- +
- +#define FM_FD_STAT_RX_ERRORS \
- + (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
- + FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
- + FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
- + FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
- + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
- +
- +#define FM_FD_STAT_TX_ERRORS \
- + (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
- + FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
- +
- +#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
- +/* The raw buffer size must be cacheline aligned.
- + * Normally we use 2K buffers.
- + */
- +#define DPA_BP_RAW_SIZE 2048
- +#else
- +/* For jumbo frame optimizations, use buffers large enough to accommodate
- + * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
- + * space to account for further alignments.
- + */
- +#define DPA_MAX_FRM_SIZE 9600
- +#define DPA_BP_RAW_SIZE \
- + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
- + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
- +#endif
- +
- +/* This is what FMan is ever allowed to use.
- + * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- + * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- + * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- + * half-page-aligned buffers (can we?), so we reserve some more space
- + * for start-of-buffer alignment.
- + */
- +#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
- + SMP_CACHE_BYTES)
- +/* We must ensure that skb_shinfo is always cacheline-aligned. */
- +#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
- +
- +/* Maximum size of a buffer for which recycling is allowed.
- + * We need an upper limit such that forwarded skbs that get reallocated on Tx
- + * aren't allowed to grow unboundedly. On the other hand, we need to make sure
- + * that skbs allocated by us will not fail to be recycled due to their size.
- + *
- + * For a requested size, the kernel allocator provides the next power of two
- + * sized block, which the stack will use as is, regardless of the actual size
- + * it required; since we must accommodate at most 9.6K buffers (L2 maximum
- + * supported frame size), set the recycling upper limit to 16K.
- + */
- +#define DPA_RECYCLE_MAX_SIZE 16384
- +
- +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
- +/*TODO: temporary for fman pcd testing */
- +#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
- +#endif
- +
- +#define DPAA_ETH_FQ_DELTA 0x10000
- +
- +#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
- + (((device_addr) & 0x1fffff) >> 6)
- +
- +#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
- + (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
- +
- +/* Largest value that the FQD's OAL field can hold.
- + * This is DPAA-1.x specific.
- + * TODO: This rather belongs in fsl_qman.h
- + */
- +#define FSL_QMAN_MAX_OAL 127
- +
- +/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
- +#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
- +
- +/* Default alignment for start of data in an Rx FD */
- +#define DPA_FD_DATA_ALIGNMENT 16
- +
- +/* Values for the L3R field of the FM Parse Results
- + */
- +/* L3 Type field: First IP Present IPv4 */
- +#define FM_L3_PARSE_RESULT_IPV4 0x8000
- +/* L3 Type field: First IP Present IPv6 */
- +#define FM_L3_PARSE_RESULT_IPV6 0x4000
- +
- +/* Values for the L4R field of the FM Parse Results
- + * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
- + */
- +/* L4 Type field: UDP */
- +#define FM_L4_PARSE_RESULT_UDP 0x40
- +/* L4 Type field: TCP */
- +#define FM_L4_PARSE_RESULT_TCP 0x20
- +/* FD status field indicating whether the FM Parser has attempted to validate
- + * the L4 csum of the frame.
- + * Note that having this bit set doesn't necessarily imply that the checksum
- + * is valid. One would have to check the parse results to find that out.
- + */
- +#define FM_FD_STAT_L4CV 0x00000004
- +
- +
- +#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
- +
- +/* Check if the parsed frame was found to be a TCP segment.
- + *
- + * @parse_result_ptr must be of type (fm_prs_result_t *).
- + */
- +#define fm_l4_frame_is_tcp(parse_result_ptr) \
- + ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
- +
- +/* number of Tx queues to FMan */
- +#ifdef CONFIG_FMAN_PFC
- +#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
- +#else
- +#define DPAA_ETH_TX_QUEUES NR_CPUS
- +#endif
- +
- +#define DPAA_ETH_RX_QUEUES 128
- +
- +/* Convenience macros for storing/retrieving the skb back-pointers. They must
- + * accommodate both recycling and confirmation paths - i.e. cases when the buf
- + * was allocated by ourselves, respectively by the stack. In the former case,
- + * we could store the skb at negative offset; in the latter case, we can't,
- + * so we'll use 0 as offset.
- + *
- + * NB: @off is an offset from a (struct sk_buff **) pointer!
- + */
- +#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
- +{ \
- + skbh = (struct sk_buff **)addr; \
- + *(skbh + (off)) = skb; \
- +}
- +#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
- +{ \
- + skbh = (struct sk_buff **)addr; \
- + skb = *(skbh + (off)); \
- +}
- +
- +#ifdef CONFIG_PM
- +/* Magic Packet wakeup */
- +#define DPAA_WOL_MAGIC 0x00000001
- +#endif
- +
- +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
- +struct pcd_range {
- + uint32_t base;
- + uint32_t count;
- +};
- +#endif
- +
- +/* More detailed FQ types - used for fine-grained WQ assignments */
- +enum dpa_fq_type {
- + FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
- + FQ_TYPE_RX_ERROR, /* Rx Error FQs */
- + FQ_TYPE_RX_PCD, /* User-defined PCDs */
- + FQ_TYPE_TX, /* "Real" Tx FQs */
- + FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
- + FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
- + FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
- + FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
- +};
- +
- +struct dpa_fq {
- + struct qman_fq fq_base;
- + struct list_head list;
- + struct net_device *net_dev;
- + bool init;
- + uint32_t fqid;
- + uint32_t flags;
- + uint16_t channel;
- + uint8_t wq;
- + enum dpa_fq_type fq_type;
- +};
- +
- +struct dpa_fq_cbs_t {
- + struct qman_fq rx_defq;
- + struct qman_fq tx_defq;
- + struct qman_fq rx_errq;
- + struct qman_fq tx_errq;
- + struct qman_fq egress_ern;
- +};
- +
- +struct fqid_cell {
- + uint32_t start;
- + uint32_t count;
- +};
- +
- +struct dpa_bp {
- + struct bman_pool *pool;
- + uint8_t bpid;
- + struct device *dev;
- + union {
- + /* The buffer pools used for the private ports are initialized
- + * with target_count buffers for each CPU; at runtime the
- + * number of buffers per CPU is constantly brought back to this
- + * level
- + */
- + int target_count;
- + /* The configured value for the number of buffers in the pool,
- + * used for shared port buffer pools
- + */
- + int config_count;
- + };
- + size_t size;
- + bool seed_pool;
- + /* physical address of the contiguous memory used by the pool to store
- + * the buffers
- + */
- + dma_addr_t paddr;
- + /* virtual address of the contiguous memory used by the pool to store
- + * the buffers
- + */
- + void __iomem *vaddr;
- + /* current number of buffers in the bpool alloted to this CPU */
- + int __percpu *percpu_count;
- + atomic_t refs;
- + /* some bpools need to be seeded before use by this cb */
- + int (*seed_cb)(struct dpa_bp *);
- + /* some bpools need to be emptied before freeing; this cb is used
- + * for freeing of individual buffers taken from the pool
- + */
- + void (*free_buf_cb)(void *addr);
- +};
- +
- +struct dpa_rx_errors {
- + u64 dme; /* DMA Error */
- + u64 fpe; /* Frame Physical Error */
- + u64 fse; /* Frame Size Error */
- + u64 phe; /* Header Error */
- + u64 cse; /* Checksum Validation Error */
- +};
- +
- +/* Counters for QMan ERN frames - one counter per rejection code */
- +struct dpa_ern_cnt {
- + u64 cg_tdrop; /* Congestion group taildrop */
- + u64 wred; /* WRED congestion */
- + u64 err_cond; /* Error condition */
- + u64 early_window; /* Order restoration, frame too early */
- + u64 late_window; /* Order restoration, frame too late */
- + u64 fq_tdrop; /* FQ taildrop */
- + u64 fq_retired; /* FQ is retired */
- + u64 orp_zero; /* ORP disabled */
- +};
- +
- +struct dpa_napi_portal {
- + struct napi_struct napi;
- + struct qman_portal *p;
- +};
- +
- +struct dpa_percpu_priv_s {
- + struct net_device *net_dev;
- + struct dpa_napi_portal *np;
- + u64 in_interrupt;
- + u64 tx_returned;
- + u64 tx_confirm;
- + /* fragmented (non-linear) skbuffs received from the stack */
- + u64 tx_frag_skbuffs;
- + /* number of S/G frames received */
- + u64 rx_sg;
- +
- + struct rtnl_link_stats64 stats;
- + struct dpa_rx_errors rx_errors;
- + struct dpa_ern_cnt ern_cnt;
- +};
- +
- +struct dpa_priv_s {
- + struct dpa_percpu_priv_s __percpu *percpu_priv;
- + struct dpa_bp *dpa_bp;
- + /* Store here the needed Tx headroom for convenience and speed
- + * (even though it can be computed based on the fields of buf_layout)
- + */
- + uint16_t tx_headroom;
- + struct net_device *net_dev;
- + struct mac_device *mac_dev;
- + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
- + struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
- +
- + size_t bp_count;
- +
- + uint16_t channel; /* "fsl,qman-channel-id" */
- + struct list_head dpa_fq_list;
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + struct dentry *debugfs_loop_file;
- +#endif
- +
- + uint32_t msg_enable; /* net_device message level */
- +#ifdef CONFIG_FSL_DPAA_1588
- + struct dpa_ptp_tsu *tsu;
- +#endif
- +
- +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
- +/* TODO: this is temporary until pcd support is implemented in dpaa */
- + int priv_pcd_num_ranges;
- + struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
- +#endif
- +
- + struct {
- + /**
- + * All egress queues to a given net device belong to one
- + * (and the same) congestion group.
- + */
- + struct qman_cgr cgr;
- + /* If congested, when it began. Used for performance stats. */
- + u32 congestion_start_jiffies;
- + /* Number of jiffies the Tx port was congested. */
- + u32 congested_jiffies;
- + /**
- + * Counter for the number of times the CGR
- + * entered congestion state
- + */
- + u32 cgr_congested_count;
- + } cgr_data;
- + /* Use a per-port CGR for ingress traffic. */
- + bool use_ingress_cgr;
- + struct qman_cgr ingress_cgr;
- +
- +#ifdef CONFIG_FSL_DPAA_TS
- + bool ts_tx_en; /* Tx timestamping enabled */
- + bool ts_rx_en; /* Rx timestamping enabled */
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- + struct dpa_buffer_layout_s *buf_layout;
- + uint16_t rx_headroom;
- + char if_type[30];
- +
- + void *peer;
- +#ifdef CONFIG_PM
- + u32 wol;
- +#endif
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + int loop_id;
- + int loop_to;
- +#endif
- +#ifdef CONFIG_FSL_DPAA_CEETM
- + bool ceetm_en; /* CEETM QoS enabled */
- +#endif
- +};
- +
- +struct fm_port_fqs {
- + struct dpa_fq *tx_defq;
- + struct dpa_fq *tx_errq;
- + struct dpa_fq *rx_defq;
- + struct dpa_fq *rx_errq;
- +};
- +
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- +extern struct net_device *dpa_loop_netdevs[20];
- +#endif
- +
- +/* functions with different implementation for SG and non-SG: */
- +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
- +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
- +void __hot _dpa_rx(struct net_device *net_dev,
- + struct qman_portal *portal,
- + const struct dpa_priv_s *priv,
- + struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_fd *fd,
- + u32 fqid,
- + int *count_ptr);
- +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
- +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
- + struct qman_fq *egress_fq, struct qman_fq *conf_fq);
- +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- + const struct qm_fd *fd);
- +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
- + const struct qm_fd *fd,
- + struct sk_buff *skb,
- + int *use_gro);
- +#ifndef CONFIG_FSL_DPAA_TS
- +bool dpa_skb_is_recyclable(struct sk_buff *skb);
- +bool dpa_buf_is_recyclable(struct sk_buff *skb,
- + uint32_t min_size,
- + uint16_t min_offset,
- + unsigned char **new_buf_start);
- +#endif
- +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd,
- + int *count_ptr, int *offset);
- +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd);
- +int __cold __attribute__((nonnull))
- + _dpa_fq_free(struct device *dev, struct qman_fq *fq);
- +
- +/* Turn on HW checksum computation for this outgoing frame.
- + * If the current protocol is not something we support in this regard
- + * (or if the stack has already computed the SW checksum), we do nothing.
- + *
- + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- + * otherwise.
- + *
- + * Note that this function may modify the fd->cmd field and the skb data buffer
- + * (the Parse Results area).
- + */
- +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
- +
- +static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
- + struct qman_portal *portal)
- +{
- + /* In case of threaded ISR for RT enable kernel,
- + * in_irq() does not return appropriate value, so use
- + * in_serving_softirq to distinguish softirq or irq context.
- + */
- + if (unlikely(in_irq() || !in_serving_softirq())) {
- + /* Disable QMan IRQ and invoke NAPI */
- + int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
- + if (likely(!ret)) {
- + const struct qman_portal_config *pc =
- + qman_p_get_portal_config(portal);
- + struct dpa_napi_portal *np =
- + &percpu_priv->np[pc->index];
- +
- + np->p = portal;
- + napi_schedule(&np->napi);
- + percpu_priv->in_interrupt++;
- + return 1;
- + }
- + }
- + return 0;
- +}
- +
- +static inline ssize_t __const __must_check __attribute__((nonnull))
- +dpa_fd_length(const struct qm_fd *fd)
- +{
- + return fd->length20;
- +}
- +
- +static inline ssize_t __const __must_check __attribute__((nonnull))
- +dpa_fd_offset(const struct qm_fd *fd)
- +{
- + return fd->offset;
- +}
- +
- +/* Verifies if the skb length is below the interface MTU */
- +static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
- +{
- + if (unlikely(skb->len > mtu))
- + if ((skb->protocol != htons(ETH_P_8021Q))
- + || (skb->len > mtu + 4))
- + return -1;
- +
- + return 0;
- +}
- +
- +static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
- +{
- + uint16_t headroom;
- + /* The frame headroom must accommodate:
- + * - the driver private data area
- + * - parse results, hash results, timestamp if selected
- + * - manip extra space
- + * If either hash results or time stamp are selected, both will
- + * be copied to/from the frame headroom, as TS is located between PR and
- + * HR in the IC and IC copy size has a granularity of 16bytes
- + * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
- + *
- + * Also make sure the headroom is a multiple of data_align bytes
- + */
- + headroom = (uint16_t)(bl->priv_data_size +
- + (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
- + (bl->hash_results || bl->time_stamp ?
- + DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
- + bl->manip_extra_space);
- +
- + return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
- +}
- +
- +int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
- +int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
- +int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
- +
- +void dpaa_eth_sysfs_remove(struct device *dev);
- +void dpaa_eth_sysfs_init(struct device *dev);
- +int dpaa_eth_poll(struct napi_struct *napi, int budget);
- +
- +void dpa_private_napi_del(struct net_device *net_dev);
- +
- +/* Equivalent to a memset(0), but works faster */
- +static inline void clear_fd(struct qm_fd *fd)
- +{
- + fd->opaque_addr = 0;
- + fd->opaque = 0;
- + fd->cmd = 0;
- +}
- +
- +static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
- + struct qman_fq *tx_fq)
- +{
- + int i;
- +
- + for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
- + if (priv->egress_fqs[i] == tx_fq)
- + return i;
- +
- + return -EINVAL;
- +}
- +
- +static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
- + struct rtnl_link_stats64 *percpu_stats,
- + struct qm_fd *fd, struct qman_fq *egress_fq,
- + struct qman_fq *conf_fq)
- +{
- + int err, i;
- +
- + if (fd->bpid == 0xff)
- + fd->cmd |= qman_fq_fqid(conf_fq);
- +
- + /* Trace this Tx fd */
- + trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
- +
- + for (i = 0; i < 100000; i++) {
- + err = qman_enqueue(egress_fq, fd, 0);
- + if (err != -EBUSY)
- + break;
- + }
- +
- + if (unlikely(err < 0)) {
- + /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
- + percpu_stats->tx_errors++;
- + percpu_stats->tx_fifo_errors++;
- + return err;
- + }
- +
- + percpu_stats->tx_packets++;
- + percpu_stats->tx_bytes += dpa_fd_length(fd);
- +
- + return 0;
- +}
- +
- +/* Use multiple WQs for FQ assignment:
- + * - Tx Confirmation queues go to WQ1.
- + * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
- + * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
- + * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- + * to be scheduled, in case there are many more FQs in WQ3).
- + * This ensures that Tx-confirmed buffers are timely released. In particular,
- + * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
- + * are greatly outnumbered by other FQs in the system (usually PCDs), while
- + * dequeue scheduling is round-robin.
- + */
- +static inline void _dpa_assign_wq(struct dpa_fq *fq)
- +{
- + switch (fq->fq_type) {
- + case FQ_TYPE_TX_CONFIRM:
- + case FQ_TYPE_TX_CONF_MQ:
- + fq->wq = 1;
- + break;
- + case FQ_TYPE_RX_DEFAULT:
- + case FQ_TYPE_TX:
- + fq->wq = 3;
- + break;
- + case FQ_TYPE_RX_ERROR:
- + case FQ_TYPE_TX_ERROR:
- + case FQ_TYPE_RX_PCD_HI_PRIO:
- + fq->wq = 2;
- + break;
- + case FQ_TYPE_RX_PCD:
- + fq->wq = 5;
- + break;
- + default:
- + WARN(1, "Invalid FQ type %d for FQID %d!\n",
- + fq->fq_type, fq->fqid);
- + }
- +}
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- +/* Use in lieu of skb_get_queue_mapping() */
- +#ifdef CONFIG_FMAN_PFC
- +#define dpa_get_queue_mapping(skb) \
- + (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
- + ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
- + ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
- + dpa_num_cpus + smp_processor_id()));
- +
- +#else
- +#define dpa_get_queue_mapping(skb) \
- + raw_smp_processor_id()
- +#endif
- +#else
- +/* Use the queue selected by XPS */
- +#define dpa_get_queue_mapping(skb) \
- + skb_get_queue_mapping(skb)
- +#endif
- +
- +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
- +struct ptp_priv_s {
- + struct device_node *node;
- + struct platform_device *of_dev;
- + struct mac_device *mac_dev;
- +};
- +extern struct ptp_priv_s ptp_priv;
- +#endif
- +
- +static inline void _dpa_bp_free_pf(void *addr)
- +{
- + put_page(virt_to_head_page(addr));
- +}
- +
- +/* TODO: LS1043A SoC has a HW issue regarding FMan DMA transactions; The issue
- + * manifests itself at high traffic rates when frames exceed 4K memory
- + * boundaries; For the moment, we use a SW workaround to avoid frames larger
- + * than 4K or that exceed 4K alignements.
- + */
- +
- +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- +#define DPAA_LS1043A_DMA_4K_ISSUE 1
- +#endif
- +
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- +#define HAS_DMA_ISSUE(start, size) \
- + (((unsigned long)(start) ^ ((unsigned long)(start) + \
- + (unsigned long)(size))) & ~0xFFF)
- +
- +#define BOUNDARY_4K(start, size) (((unsigned long)(start) + \
- + (unsigned long)(size)) & ~0xFFF)
- +#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
- +
- +#endif /* __DPA_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
- @@ -0,0 +1,263 @@
- +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/io.h>
- +#include <linux/of_platform.h>
- +#include <linux/of_net.h>
- +#include <linux/etherdevice.h>
- +#include <linux/kthread.h>
- +#include <linux/percpu.h>
- +#include <linux/highmem.h>
- +#include <linux/sort.h>
- +#include <linux/fsl_qman.h>
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_eth_base.h"
- +
- +#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +
- +uint8_t advanced_debug = -1;
- +module_param(advanced_debug, byte, S_IRUGO);
- +MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
- +EXPORT_SYMBOL(advanced_debug);
- +
- +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
- +{
- + return ((struct dpa_bp *)dpa_bp0)->size -
- + ((struct dpa_bp *)dpa_bp1)->size;
- +}
- +
- +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
- +dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
- +{
- + int i, lenp, na, ns, err;
- + struct device *dev;
- + struct device_node *dev_node;
- + const __be32 *bpool_cfg;
- + struct dpa_bp *dpa_bp;
- + u32 bpid;
- +
- + dev = &_of_dev->dev;
- +
- + *count = of_count_phandle_with_args(dev->of_node,
- + "fsl,bman-buffer-pools", NULL);
- + if (*count < 1) {
- + dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
- + return ERR_PTR(-EINVAL);
- + }
- +
- + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
- + if (dpa_bp == NULL) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + dev_node = of_find_node_by_path("/");
- + if (unlikely(dev_node == NULL)) {
- + dev_err(dev, "of_find_node_by_path(/) failed\n");
- + return ERR_PTR(-EINVAL);
- + }
- +
- + na = of_n_addr_cells(dev_node);
- + ns = of_n_size_cells(dev_node);
- +
- + for (i = 0; i < *count; i++) {
- + of_node_put(dev_node);
- +
- + dev_node = of_parse_phandle(dev->of_node,
- + "fsl,bman-buffer-pools", i);
- + if (dev_node == NULL) {
- + dev_err(dev, "of_find_node_by_phandle() failed\n");
- + return ERR_PTR(-EFAULT);
- + }
- +
- + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
- + dev_err(dev,
- + "!of_device_is_compatible(%s, fsl,bpool)\n",
- + dev_node->full_name);
- + dpa_bp = ERR_PTR(-EINVAL);
- + goto _return_of_node_put;
- + }
- +
- + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
- + if (err) {
- + dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
- + dpa_bp = ERR_PTR(-EINVAL);
- + goto _return_of_node_put;
- + }
- + dpa_bp[i].bpid = (uint8_t)bpid;
- +
- + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
- + &lenp);
- + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
- + const uint32_t *seed_pool;
- +
- + dpa_bp[i].config_count =
- + (int)of_read_number(bpool_cfg, ns);
- + dpa_bp[i].size =
- + (size_t)of_read_number(bpool_cfg + ns, ns);
- + dpa_bp[i].paddr =
- + of_read_number(bpool_cfg + 2 * ns, na);
- +
- + seed_pool = of_get_property(dev_node,
- + "fsl,bpool-ethernet-seeds", &lenp);
- + dpa_bp[i].seed_pool = !!seed_pool;
- +
- + } else {
- + dev_err(dev,
- + "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
- + dev_node->full_name);
- + dpa_bp = ERR_PTR(-EINVAL);
- + goto _return_of_node_put;
- + }
- + }
- +
- + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
- +
- + return dpa_bp;
- +
- +_return_of_node_put:
- + if (dev_node)
- + of_node_put(dev_node);
- +
- + return dpa_bp;
- +}
- +EXPORT_SYMBOL(dpa_bp_probe);
- +
- +int dpa_bp_shared_port_seed(struct dpa_bp *bp)
- +{
- + void __iomem **ptr;
- +
- + /* In MAC-less and Shared-MAC scenarios the physical
- + * address of the buffer pool in device tree is set
- + * to 0 to specify that another entity (USDPAA) will
- + * allocate and seed the buffers
- + */
- + if (!bp->paddr)
- + return 0;
- +
- + /* allocate memory region for buffers */
- + devm_request_mem_region(bp->dev, bp->paddr,
- + bp->size * bp->config_count, KBUILD_MODNAME);
- + /* managed ioremap unmapping */
- + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- + if (!ptr)
- + return -EIO;
- +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- + bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
- +#else
- + bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
- +#endif
- + if (bp->vaddr == NULL) {
- + pr_err("Could not map memory for pool %d\n", bp->bpid);
- + devres_free(ptr);
- + return -EIO;
- + }
- + *ptr = bp->vaddr;
- + devres_add(bp->dev, ptr);
- +
- + /* seed pool with buffers from that memory region */
- + if (bp->seed_pool) {
- + int count = bp->target_count;
- + dma_addr_t addr = bp->paddr;
- +
- + while (count) {
- + struct bm_buffer bufs[8];
- + uint8_t num_bufs = 0;
- +
- + do {
- + BUG_ON(addr > 0xffffffffffffull);
- + bufs[num_bufs].bpid = bp->bpid;
- + bm_buffer_set64(&bufs[num_bufs++], addr);
- + addr += bp->size;
- +
- + } while (--count && (num_bufs < 8));
- +
- + while (bman_release(bp->pool, bufs, num_bufs, 0))
- + cpu_relax();
- + }
- + }
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_bp_shared_port_seed);
- +
- +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
- + size_t count)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + int i;
- +
- + priv->dpa_bp = dpa_bp;
- + priv->bp_count = count;
- +
- + for (i = 0; i < count; i++) {
- + int err;
- + err = dpa_bp_alloc(&dpa_bp[i]);
- + if (err < 0) {
- + dpa_bp_free(priv);
- + priv->dpa_bp = NULL;
- + return err;
- + }
- + }
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_bp_create);
- +
- +static int __init __cold dpa_advanced_load(void)
- +{
- + pr_info(DPA_DESCRIPTION "\n");
- +
- + return 0;
- +}
- +module_init(dpa_advanced_load);
- +
- +static void __exit __cold dpa_advanced_unload(void)
- +{
- + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
- + KBUILD_BASENAME".c", __func__);
- +
- +}
- +module_exit(dpa_advanced_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
- @@ -0,0 +1,50 @@
- +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __DPAA_ETH_BASE_H
- +#define __DPAA_ETH_BASE_H
- +
- +#include <linux/etherdevice.h> /* struct net_device */
- +#include <linux/fsl_bman.h> /* struct bm_buffer */
- +#include <linux/of_platform.h> /* struct platform_device */
- +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
- +
- +extern uint8_t advanced_debug;
- +extern const struct dpa_fq_cbs_t shared_fq_cbs;
- +extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
- +
- +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
- +dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
- +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
- + size_t count);
- +int dpa_bp_shared_port_seed(struct dpa_bp *bp);
- +
- +#endif /* __DPAA_ETH_BASE_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
- @@ -0,0 +1,1719 @@
- +/* Copyright 2008-2016 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/init.h>
- +#include "dpaa_eth_ceetm.h"
- +
- +#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
- +
- +const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
- + [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
- + [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
- +};
- +
- +struct Qdisc_ops ceetm_qdisc_ops;
- +
- +/* Obtain the DCP and the SP ids from the FMan port */
- +static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
- + unsigned int *sp_id)
- +{
- + uint32_t channel;
- + t_LnxWrpFmPortDev *port_dev;
- + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
- + struct mac_device *mac_dev = dpa_priv->mac_dev;
- +
- + port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
- + channel = port_dev->txCh;
- +
- + *sp_id = channel & CHANNEL_SP_MASK;
- + pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
- +
- + if (channel < DCP0_MAX_CHANNEL) {
- + *dcp_id = qm_dc_portal_fman0;
- + pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
- + } else {
- + *dcp_id = qm_dc_portal_fman1;
- + pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
- + }
- +}
- +
- +/* Enqueue Rejection Notification callback */
- +static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
- + const struct qm_mr_entry *msg)
- +{
- + struct net_device *net_dev;
- + struct ceetm_class *cls;
- + struct ceetm_class_stats *cstats = NULL;
- + const struct dpa_priv_s *dpa_priv;
- + struct dpa_percpu_priv_s *dpa_percpu_priv;
- + struct sk_buff *skb;
- + struct qm_fd fd = msg->ern.fd;
- +
- + net_dev = ((struct ceetm_fq *)fq)->net_dev;
- + dpa_priv = netdev_priv(net_dev);
- + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
- +
- + /* Increment DPA counters */
- + dpa_percpu_priv->stats.tx_dropped++;
- + dpa_percpu_priv->stats.tx_fifo_errors++;
- +
- + /* Increment CEETM counters */
- + cls = ((struct ceetm_fq *)fq)->ceetm_cls;
- + switch (cls->type) {
- + case CEETM_PRIO:
- + cstats = this_cpu_ptr(cls->prio.cstats);
- + break;
- + case CEETM_WBFS:
- + cstats = this_cpu_ptr(cls->wbfs.cstats);
- + break;
- + }
- +
- + if (cstats)
- + cstats->ern_drop_count++;
- +
- + if (fd.bpid != 0xff) {
- + dpa_fd_release(net_dev, &fd);
- + return;
- + }
- +
- + skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
- + dev_kfree_skb_any(skb);
- +}
- +
- +/* Congestion State Change Notification callback */
- +static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
- +{
- + struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
- + struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
- + struct ceetm_class *cls = ceetm_fq->ceetm_cls;
- + struct ceetm_class_stats *cstats = NULL;
- +
- + switch (cls->type) {
- + case CEETM_PRIO:
- + cstats = this_cpu_ptr(cls->prio.cstats);
- + break;
- + case CEETM_WBFS:
- + cstats = this_cpu_ptr(cls->wbfs.cstats);
- + break;
- + }
- +
- + if (congested) {
- + dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
- + netif_tx_stop_all_queues(dpa_priv->net_dev);
- + dpa_priv->cgr_data.cgr_congested_count++;
- + if (cstats)
- + cstats->cgr_congested_count++;
- + } else {
- + dpa_priv->cgr_data.congested_jiffies +=
- + (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
- + netif_tx_wake_all_queues(dpa_priv->net_dev);
- + }
- +}
- +
- +/* Allocate a ceetm fq */
- +static int ceetm_alloc_fq(struct ceetm_fq **fq,
- + struct net_device *dev,
- + struct ceetm_class *cls)
- +{
- + *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
- + if (!*fq)
- + return -ENOMEM;
- +
- + (*fq)->net_dev = dev;
- + (*fq)->ceetm_cls = cls;
- + return 0;
- +}
- +
- +/* Configure a ceetm Class Congestion Group */
- +static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
- + struct qm_ceetm_channel *channel,
- + unsigned int id,
- + struct ceetm_fq *fq,
- + u32 if_support)
- +{
- + int err;
- + u32 cs_th;
- + u16 ccg_mask;
- + struct qm_ceetm_ccg_params ccg_params;
- +
- + err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
- + if (err)
- + return err;
- +
- + /* Configure the count mode (frames/bytes), enable
- + * notifications, enable tail-drop, and configure the tail-drop
- + * mode and threshold */
- + ccg_mask = QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN |
- + QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
- + QM_CCGR_WE_TD_THRES;
- +
- + ccg_params.mode = 0; /* count bytes */
- + ccg_params.cscn_en = 1; /* generate notifications */
- + ccg_params.td_en = 1; /* enable tail-drop */
- + ccg_params.td_mode = 1; /* tail-drop on threshold */
- +
- + /* Configure the tail-drop threshold according to the link
- + * speed */
- + if (if_support & SUPPORTED_10000baseT_Full)
- + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
- + else
- + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
- + qm_cgr_cs_thres_set64(&ccg_params.td_thres, cs_th, 1);
- +
- + err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
- + if (err)
- + return err;
- +
- + return 0;
- +}
- +
- +/* Configure a ceetm Logical Frame Queue */
- +static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
- + struct qm_ceetm_lfq **lfq)
- +{
- + int err;
- + u64 context_a;
- + u32 context_b;
- +
- + err = qman_ceetm_lfq_claim(lfq, cq);
- + if (err)
- + return err;
- +
- + /* Get the former contexts in order to preserve context B */
- + err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
- + if (err)
- + return err;
- +
- + context_a = CEETM_CONTEXT_A;
- + err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
- + if (err)
- + return err;
- +
- + (*lfq)->ern = ceetm_ern;
- +
- + err = qman_ceetm_create_fq(*lfq, &fq->fq);
- + if (err)
- + return err;
- +
- + return 0;
- +}
- +
- +/* Configure a prio ceetm class */
- +static int ceetm_config_prio_cls(struct ceetm_class *cls, struct net_device *dev,
- + struct qm_ceetm_channel *channel, unsigned int id)
- +{
- + int err;
- + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
- +
- + err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
- + if (err)
- + return err;
- +
- + /* Claim and configure the CCG */
- + err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
- + dpa_priv->mac_dev->if_support);
- + if (err)
- + return err;
- +
- + /* Claim and configure the CQ */
- + err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
- + if (err)
- + return err;
- +
- + if (cls->shaped) {
- + err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
- + if (err)
- + return err;
- +
- + err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
- + if (err)
- + return err;
- + }
- +
- + /* Claim and configure a LFQ */
- + err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
- + if (err)
- + return err;
- +
- + return 0;
- +}
- +
- +/* Configure a wbfs ceetm class */
- +static int ceetm_config_wbfs_cls(struct ceetm_class *cls, struct net_device *dev,
- + struct qm_ceetm_channel *channel, unsigned int id, int type)
- +{
- + int err;
- + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
- +
- + err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
- + if (err)
- + return err;
- +
- + /* Claim and configure the CCG */
- + err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
- + dpa_priv->mac_dev->if_support);
- + if (err)
- + return err;
- +
- + /* Claim and configure the CQ */
- + if (type == WBFS_GRP_B)
- + err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
- + cls->wbfs.ccg);
- + else
- + err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
- + cls->wbfs.ccg);
- + if (err)
- + return err;
- +
- + /* Configure the CQ weight: real number mutiplied by 100 to get rid
- + * of the fraction */
- + err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
- + cls->wbfs.weight * 100);
- + if (err)
- + return err;
- +
- + /* Claim and configure a LFQ */
- + err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
- + if (err)
- + return err;
- +
- + return 0;
- +}
- +
- +/* Find class in qdisc hash table using given handle */
- +static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
- +{
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct Qdisc_class_common *clc;
- +
- + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
- + __func__, handle, sch->handle);
- +
- + clc = qdisc_class_find(&priv->clhash, handle);
- + return clc ? container_of(clc, struct ceetm_class, common) : NULL;
- +}
- +
- +/* Insert a class in the qdisc's class hash */
- +static void ceetm_link_class(struct Qdisc *sch,
- + struct Qdisc_class_hash *clhash,
- + struct Qdisc_class_common *common)
- +{
- + sch_tree_lock(sch);
- + qdisc_class_hash_insert(clhash, common);
- + sch_tree_unlock(sch);
- + qdisc_class_hash_grow(sch, clhash);
- +}
- +
- +/* Destroy a ceetm class */
- +static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
- +{
- + if (!cl)
- + return;
- +
- + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
- + __func__, cl->common.classid, sch->handle);
- +
- + switch (cl->type) {
- + case CEETM_ROOT:
- + if (cl->root.child) {
- + qdisc_destroy(cl->root.child);
- + cl->root.child = NULL;
- + }
- +
- + if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the channel %d\n",
- + __func__, cl->root.ch->idx);
- +
- + break;
- +
- + case CEETM_PRIO:
- + if (cl->prio.child) {
- + qdisc_destroy(cl->prio.child);
- + cl->prio.child = NULL;
- + }
- +
- + if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the LFQ %d\n",
- + __func__, cl->prio.lfq->idx);
- +
- + if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the CQ %d\n",
- + __func__, cl->prio.cq->idx);
- +
- + if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the CCG %d\n",
- + __func__, cl->prio.ccg->idx);
- +
- + if (cl->prio.fq)
- + kfree(cl->prio.fq);
- +
- + if (cl->prio.cstats)
- + free_percpu(cl->prio.cstats);
- +
- + break;
- +
- + case CEETM_WBFS:
- + if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the LFQ %d\n",
- + __func__, cl->wbfs.lfq->idx);
- +
- + if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the CQ %d\n",
- + __func__, cl->wbfs.cq->idx);
- +
- + if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the CCG %d\n",
- + __func__, cl->wbfs.ccg->idx);
- +
- + if (cl->wbfs.fq)
- + kfree(cl->wbfs.fq);
- +
- + if (cl->wbfs.cstats)
- + free_percpu(cl->wbfs.cstats);
- + }
- +
- + tcf_destroy_chain(&cl->filter_list);
- + kfree(cl);
- +}
- +
- +/* Destroy a ceetm qdisc */
- +static void ceetm_destroy(struct Qdisc *sch)
- +{
- + unsigned int ntx, i;
- + struct hlist_node *next;
- + struct ceetm_class *cl;
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct net_device *dev = qdisc_dev(sch);
- +
- + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
- + __func__, sch->handle);
- +
- + /* All filters need to be removed before destroying the classes */
- + tcf_destroy_chain(&priv->filter_list);
- +
- + for (i = 0; i < priv->clhash.hashsize; i++) {
- + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
- + tcf_destroy_chain(&cl->filter_list);
- + }
- +
- + for (i = 0; i < priv->clhash.hashsize; i++) {
- + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
- + common.hnode)
- + ceetm_cls_destroy(sch, cl);
- + }
- +
- + qdisc_class_hash_destroy(&priv->clhash);
- +
- + switch (priv->type) {
- + case CEETM_ROOT:
- + dpa_disable_ceetm(dev);
- +
- + if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the LNI %d\n",
- + __func__, priv->root.lni->idx);
- +
- + if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
- + pr_err(KBUILD_BASENAME
- + " : %s : error releasing the SP %d\n",
- + __func__, priv->root.sp->idx);
- +
- + if (priv->root.qstats)
- + free_percpu(priv->root.qstats);
- +
- + if (!priv->root.qdiscs)
- + break;
- +
- + /* Remove the pfifo qdiscs */
- + for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
- + if (priv->root.qdiscs[ntx])
- + qdisc_destroy(priv->root.qdiscs[ntx]);
- +
- + kfree(priv->root.qdiscs);
- + break;
- +
- + case CEETM_PRIO:
- + if (priv->prio.parent)
- + priv->prio.parent->root.child = NULL;
- + break;
- +
- + case CEETM_WBFS:
- + if (priv->wbfs.parent)
- + priv->wbfs.parent->prio.child = NULL;
- + break;
- + }
- +}
- +
- +static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
- +{
- + struct Qdisc *qdisc;
- + unsigned int ntx, i;
- + struct nlattr *nest;
- + struct tc_ceetm_qopt qopt;
- + struct ceetm_qdisc_stats *qstats;
- + struct net_device *dev = qdisc_dev(sch);
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + sch_tree_lock(sch);
- + memset(&qopt, 0, sizeof(qopt));
- + qopt.type = priv->type;
- + qopt.shaped = priv->shaped;
- +
- + switch (priv->type) {
- + case CEETM_ROOT:
- + /* Gather statistics from the underlying pfifo qdiscs */
- + sch->q.qlen = 0;
- + memset(&sch->bstats, 0, sizeof(sch->bstats));
- + memset(&sch->qstats, 0, sizeof(sch->qstats));
- +
- + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
- + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
- + sch->q.qlen += qdisc->q.qlen;
- + sch->bstats.bytes += qdisc->bstats.bytes;
- + sch->bstats.packets += qdisc->bstats.packets;
- + sch->qstats.qlen += qdisc->qstats.qlen;
- + sch->qstats.backlog += qdisc->qstats.backlog;
- + sch->qstats.drops += qdisc->qstats.drops;
- + sch->qstats.requeues += qdisc->qstats.requeues;
- + sch->qstats.overlimits += qdisc->qstats.overlimits;
- + }
- +
- + for_each_online_cpu(i) {
- + qstats = per_cpu_ptr(priv->root.qstats, i);
- + sch->qstats.drops += qstats->drops;
- + }
- +
- + qopt.rate = priv->root.rate;
- + qopt.ceil = priv->root.ceil;
- + qopt.overhead = priv->root.overhead;
- + break;
- +
- + case CEETM_PRIO:
- + qopt.qcount = priv->prio.qcount;
- + break;
- +
- + case CEETM_WBFS:
- + qopt.qcount = priv->wbfs.qcount;
- + qopt.cr = priv->wbfs.cr;
- + qopt.er = priv->wbfs.er;
- + break;
- +
- + default:
- + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
- + sch_tree_unlock(sch);
- + return -EINVAL;
- + }
- +
- + nest = nla_nest_start(skb, TCA_OPTIONS);
- + if (nest == NULL)
- + goto nla_put_failure;
- + if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
- + goto nla_put_failure;
- + nla_nest_end(skb, nest);
- +
- + sch_tree_unlock(sch);
- + return skb->len;
- +
- +nla_put_failure:
- + sch_tree_unlock(sch);
- + nla_nest_cancel(skb, nest);
- + return -EMSGSIZE;
- +}
- +
- +/* Configure a root ceetm qdisc */
- +static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
- + struct tc_ceetm_qopt *qopt)
- +{
- + struct netdev_queue *dev_queue;
- + struct Qdisc *qdisc;
- + enum qm_dc_portal dcp_id;
- + unsigned int i, sp_id;
- + int err;
- + u64 bps;
- + struct qm_ceetm_sp *sp;
- + struct qm_ceetm_lni *lni;
- + struct net_device *dev = qdisc_dev(sch);
- + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
- + struct mac_device *mac_dev = dpa_priv->mac_dev;
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + /* Validate inputs */
- + if (sch->parent != TC_H_ROOT) {
- + pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
- + tcf_destroy_chain(&priv->filter_list);
- + qdisc_class_hash_destroy(&priv->clhash);
- + return -EINVAL;
- + }
- +
- + if (!mac_dev) {
- + pr_err("CEETM: the interface is lacking a mac\n");
- + err = -EINVAL;
- + goto err_init_root;
- + }
- +
- + /* pre-allocate underlying pfifo qdiscs */
- + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
- + sizeof(priv->root.qdiscs[0]),
- + GFP_KERNEL);
- + if (priv->root.qdiscs == NULL) {
- + err = -ENOMEM;
- + goto err_init_root;
- + }
- +
- + for (i = 0; i < dev->num_tx_queues; i++) {
- + dev_queue = netdev_get_tx_queue(dev, i);
- + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
- + TC_H_MAKE(TC_H_MAJ(sch->handle),
- + TC_H_MIN(i + PFIFO_MIN_OFFSET)));
- + if (qdisc == NULL) {
- + err = -ENOMEM;
- + goto err_init_root;
- + }
- +
- + priv->root.qdiscs[i] = qdisc;
- + qdisc->flags |= TCQ_F_ONETXQUEUE;
- + }
- +
- + sch->flags |= TCQ_F_MQROOT;
- +
- + priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
- + if (!priv->root.qstats) {
- + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
- + __func__);
- + err = -ENOMEM;
- + goto err_init_root;
- + }
- +
- + priv->shaped = qopt->shaped;
- + priv->root.rate = qopt->rate;
- + priv->root.ceil = qopt->ceil;
- + priv->root.overhead = qopt->overhead;
- +
- + /* Claim the SP */
- + get_dcp_and_sp(dev, &dcp_id, &sp_id);
- + err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
- + __func__);
- + goto err_init_root;
- + }
- +
- + priv->root.sp = sp;
- +
- + /* Claim the LNI - will use the same id as the SP id since SPs 0-7
- + * are connected to the TX FMan ports */
- + err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
- + __func__);
- + goto err_init_root;
- + }
- +
- + priv->root.lni = lni;
- +
- + err = qman_ceetm_sp_set_lni(sp, lni);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to link the SP and "
- + "LNI\n", __func__);
- + goto err_init_root;
- + }
- +
- + lni->sp = sp;
- +
- + /* Configure the LNI shaper */
- + if (priv->shaped) {
- + err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to configure "
- + "the LNI shaper\n", __func__);
- + goto err_init_root;
- + }
- +
- + bps = priv->root.rate << 3; /* Bps -> bps */
- + err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to configure "
- + "the LNI shaper\n", __func__);
- + goto err_init_root;
- + }
- +
- + bps = priv->root.ceil << 3; /* Bps -> bps */
- + err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to configure "
- + "the LNI shaper\n", __func__);
- + goto err_init_root;
- + }
- + }
- +
- + /* TODO default configuration */
- +
- + dpa_enable_ceetm(dev);
- + return 0;
- +
- +err_init_root:
- + ceetm_destroy(sch);
- + return err;
- +}
- +
- +/* Configure a prio ceetm qdisc */
- +static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
- + struct tc_ceetm_qopt *qopt)
- +{
- + int err;
- + unsigned int i;
- + struct ceetm_class *parent_cl, *child_cl;
- + struct Qdisc *parent_qdisc;
- + struct net_device *dev = qdisc_dev(sch);
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + if (sch->parent == TC_H_ROOT) {
- + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
- + err = -EINVAL;
- + goto err_init_prio;
- + }
- +
- + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
- + if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
- + pr_err("CEETM: a ceetm qdisc can not be attached to other "
- + "qdisc/class types\n");
- + err = -EINVAL;
- + goto err_init_prio;
- + }
- +
- + /* Obtain the parent root ceetm_class */
- + parent_cl = ceetm_find(sch->parent, parent_qdisc);
- +
- + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
- + pr_err("CEETM: a prio ceetm qdiscs can be added only under a "
- + "root ceetm class\n");
- + err = -EINVAL;
- + goto err_init_prio;
- + }
- +
- + priv->prio.parent = parent_cl;
- + parent_cl->root.child = sch;
- +
- + priv->shaped = parent_cl->shaped;
- + priv->prio.qcount = qopt->qcount;
- +
- + /* Create and configure qcount child classes */
- + for (i = 0; i < priv->prio.qcount; i++) {
- + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
- + if (!child_cl) {
- + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
- + __func__);
- + err = -ENOMEM;
- + goto err_init_prio;
- + }
- +
- + child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
- + if (!child_cl->prio.cstats) {
- + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
- + __func__);
- + err = -ENOMEM;
- + goto err_init_prio_cls;
- + }
- +
- + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
- + child_cl->refcnt = 1;
- + child_cl->parent = sch;
- + child_cl->type = CEETM_PRIO;
- + child_cl->shaped = priv->shaped;
- + child_cl->prio.child = NULL;
- +
- + /* All shaped CQs have CR and ER enabled by default */
- + child_cl->prio.cr = child_cl->shaped;
- + child_cl->prio.er = child_cl->shaped;
- + child_cl->prio.fq = NULL;
- + child_cl->prio.cq = NULL;
- +
- + /* Configure the corresponding hardware CQ */
- + err = ceetm_config_prio_cls(child_cl, dev,
- + parent_cl->root.ch, i);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to configure "
- + "the ceetm prio class %X\n",
- + __func__,
- + child_cl->common.classid);
- + goto err_init_prio_cls;
- + }
- +
- + /* Add class handle in Qdisc */
- + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
- + pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X "
- + "associated with CQ %d and CCG %d\n",
- + __func__,
- + child_cl->common.classid,
- + child_cl->prio.cq->idx,
- + child_cl->prio.ccg->idx);
- + }
- +
- + return 0;
- +
- +err_init_prio_cls:
- + ceetm_cls_destroy(sch, child_cl);
- +err_init_prio:
- + ceetm_destroy(sch);
- + return err;
- +}
- +
- +/* Configure a wbfs ceetm qdisc */
- +static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
- + struct tc_ceetm_qopt *qopt)
- +{
- + int err, group_b, small_group;
- + unsigned int i, id, prio_a, prio_b;
- + struct ceetm_class *parent_cl, *child_cl, *root_cl;
- + struct Qdisc *parent_qdisc;
- + struct ceetm_qdisc *parent_priv;
- + struct qm_ceetm_channel *channel;
- + struct net_device *dev = qdisc_dev(sch);
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + /* Validate inputs */
- + if (sch->parent == TC_H_ROOT) {
- + pr_err("CEETM: a wbfs ceetm qdiscs can not be root\n");
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + /* Obtain the parent prio ceetm qdisc */
- + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
- + if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
- + pr_err("CEETM: a ceetm qdisc can not be attached to other "
- + "qdisc/class types\n");
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + /* Obtain the parent prio ceetm class */
- + parent_cl = ceetm_find(sch->parent, parent_qdisc);
- + parent_priv = qdisc_priv(parent_qdisc);
- +
- + if (!parent_cl || parent_cl->type != CEETM_PRIO) {
- + pr_err("CEETM: a wbfs ceetm qdiscs can be added only under a "
- + "prio ceetm class\n");
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + priv->shaped = parent_cl->shaped;
- +
- + if (!priv->shaped && (qopt->cr || qopt->er)) {
- + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs "
- + "ceetm qdiscs\n");
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + if (priv->shaped && !(qopt->cr || qopt->er)) {
- + pr_err("CEETM: either CR or ER must be enabled for shaped "
- + "wbfs ceetm qdiscs\n");
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + /* Obtain the parent root ceetm class */
- + root_cl = parent_priv->prio.parent;
- + if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b)
- + || root_cl->root.wbfs_grp_large) {
- + pr_err("CEETM: no more wbfs classes are available\n");
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b)
- + && qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
- + pr_err("CEETM: only %d wbfs classes are available\n",
- + CEETM_MIN_WBFS_QCOUNT);
- + err = -EINVAL;
- + goto err_init_wbfs;
- + }
- +
- + priv->wbfs.parent = parent_cl;
- + parent_cl->prio.child = sch;
- +
- + priv->wbfs.qcount = qopt->qcount;
- + priv->wbfs.cr = qopt->cr;
- + priv->wbfs.er = qopt->er;
- +
- + channel = root_cl->root.ch;
- +
- + /* Configure the hardware wbfs channel groups */
- + if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
- + /* Configure the large group A */
- + priv->wbfs.group_type = WBFS_GRP_LARGE;
- + small_group = false;
- + group_b = false;
- + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
- + prio_b = prio_a;
- +
- + } else if (root_cl->root.wbfs_grp_a) {
- + /* Configure the group B */
- + priv->wbfs.group_type = WBFS_GRP_B;
- +
- + err = qman_ceetm_channel_get_group(channel, &small_group,
- + &prio_a, &prio_b);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to get group "
- + "details\n", __func__);
- + goto err_init_wbfs;
- + }
- +
- + small_group = true;
- + group_b = true;
- + prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
- + /* If group A isn't configured, configure it as group B */
- + prio_a = prio_a ? : prio_b;
- +
- + } else {
- + /* Configure the small group A */
- + priv->wbfs.group_type = WBFS_GRP_A;
- +
- + err = qman_ceetm_channel_get_group(channel, &small_group,
- + &prio_a, &prio_b);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to get group "
- + "details\n", __func__);
- + goto err_init_wbfs;
- + }
- +
- + small_group = true;
- + group_b = false;
- + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
- + /* If group B isn't configured, configure it as group A */
- + prio_b = prio_b ? : prio_a;
- + }
- +
- + err = qman_ceetm_channel_set_group(channel, small_group, prio_a, prio_b);
- + if (err)
- + goto err_init_wbfs;
- +
- + if (priv->shaped) {
- + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
- + group_b,
- + priv->wbfs.cr);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to set group "
- + "CR eligibility\n", __func__);
- + goto err_init_wbfs;
- + }
- +
- + err = qman_ceetm_channel_set_group_er_eligibility(channel,
- + group_b,
- + priv->wbfs.er);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to set group "
- + "ER eligibility\n", __func__);
- + goto err_init_wbfs;
- + }
- + }
- +
- + /* Create qcount child classes */
- + for (i = 0; i < priv->wbfs.qcount; i++) {
- + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
- + if (!child_cl) {
- + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
- + __func__);
- + err = -ENOMEM;
- + goto err_init_wbfs;
- + }
- +
- + child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
- + if (!child_cl->wbfs.cstats) {
- + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
- + __func__);
- + err = -ENOMEM;
- + goto err_init_wbfs_cls;
- + }
- +
- + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
- + child_cl->refcnt = 1;
- + child_cl->parent = sch;
- + child_cl->type = CEETM_WBFS;
- + child_cl->shaped = priv->shaped;
- + child_cl->wbfs.fq = NULL;
- + child_cl->wbfs.cq = NULL;
- + child_cl->wbfs.weight = qopt->qweight[i];
- +
- + if (priv->wbfs.group_type == WBFS_GRP_B)
- + id = WBFS_GRP_B_OFFSET + i;
- + else
- + id = WBFS_GRP_A_OFFSET + i;
- +
- + err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
- + priv->wbfs.group_type);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to configure "
- + "the ceetm wbfs class %X\n",
- + __func__,
- + child_cl->common.classid);
- + goto err_init_wbfs_cls;
- + }
- +
- + /* Add class handle in Qdisc */
- + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
- + pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X "
- + "associated with CQ %d and CCG %d\n",
- + __func__,
- + child_cl->common.classid,
- + child_cl->wbfs.cq->idx,
- + child_cl->wbfs.ccg->idx);
- + }
- +
- + /* Signal the root class that a group has been configured */
- + switch (priv->wbfs.group_type) {
- + case WBFS_GRP_LARGE:
- + root_cl->root.wbfs_grp_large = true;
- + break;
- + case WBFS_GRP_A:
- + root_cl->root.wbfs_grp_a = true;
- + break;
- + case WBFS_GRP_B:
- + root_cl->root.wbfs_grp_b = true;
- + break;
- + }
- +
- + return 0;
- +
- +err_init_wbfs_cls:
- + ceetm_cls_destroy(sch, child_cl);
- +err_init_wbfs:
- + ceetm_destroy(sch);
- + return err;
- +}
- +
- +/* Configure a generic ceetm qdisc */
- +static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
- +{
- + struct tc_ceetm_qopt *qopt;
- + struct nlattr *tb[TCA_CEETM_QOPS + 1];
- + int ret;
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct net_device *dev = qdisc_dev(sch);
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + if (!netif_is_multiqueue(dev))
- + return -EOPNOTSUPP;
- +
- + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
- + if (ret < 0) {
- + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
- + return ret;
- + }
- +
- + if (tb[TCA_CEETM_QOPS] == NULL) {
- + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
- + return -EINVAL;
- + }
- +
- + if (TC_H_MIN(sch->handle)) {
- + pr_err("CEETM: a qdisc should not have a minor\n");
- + return -EINVAL;
- + }
- +
- + qopt = nla_data(tb[TCA_CEETM_QOPS]);
- +
- + /* Initialize the class hash list. Each qdisc has its own class hash */
- + ret = qdisc_class_hash_init(&priv->clhash);
- + if (ret < 0) {
- + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init "
- + "failed\n", __func__);
- + return ret;
- + }
- +
- + priv->type = qopt->type;
- +
- + switch (priv->type) {
- + case CEETM_ROOT:
- + ret = ceetm_init_root(sch, priv, qopt);
- + break;
- + case CEETM_PRIO:
- + ret = ceetm_init_prio(sch, priv, qopt);
- + break;
- + case CEETM_WBFS:
- + ret = ceetm_init_wbfs(sch, priv, qopt);
- + break;
- + default:
- + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
- + ceetm_destroy(sch);
- + ret = -EINVAL;
- + }
- +
- + return ret;
- +}
- +
- +/* Attach the underlying pfifo qdiscs */
- +static void ceetm_attach(struct Qdisc *sch)
- +{
- + struct net_device *dev = qdisc_dev(sch);
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct Qdisc *qdisc, *old_qdisc;
- + unsigned int i;
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + for (i = 0; i < dev->num_tx_queues; i++) {
- + qdisc = priv->root.qdiscs[i];
- + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
- + if (old_qdisc)
- + qdisc_destroy(old_qdisc);
- + }
- +}
- +
- +static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
- +{
- + struct ceetm_class *cl;
- + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
- + __func__, classid, sch->handle);
- + cl = ceetm_find(classid, sch);
- +
- + if (cl)
- + cl->refcnt++; /* Will decrement in put() */
- + return (unsigned long)cl;
- +}
- +
- +static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
- +{
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
- + __func__, cl->common.classid, sch->handle);
- + cl->refcnt--;
- +
- + if (cl->refcnt == 0)
- + ceetm_cls_destroy(sch, cl);
- +}
- +
- +/* Add a ceetm root class or configure a ceetm prio class */
- +static int ceetm_cls_change(struct Qdisc *sch, u32 classid,
- + u32 parentid, struct nlattr **tca,
- + unsigned long *arg)
- +{
- + int err;
- + u64 bps;
- + struct ceetm_qdisc *priv;
- + struct ceetm_class *cl = (struct ceetm_class *)*arg;
- + struct nlattr *opt = tca[TCA_OPTIONS];
- + struct nlattr *tb[__TCA_CEETM_MAX];
- + struct tc_ceetm_copt *copt;
- + struct qm_ceetm_channel *channel;
- + struct net_device *dev = qdisc_dev(sch);
- +
- + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
- + __func__, classid, sch->handle);
- +
- + if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
- + pr_err("CEETM: a ceetm class can not be attached to other "
- + "qdisc/class types\n");
- + return -EINVAL;
- + }
- +
- + priv = qdisc_priv(sch);
- +
- + if (!opt) {
- + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
- + return -EINVAL;
- + }
- +
- + if (!cl && sch->handle != parentid) {
- + pr_err("CEETM: classes can be attached to the root ceetm "
- + "qdisc only\n");
- + return -EINVAL;
- + }
- +
- + if (!cl && priv->type != CEETM_ROOT) {
- + pr_err("CEETM: only root ceetm classes can be attached to the "
- + "root ceetm qdisc\n");
- + return -EINVAL;
- + }
- +
- + err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
- + if (err < 0) {
- + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
- + return -EINVAL;
- + }
- +
- + if (tb[TCA_CEETM_COPT] == NULL) {
- + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
- + return -EINVAL;
- + }
- +
- + if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
- + pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm "
- + "root classes\n");
- + return -EINVAL;
- + }
- +
- + copt = nla_data(tb[TCA_CEETM_COPT]);
- +
- + /* Configure an existing ceetm prio class */
- + if (cl) {
- + if (copt->type != CEETM_PRIO) {
- + pr_err("CEETM: only prio ceetm classes can be changed\n");
- + return -EINVAL;
- + }
- +
- + if (!cl->shaped && (copt->cr || copt->er)) {
- + pr_err("CEETM: only shaped classes can have CR and "
- + "ER enabled\n");
- + return -EINVAL;
- + }
- +
- + if (cl->prio.cr != (bool)copt->cr)
- + err = qman_ceetm_channel_set_cq_cr_eligibility(
- + cl->prio.cq->parent,
- + cl->prio.cq->idx,
- + copt->cr);
- +
- + if (!err && cl->prio.er != (bool)copt->er)
- + err = qman_ceetm_channel_set_cq_er_eligibility(
- + cl->prio.cq->parent,
- + cl->prio.cq->idx,
- + copt->er);
- +
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to configure "
- + "the ceetm prio class %X\n",
- + __func__,
- + cl->common.classid);
- + return err;
- + }
- +
- + cl->prio.cr = copt->cr;
- + cl->prio.er = copt->er;
- + return 0;
- + }
- +
- + /* Add a new root ceetm class */
- + if (copt->type != CEETM_ROOT) {
- + pr_err("CEETM: only root ceetm classes can be attached to the "
- + "root ceetm qdisc\n");
- + return -EINVAL;
- + }
- +
- + if (copt->shaped && !priv->shaped) {
- + pr_err("CEETM: can not add a shaped ceetm root class under an "
- + "unshaped ceetm root qdisc\n");
- + return -EINVAL;
- + }
- +
- + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
- + if (!cl) {
- + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", __func__);
- + return -ENOMEM;
- + }
- +
- + cl->type = copt->type;
- + cl->shaped = copt->shaped;
- + cl->root.rate = copt->rate;
- + cl->root.ceil = copt->ceil;
- + cl->root.tbl = copt->tbl;
- +
- + cl->common.classid = classid;
- + cl->refcnt = 1;
- + cl->parent = sch;
- + cl->root.child = NULL;
- + cl->root.wbfs_grp_a = false;
- + cl->root.wbfs_grp_b = false;
- + cl->root.wbfs_grp_large = false;
- +
- + /* Claim a CEETM channel */
- + err = qman_ceetm_channel_claim(&channel, priv->root.lni);
- + if (err) {
- + pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
- + __func__);
- + goto claim_err;
- + }
- +
- + cl->root.ch = channel;
- +
- + if (cl->shaped) {
- + /* Configure the channel shaper */
- + err = qman_ceetm_channel_enable_shaper(channel, 1);
- + if (err)
- + goto channel_err;
- +
- + bps = cl->root.rate << 3; /* Bps -> bps */
- + err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
- + dev->mtu);
- + if (err)
- + goto channel_err;
- +
- + bps = cl->root.ceil << 3; /* Bps -> bps */
- + err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
- + dev->mtu);
- + if (err)
- + goto channel_err;
- +
- + } else {
- + /* Configure the uFQ algorithm */
- + err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
- + if (err)
- + goto channel_err;
- + }
- +
- + /* Add class handle in Qdisc */
- + ceetm_link_class(sch, &priv->clhash, &cl->common);
- +
- + pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with "
- + "channel %d\n", __func__, classid, channel->idx);
- + *arg = (unsigned long)cl;
- + return 0;
- +
- +channel_err:
- + pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
- + __func__, channel->idx);
- + if (qman_ceetm_channel_release(channel))
- + pr_err(KBUILD_BASENAME " : %s : failed to release the channel "
- + "%d\n", __func__, channel->idx);
- +claim_err:
- + if (cl) {
- + kfree(cl);
- + }
- + return err;
- +}
- +
- +static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
- +{
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct ceetm_class *cl;
- + unsigned int i;
- +
- + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
- +
- + if (arg->stop)
- + return;
- +
- + for (i = 0; i < priv->clhash.hashsize; i++) {
- + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
- + if (arg->count < arg->skip) {
- + arg->count++;
- + continue;
- + }
- + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- + arg->stop = 1;
- + return;
- + }
- + arg->count++;
- + }
- + }
- +}
- +
- +static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
- + struct sk_buff *skb, struct tcmsg *tcm)
- +{
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- + struct nlattr *nest;
- + struct tc_ceetm_copt copt;
- +
- + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
- + __func__, cl->common.classid, sch->handle);
- +
- + sch_tree_lock(sch);
- +
- + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
- + tcm->tcm_handle = cl->common.classid;
- +
- + memset(&copt, 0, sizeof(copt));
- +
- + copt.shaped = cl->shaped;
- + copt.type = cl->type;
- +
- + switch (cl->type) {
- + case CEETM_ROOT:
- + if (cl->root.child)
- + tcm->tcm_info = cl->root.child->handle;
- +
- + copt.rate = cl->root.rate;
- + copt.ceil = cl->root.ceil;
- + copt.tbl = cl->root.tbl;
- + break;
- +
- + case CEETM_PRIO:
- + if (cl->prio.child)
- + tcm->tcm_info = cl->prio.child->handle;
- +
- + copt.cr = cl->prio.cr;
- + copt.er = cl->prio.er;
- + break;
- +
- + case CEETM_WBFS:
- + copt.weight = cl->wbfs.weight;
- + break;
- + }
- +
- + nest = nla_nest_start(skb, TCA_OPTIONS);
- + if (nest == NULL)
- + goto nla_put_failure;
- + if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
- + goto nla_put_failure;
- + nla_nest_end(skb, nest);
- + sch_tree_unlock(sch);
- + return skb->len;
- +
- +nla_put_failure:
- + sch_tree_unlock(sch);
- + nla_nest_cancel(skb, nest);
- + return -EMSGSIZE;
- +}
- +
- +static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
- +{
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- +
- + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
- + __func__, cl->common.classid, sch->handle);
- +
- + sch_tree_lock(sch);
- + qdisc_class_hash_remove(&priv->clhash, &cl->common);
- + cl->refcnt--;
- +
- + /* The refcnt should be at least 1 since we have incremented it in
- + get(). Will decrement again in put() where we will call destroy()
- + to actually free the memory if it reaches 0. */
- + BUG_ON(cl->refcnt == 0);
- +
- + sch_tree_unlock(sch);
- + return 0;
- +}
- +
- +/* Get the class' child qdisc, if any */
- +static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
- +{
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- +
- + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
- + __func__, cl->common.classid, sch->handle);
- +
- + switch (cl->type) {
- + case CEETM_ROOT:
- + return cl->root.child;
- + break;
- +
- + case CEETM_PRIO:
- + return cl->prio.child;
- + break;
- + }
- +
- + return NULL;
- +}
- +
- +static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
- + struct Qdisc *new, struct Qdisc **old)
- +{
- + if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
- + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm "
- + "classes\n");
- + return -EOPNOTSUPP;
- + }
- +
- + return 0;
- +}
- +
- +static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
- + struct gnet_dump *d)
- +{
- + unsigned int i;
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- + struct gnet_stats_basic_packed tmp_bstats;
- + struct ceetm_class_stats *cstats = NULL;
- + struct qm_ceetm_cq *cq = NULL;
- + struct tc_ceetm_xstats xstats;
- +
- + memset(&xstats, 0, sizeof(xstats));
- + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
- +
- + switch (cl->type) {
- + case CEETM_ROOT:
- + return 0;
- + case CEETM_PRIO:
- + cq = cl->prio.cq;
- + break;
- + case CEETM_WBFS:
- + cq = cl->wbfs.cq;
- + break;
- + }
- +
- + for_each_online_cpu(i) {
- + switch (cl->type) {
- + case CEETM_PRIO:
- + cstats = per_cpu_ptr(cl->prio.cstats, i);
- + break;
- + case CEETM_WBFS:
- + cstats = per_cpu_ptr(cl->wbfs.cstats, i);
- + break;
- + }
- +
- + if (cstats) {
- + xstats.ern_drop_count += cstats->ern_drop_count;
- + xstats.cgr_congested_count += cstats->cgr_congested_count;
- + tmp_bstats.bytes += cstats->bstats.bytes;
- + tmp_bstats.packets += cstats->bstats.packets;
- + }
- + }
- +
- + if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
- + return -1;
- +
- + if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
- + &xstats.frame_count, &xstats.byte_count))
- + return -1;
- +
- + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
- +}
- +
- +static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
- +{
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- + struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
- +
- + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
- + cl ? cl->common.classid : 0, sch->handle);
- + return fl;
- +}
- +
- +static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
- + u32 classid)
- +{
- + struct ceetm_class *cl = ceetm_find(classid, sch);
- + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
- + cl ? cl->common.classid : 0, sch->handle);
- + return (unsigned long)cl;
- +}
- +
- +static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
- +{
- + struct ceetm_class *cl = (struct ceetm_class *)arg;
- + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
- + cl ? cl->common.classid : 0, sch->handle);
- +}
- +
- +const struct Qdisc_class_ops ceetm_cls_ops = {
- + .graft = ceetm_cls_graft,
- + .leaf = ceetm_cls_leaf,
- + .get = ceetm_cls_get,
- + .put = ceetm_cls_put,
- + .change = ceetm_cls_change,
- + .delete = ceetm_cls_delete,
- + .walk = ceetm_cls_walk,
- + .tcf_chain = ceetm_tcf_chain,
- + .bind_tcf = ceetm_tcf_bind,
- + .unbind_tcf = ceetm_tcf_unbind,
- + .dump = ceetm_cls_dump,
- + .dump_stats = ceetm_cls_dump_stats,
- +};
- +
- +struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
- + .id = "ceetm",
- + .priv_size = sizeof(struct ceetm_qdisc),
- + .cl_ops = &ceetm_cls_ops,
- + .init = ceetm_init,
- + .destroy = ceetm_destroy,
- + .dump = ceetm_dump,
- + .attach = ceetm_attach,
- + .owner = THIS_MODULE,
- +};
- +
- +/* Run the filters and classifiers attached to the qdisc on the provided skb */
- +static struct ceetm_class *ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
- + int *qerr, bool *act_drop)
- +{
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct ceetm_class *cl = NULL, *wbfs_cl;
- + struct tcf_result res;
- + struct tcf_proto *tcf;
- + int result;
- +
- + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- + tcf = priv->filter_list;
- + while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
- +#ifdef CONFIG_NET_CLS_ACT
- + switch (result) {
- + case TC_ACT_QUEUED:
- + case TC_ACT_STOLEN:
- + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- + case TC_ACT_SHOT:
- + /* No valid class found due to action */
- + *act_drop = true;
- + return NULL;
- + }
- +#endif
- + cl = (void *)res.class;
- + if (!cl) {
- + if (res.classid == sch->handle) {
- + /* The filter leads to the qdisc */
- + /* TODO default qdisc */
- + return NULL;
- + }
- +
- + cl = ceetm_find(res.classid, sch);
- + if (!cl)
- + /* The filter leads to an invalid class */
- + break;
- + }
- +
- + /* The class might have its own filters attached */
- + tcf = cl->filter_list;
- + }
- +
- + if (!cl) {
- + /* No valid class found */
- + /* TODO default qdisc */
- + return NULL;
- + }
- +
- + switch (cl->type) {
- + case CEETM_ROOT:
- + if (cl->root.child) {
- + /* Run the prio qdisc classifiers */
- + return ceetm_classify(skb, cl->root.child, qerr,
- + act_drop);
- + } else {
- + /* The root class does not have a child prio qdisc */
- + /* TODO default qdisc */
- + return NULL;
- + }
- + case CEETM_PRIO:
- + if (cl->prio.child) {
- + /* If filters lead to a wbfs class, return it.
- + * Otherwise, return the prio class */
- + wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
- + act_drop);
- + /* A NULL result might indicate either an erroneous
- + * filter, or no filters at all. We will assume the
- + * latter */
- + return wbfs_cl ? : cl;
- + }
- + }
- +
- + /* For wbfs and childless prio classes, return the class directly */
- + return cl;
- +}
- +
- +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
- +{
- + int ret;
- + bool act_drop = false;
- + struct Qdisc *sch = net_dev->qdisc;
- + struct ceetm_class *cl;
- + struct dpa_priv_s *priv_dpa;
- + struct qman_fq *egress_fq, *conf_fq;
- + struct ceetm_qdisc *priv = qdisc_priv(sch);
- + struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
- + struct ceetm_class_stats *cstats;
- + const int queue_mapping = dpa_get_queue_mapping(skb);
- + spinlock_t *root_lock = qdisc_lock(sch);
- +
- + spin_lock(root_lock);
- + cl = ceetm_classify(skb, sch, &ret, &act_drop);
- + spin_unlock(root_lock);
- +
- +#ifdef CONFIG_NET_CLS_ACT
- + if (act_drop) {
- + if (ret & __NET_XMIT_BYPASS)
- + qstats->drops++;
- + goto drop;
- + }
- +#endif
- + /* TODO default class */
- + if (unlikely(!cl)) {
- + qstats->drops++;
- + goto drop;
- + }
- +
- + priv_dpa = netdev_priv(net_dev);
- + conf_fq = priv_dpa->conf_fqs[queue_mapping];
- +
- + /* Choose the proper tx fq and update the basic stats (bytes and
- + * packets sent by the class) */
- + switch (cl->type) {
- + case CEETM_PRIO:
- + egress_fq = &(cl->prio.fq->fq);
- + cstats = this_cpu_ptr(cl->prio.cstats);
- + break;
- + case CEETM_WBFS:
- + egress_fq = &(cl->wbfs.fq->fq);
- + cstats = this_cpu_ptr(cl->wbfs.cstats);
- + break;
- + default:
- + qstats->drops++;
- + goto drop;
- + }
- +
- + bstats_update(&cstats->bstats, skb);
- + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
- +
- +drop:
- + dev_kfree_skb_any(skb);
- + return NET_XMIT_SUCCESS;
- +}
- +
- +static int __init ceetm_register(void)
- +{
- + int _errno = 0;
- +
- + pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
- +
- + _errno = register_qdisc(&ceetm_qdisc_ops);
- + if (unlikely(_errno))
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): register_qdisc() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- +
- + return _errno;
- +}
- +
- +static void __exit ceetm_unregister(void)
- +{
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + unregister_qdisc(&ceetm_qdisc_ops);
- +}
- +
- +module_init(ceetm_register);
- +module_exit(ceetm_unregister);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
- @@ -0,0 +1,230 @@
- +/* Copyright 2008-2016 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __DPAA_ETH_CEETM_H
- +#define __DPAA_ETH_CEETM_H
- +
- +#include <net/pkt_sched.h>
- +#include <net/netlink.h>
- +#include <lnxwrp_fm.h>
- +
- +#include "mac.h"
- +#include "dpaa_eth_common.h"
- +
- +/* Mask to determine the sub-portal id from a channel number */
- +#define CHANNEL_SP_MASK 0x1f
- +/* The number of the last channel that services DCP0, connected to FMan 0.
- + * Value validated for B4 and T series platforms.
- + */
- +#define DCP0_MAX_CHANNEL 0x80f
- +/* A2V=1 - field A2 is valid
- + * A0V=1 - field A0 is valid - enables frame confirmation
- + * OVOM=1 - override operation mode bits with values from A2
- + * EBD=1 - external buffers are deallocated at the end of the FMan flow
- + * NL=0 - the BMI releases all the internal buffers
- + */
- +#define CEETM_CONTEXT_A 0x1a00000080000000
- +
- +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
- + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
- + * are reserved for the maximum 32 CEETM channels (majors and minors are in
- + * hex).
- + */
- +#define PFIFO_MIN_OFFSET 0x21
- +
- +/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
- +#define CEETM_MAX_PRIO_QCOUNT 8
- +#define CEETM_MAX_WBFS_QCOUNT 8
- +#define CEETM_MIN_WBFS_QCOUNT 4
- +
- +/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A
- + * and/or 12-15 for group B).
- + */
- +#define WBFS_GRP_A_OFFSET 8
- +#define WBFS_GRP_B_OFFSET 12
- +
- +#define WBFS_GRP_A 1
- +#define WBFS_GRP_B 2
- +#define WBFS_GRP_LARGE 3
- +
- +enum {
- + TCA_CEETM_UNSPEC,
- + TCA_CEETM_COPT,
- + TCA_CEETM_QOPS,
- + __TCA_CEETM_MAX,
- +};
- +
- +/* CEETM configuration types */
- +enum {
- + CEETM_ROOT = 1,
- + CEETM_PRIO,
- + CEETM_WBFS
- +};
- +
- +#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
- +extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
- +
- +struct ceetm_class;
- +struct ceetm_qdisc_stats;
- +struct ceetm_class_stats;
- +
- +struct ceetm_fq {
- + struct qman_fq fq;
- + struct net_device *net_dev;
- + struct ceetm_class *ceetm_cls;
- +};
- +
- +struct root_q {
- + struct Qdisc **qdiscs;
- + __u16 overhead;
- + __u32 rate;
- + __u32 ceil;
- + struct qm_ceetm_sp *sp;
- + struct qm_ceetm_lni *lni;
- + struct ceetm_qdisc_stats __percpu *qstats;
- +};
- +
- +struct prio_q {
- + __u16 qcount;
- + struct ceetm_class *parent;
- +};
- +
- +struct wbfs_q {
- + __u16 qcount;
- + int group_type;
- + struct ceetm_class *parent;
- + __u16 cr;
- + __u16 er;
- +};
- +
- +struct ceetm_qdisc {
- + int type; /* LNI/CHNL/WBFS */
- + bool shaped;
- + union {
- + struct root_q root;
- + struct prio_q prio;
- + struct wbfs_q wbfs;
- + };
- + struct Qdisc_class_hash clhash;
- + struct tcf_proto *filter_list; /* qdisc attached filters */
- +};
- +
- +/* CEETM Qdisc configuration parameters */
- +struct tc_ceetm_qopt {
- + __u32 type;
- + __u16 shaped;
- + __u16 qcount;
- + __u16 overhead;
- + __u32 rate;
- + __u32 ceil;
- + __u16 cr;
- + __u16 er;
- + __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
- +};
- +
- +struct root_c {
- + unsigned int rate;
- + unsigned int ceil;
- + unsigned int tbl;
- + bool wbfs_grp_a;
- + bool wbfs_grp_b;
- + bool wbfs_grp_large;
- + struct Qdisc *child;
- + struct qm_ceetm_channel *ch;
- +};
- +
- +struct prio_c {
- + bool cr;
- + bool er;
- + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
- + struct qm_ceetm_lfq *lfq;
- + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
- + struct qm_ceetm_ccg *ccg;
- + /* only one wbfs can be linked to one priority CQ */
- + struct Qdisc *child;
- + struct ceetm_class_stats __percpu *cstats;
- +};
- +
- +struct wbfs_c {
- + __u8 weight; /* The weight of the class between 1 and 248 */
- + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
- + struct qm_ceetm_lfq *lfq;
- + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
- + struct qm_ceetm_ccg *ccg;
- + struct ceetm_class_stats __percpu *cstats;
- +};
- +
- +struct ceetm_class {
- + struct Qdisc_class_common common;
- + int refcnt; /* usage count of this class */
- + struct tcf_proto *filter_list; /* class attached filters */
- + struct Qdisc *parent;
- + bool shaped;
- + int type; /* ROOT/PRIO/WBFS */
- + union {
- + struct root_c root;
- + struct prio_c prio;
- + struct wbfs_c wbfs;
- + };
- +};
- +
- +/* CEETM Class configuration parameters */
- +struct tc_ceetm_copt {
- + __u32 type;
- + __u16 shaped;
- + __u32 rate;
- + __u32 ceil;
- + __u16 tbl;
- + __u16 cr;
- + __u16 er;
- + __u8 weight;
- +};
- +
- +/* CEETM stats */
- +struct ceetm_qdisc_stats {
- + __u32 drops;
- +};
- +
- +struct ceetm_class_stats {
- + struct gnet_stats_basic_packed bstats;
- + __u32 ern_drop_count;
- + __u32 cgr_congested_count;
- +};
- +
- +struct tc_ceetm_xstats {
- + __u32 ern_drop_count;
- + __u32 cgr_congested_count;
- + __u64 frame_count;
- + __u64 byte_count;
- +};
- +
- +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
- +#endif
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
- @@ -0,0 +1,1787 @@
- +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_platform.h>
- +#include <linux/of_net.h>
- +#include <linux/etherdevice.h>
- +#include <linux/kthread.h>
- +#include <linux/percpu.h>
- +#include <linux/highmem.h>
- +#include <linux/sort.h>
- +#include <linux/fsl_qman.h>
- +#include <linux/ip.h>
- +#include <linux/ipv6.h>
- +#include <linux/if_vlan.h> /* vlan_eth_hdr */
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#ifdef CONFIG_FSL_DPAA_1588
- +#include "dpaa_1588.h"
- +#endif
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- +#include "dpaa_debugfs.h"
- +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
- +#include "mac.h"
- +
- +/* DPAA platforms benefit from hardware-assisted queue management */
- +#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ
- +
- +/* Size in bytes of the FQ taildrop threshold */
- +#define DPA_FQ_TD 0x200000
- +
- +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
- +struct ptp_priv_s ptp_priv;
- +#endif
- +
- +static struct dpa_bp *dpa_bp_array[64];
- +
- +int dpa_max_frm;
- +EXPORT_SYMBOL(dpa_max_frm);
- +
- +int dpa_rx_extra_headroom;
- +EXPORT_SYMBOL(dpa_rx_extra_headroom);
- +
- +int dpa_num_cpus = NR_CPUS;
- +
- +static const struct fqid_cell tx_confirm_fqids[] = {
- + {0, DPAA_ETH_TX_QUEUES}
- +};
- +
- +static struct fqid_cell default_fqids[][3] = {
- + [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
- + [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
- +};
- +
- +static const char fsl_qman_frame_queues[][25] = {
- + [RX] = "fsl,qman-frame-queues-rx",
- + [TX] = "fsl,qman-frame-queues-tx"
- +};
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- +/* A set of callbacks for hooking into the fastpath at different points. */
- +struct dpaa_eth_hooks_s dpaa_eth_hooks;
- +EXPORT_SYMBOL(dpaa_eth_hooks);
- +/* This function should only be called on the probe paths, since it makes no
- + * effort to guarantee consistency of the destination hooks structure.
- + */
- +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
- +{
- + if (hooks)
- + dpaa_eth_hooks = *hooks;
- + else
- + pr_err("NULL pointer to hooks!\n");
- +}
- +EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
- +#endif
- +
- +int dpa_netdev_init(struct net_device *net_dev,
- + const uint8_t *mac_addr,
- + uint16_t tx_timeout)
- +{
- + int err;
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct device *dev = net_dev->dev.parent;
- +
- + net_dev->hw_features |= DPA_NETIF_FEATURES;
- +
- + net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- +
- + net_dev->features |= net_dev->hw_features;
- + net_dev->vlan_features = net_dev->features;
- +
- + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
- +
- + net_dev->ethtool_ops = &dpa_ethtool_ops;
- +
- + net_dev->needed_headroom = priv->tx_headroom;
- + net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
- +
- + err = register_netdev(net_dev);
- + if (err < 0) {
- + dev_err(dev, "register_netdev() = %d\n", err);
- + return err;
- + }
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + /* create debugfs entry for this net_device */
- + err = dpa_netdev_debugfs_create(net_dev);
- + if (err) {
- + unregister_netdev(net_dev);
- + return err;
- + }
- +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_netdev_init);
- +
- +int __cold dpa_start(struct net_device *net_dev)
- +{
- + int err, i;
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- +
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + err = mac_dev->init_phy(net_dev, priv->mac_dev);
- + if (err < 0) {
- + if (netif_msg_ifup(priv))
- + netdev_err(net_dev, "init_phy() = %d\n", err);
- + return err;
- + }
- +
- + for_each_port_device(i, mac_dev->port_dev) {
- + err = fm_port_enable(mac_dev->port_dev[i]);
- + if (err)
- + goto mac_start_failed;
- + }
- +
- + err = priv->mac_dev->start(mac_dev);
- + if (err < 0) {
- + if (netif_msg_ifup(priv))
- + netdev_err(net_dev, "mac_dev->start() = %d\n", err);
- + goto mac_start_failed;
- + }
- +
- + netif_tx_start_all_queues(net_dev);
- +
- + return 0;
- +
- +mac_start_failed:
- + for_each_port_device(i, mac_dev->port_dev)
- + fm_port_disable(mac_dev->port_dev[i]);
- +
- + return err;
- +}
- +EXPORT_SYMBOL(dpa_start);
- +
- +int __cold dpa_stop(struct net_device *net_dev)
- +{
- + int _errno, i, err;
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- +
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + netif_tx_stop_all_queues(net_dev);
- + /* Allow the Fman (Tx) port to process in-flight frames before we
- + * try switching it off.
- + */
- + usleep_range(5000, 10000);
- +
- + _errno = mac_dev->stop(mac_dev);
- + if (unlikely(_errno < 0))
- + if (netif_msg_ifdown(priv))
- + netdev_err(net_dev, "mac_dev->stop() = %d\n",
- + _errno);
- +
- + for_each_port_device(i, mac_dev->port_dev) {
- + err = fm_port_disable(mac_dev->port_dev[i]);
- + _errno = err ? err : _errno;
- + }
- +
- + if (mac_dev->phy_dev)
- + phy_disconnect(mac_dev->phy_dev);
- + mac_dev->phy_dev = NULL;
- +
- + return _errno;
- +}
- +EXPORT_SYMBOL(dpa_stop);
- +
- +void __cold dpa_timeout(struct net_device *net_dev)
- +{
- + const struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- +
- + priv = netdev_priv(net_dev);
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + if (netif_msg_timer(priv))
- + netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",
- + jiffies_to_msecs(jiffies - net_dev->trans_start));
- +
- + percpu_priv->stats.tx_errors++;
- +}
- +EXPORT_SYMBOL(dpa_timeout);
- +
- +/* net_device */
- +
- +/**
- + * @param net_dev the device for which statistics are calculated
- + * @param stats the function fills this structure with the device's statistics
- + * @return the address of the structure containing the statistics
- + *
- + * Calculates the statistics for the given device by adding the statistics
- + * collected by each CPU.
- + */
- +struct rtnl_link_stats64 * __cold
- +dpa_get_stats64(struct net_device *net_dev,
- + struct rtnl_link_stats64 *stats)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + u64 *cpustats;
- + u64 *netstats = (u64 *)stats;
- + int i, j;
- + struct dpa_percpu_priv_s *percpu_priv;
- + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
- +
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + cpustats = (u64 *)&percpu_priv->stats;
- +
- + for (j = 0; j < numstats; j++)
- + netstats[j] += cpustats[j];
- + }
- +
- + return stats;
- +}
- +EXPORT_SYMBOL(dpa_get_stats64);
- +
- +int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
- +{
- + const int max_mtu = dpa_get_max_mtu();
- +
- + /* Make sure we don't exceed the Ethernet controller's MAXFRM */
- + if (new_mtu < 68 || new_mtu > max_mtu) {
- + netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
- + new_mtu, 68, max_mtu);
- + return -EINVAL;
- + }
- + net_dev->mtu = new_mtu;
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_change_mtu);
- +
- +/* .ndo_init callback */
- +int dpa_ndo_init(struct net_device *net_dev)
- +{
- + /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
- + * we choose conservatively and let the user explicitly set a higher
- + * MTU via ifconfig. Otherwise, the user may end up with different MTUs
- + * in the same LAN.
- + * If on the other hand fsl_fm_max_frm has been chosen below 1500,
- + * start with the maximum allowed.
- + */
- + int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
- +
- + pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
- + net_dev->mtu = init_mtu;
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_ndo_init);
- +
- +int dpa_set_features(struct net_device *dev, netdev_features_t features)
- +{
- + /* Not much to do here for now */
- + dev->features = features;
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_set_features);
- +
- +netdev_features_t dpa_fix_features(struct net_device *dev,
- + netdev_features_t features)
- +{
- + netdev_features_t unsupported_features = 0;
- +
- + /* In theory we should never be requested to enable features that
- + * we didn't set in netdev->features and netdev->hw_features at probe
- + * time, but double check just to be on the safe side.
- + * We don't support enabling Rx csum through ethtool yet
- + */
- + unsupported_features |= NETIF_F_RXCSUM;
- +
- + features &= ~unsupported_features;
- +
- + return features;
- +}
- +EXPORT_SYMBOL(dpa_fix_features);
- +
- +#ifdef CONFIG_FSL_DPAA_TS
- +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
- + const void *data)
- +{
- + u64 *ts, ns;
- +
- + ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
- + data);
- +
- + if (!ts || *ts == 0)
- + return 0;
- +
- + be64_to_cpus(ts);
- +
- + /* multiple DPA_PTP_NOMINAL_FREQ_PERIOD_NS for case of non power of 2 */
- + ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
- +
- + return ns;
- +}
- +
- +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
- + struct skb_shared_hwtstamps *shhwtstamps, const void *data)
- +{
- + u64 ns;
- +
- + ns = dpa_get_timestamp_ns(priv, rx_tx, data);
- +
- + if (ns == 0)
- + return -EINVAL;
- +
- + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- + shhwtstamps->hwtstamp = ns_to_ktime(ns);
- +
- + return 0;
- +}
- +
- +static void dpa_ts_tx_enable(struct net_device *dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- + struct mac_device *mac_dev = priv->mac_dev;
- +
- + if (mac_dev->fm_rtc_enable)
- + mac_dev->fm_rtc_enable(get_fm_handle(dev));
- + if (mac_dev->ptp_enable)
- + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
- +
- + priv->ts_tx_en = true;
- +}
- +
- +static void dpa_ts_tx_disable(struct net_device *dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- +
- +#if 0
- +/* the RTC might be needed by the Rx Ts, cannot disable here
- + * no separate ptp_disable API for Rx/Tx, cannot disable here
- + */
- + struct mac_device *mac_dev = priv->mac_dev;
- +
- + if (mac_dev->fm_rtc_disable)
- + mac_dev->fm_rtc_disable(get_fm_handle(dev));
- +
- + if (mac_dev->ptp_disable)
- + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
- +#endif
- +
- + priv->ts_tx_en = false;
- +}
- +
- +static void dpa_ts_rx_enable(struct net_device *dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- + struct mac_device *mac_dev = priv->mac_dev;
- +
- + if (mac_dev->fm_rtc_enable)
- + mac_dev->fm_rtc_enable(get_fm_handle(dev));
- + if (mac_dev->ptp_enable)
- + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
- +
- + priv->ts_rx_en = true;
- +}
- +
- +static void dpa_ts_rx_disable(struct net_device *dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- +
- +#if 0
- +/* the RTC might be needed by the Tx Ts, cannot disable here
- + * no separate ptp_disable API for Rx/Tx, cannot disable here
- + */
- + struct mac_device *mac_dev = priv->mac_dev;
- +
- + if (mac_dev->fm_rtc_disable)
- + mac_dev->fm_rtc_disable(get_fm_handle(dev));
- +
- + if (mac_dev->ptp_disable)
- + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
- +#endif
- +
- + priv->ts_rx_en = false;
- +}
- +
- +static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- +{
- + struct hwtstamp_config config;
- +
- + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- + return -EFAULT;
- +
- + switch (config.tx_type) {
- + case HWTSTAMP_TX_OFF:
- + dpa_ts_tx_disable(dev);
- + break;
- + case HWTSTAMP_TX_ON:
- + dpa_ts_tx_enable(dev);
- + break;
- + default:
- + return -ERANGE;
- + }
- +
- + if (config.rx_filter == HWTSTAMP_FILTER_NONE)
- + dpa_ts_rx_disable(dev);
- + else {
- + dpa_ts_rx_enable(dev);
- + /* TS is set for all frame types, not only those requested */
- + config.rx_filter = HWTSTAMP_FILTER_ALL;
- + }
- +
- + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- + -EFAULT : 0;
- +}
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- +{
- +#ifdef CONFIG_FSL_DPAA_1588
- + struct dpa_priv_s *priv = netdev_priv(dev);
- +#endif
- + int ret = 0;
- +
- + /* at least one timestamping feature must be enabled */
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (!netif_running(dev))
- +#endif
- + return -EINVAL;
- +
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (cmd == SIOCSHWTSTAMP)
- + return dpa_ts_ioctl(dev, rq, cmd);
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- + if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
- + if (priv->tsu && priv->tsu->valid)
- + ret = dpa_ioctl_1588(dev, rq, cmd);
- + else
- + ret = -ENODEV;
- + }
- +#endif
- +
- + return ret;
- +}
- +EXPORT_SYMBOL(dpa_ioctl);
- +
- +int __cold dpa_remove(struct platform_device *of_dev)
- +{
- + int err;
- + struct device *dev;
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- +
- + dev = &of_dev->dev;
- + net_dev = dev_get_drvdata(dev);
- +
- + priv = netdev_priv(net_dev);
- +
- + dpaa_eth_sysfs_remove(dev);
- +
- + dev_set_drvdata(dev, NULL);
- + unregister_netdev(net_dev);
- +
- + err = dpa_fq_free(dev, &priv->dpa_fq_list);
- +
- + qman_delete_cgr_safe(&priv->ingress_cgr);
- + qman_release_cgrid(priv->ingress_cgr.cgrid);
- + qman_delete_cgr_safe(&priv->cgr_data.cgr);
- + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- +
- + dpa_private_napi_del(net_dev);
- +
- + dpa_bp_free(priv);
- +
- + if (priv->buf_layout)
- + devm_kfree(dev, priv->buf_layout);
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + /* remove debugfs entry for this net_device */
- + dpa_netdev_debugfs_remove(net_dev);
- +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (priv->tsu && priv->tsu->valid)
- + dpa_ptp_cleanup(priv);
- +#endif
- +
- + free_netdev(net_dev);
- +
- + return err;
- +}
- +EXPORT_SYMBOL(dpa_remove);
- +
- +struct mac_device * __cold __must_check
- +__attribute__((nonnull))
- +dpa_mac_probe(struct platform_device *_of_dev)
- +{
- + struct device *dpa_dev, *dev;
- + struct device_node *mac_node;
- + struct platform_device *of_dev;
- + struct mac_device *mac_dev;
- +#ifdef CONFIG_FSL_DPAA_1588
- + int lenp;
- + const phandle *phandle_prop;
- + struct net_device *net_dev = NULL;
- + struct dpa_priv_s *priv = NULL;
- + struct device_node *timer_node;
- +#endif
- + dpa_dev = &_of_dev->dev;
- +
- + mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
- + if (unlikely(mac_node == NULL)) {
- + dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
- + return ERR_PTR(-EFAULT);
- + }
- +
- + of_dev = of_find_device_by_node(mac_node);
- + if (unlikely(of_dev == NULL)) {
- + dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
- + mac_node->full_name);
- + of_node_put(mac_node);
- + return ERR_PTR(-EINVAL);
- + }
- + of_node_put(mac_node);
- +
- + dev = &of_dev->dev;
- +
- + mac_dev = dev_get_drvdata(dev);
- + if (unlikely(mac_dev == NULL)) {
- + dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
- + dev_name(dev));
- + return ERR_PTR(-EINVAL);
- + }
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- + phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
- + if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
- + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
- + (mac_dev->speed == SPEED_1000)))) {
- + timer_node = of_find_node_by_phandle(*phandle_prop);
- + if (timer_node)
- + net_dev = dev_get_drvdata(dpa_dev);
- + if (timer_node && net_dev) {
- + priv = netdev_priv(net_dev);
- + if (!dpa_ptp_init(priv))
- + dev_info(dev, "%s: ptp 1588 is initialized.\n",
- + mac_node->full_name);
- + }
- + }
- +#endif
- +
- +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
- + if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
- + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
- + (mac_dev->speed == SPEED_1000))) {
- + ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
- + if (ptp_priv.node) {
- + ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
- + if (unlikely(ptp_priv.of_dev == NULL)) {
- + dev_err(dpa_dev,
- + "Cannot find device represented by timer_node\n");
- + of_node_put(ptp_priv.node);
- + return ERR_PTR(-EINVAL);
- + }
- + ptp_priv.mac_dev = mac_dev;
- + }
- + }
- +#endif
- + return mac_dev;
- +}
- +EXPORT_SYMBOL(dpa_mac_probe);
- +
- +int dpa_set_mac_address(struct net_device *net_dev, void *addr)
- +{
- + const struct dpa_priv_s *priv;
- + int _errno;
- + struct mac_device *mac_dev;
- +
- + priv = netdev_priv(net_dev);
- +
- + _errno = eth_mac_addr(net_dev, addr);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev,
- + "eth_mac_addr() = %d\n",
- + _errno);
- + return _errno;
- + }
- +
- + mac_dev = priv->mac_dev;
- +
- + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
- + net_dev->dev_addr);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev,
- + "mac_dev->change_addr() = %d\n",
- + _errno);
- + return _errno;
- + }
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_set_mac_address);
- +
- +void dpa_set_rx_mode(struct net_device *net_dev)
- +{
- + int _errno;
- + const struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- +
- + if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
- + priv->mac_dev->promisc = !priv->mac_dev->promisc;
- + _errno = priv->mac_dev->set_promisc(
- + priv->mac_dev->get_mac_handle(priv->mac_dev),
- + priv->mac_dev->promisc);
- + if (unlikely(_errno < 0) && netif_msg_drv(priv))
- + netdev_err(net_dev,
- + "mac_dev->set_promisc() = %d\n",
- + _errno);
- + }
- +
- + _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
- + if (unlikely(_errno < 0) && netif_msg_drv(priv))
- + netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
- +}
- +EXPORT_SYMBOL(dpa_set_rx_mode);
- +
- +void dpa_set_buffers_layout(struct mac_device *mac_dev,
- + struct dpa_buffer_layout_s *layout)
- +{
- + struct fm_port_params params;
- +
- + /* Rx */
- + layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
- + layout[RX].parse_results = true;
- + layout[RX].hash_results = true;
- +#ifdef CONFIG_FSL_DPAA_TS
- + layout[RX].time_stamp = true;
- +#endif
- + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], ¶ms);
- + layout[RX].manip_extra_space = params.manip_extra_space;
- + /* a value of zero for data alignment means "don't care", so align to
- + * a non-zero value to prevent FMD from using its own default
- + */
- + layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
- +
- + /* Tx */
- + layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
- + layout[TX].parse_results = true;
- + layout[TX].hash_results = true;
- +#ifdef CONFIG_FSL_DPAA_TS
- + layout[TX].time_stamp = true;
- +#endif
- + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], ¶ms);
- + layout[TX].manip_extra_space = params.manip_extra_space;
- + layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
- +}
- +EXPORT_SYMBOL(dpa_set_buffers_layout);
- +
- +int __attribute__((nonnull))
- +dpa_bp_alloc(struct dpa_bp *dpa_bp)
- +{
- + int err;
- + struct bman_pool_params bp_params;
- + struct platform_device *pdev;
- +
- + if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
- + pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");
- + return -EINVAL;
- + }
- +
- + memset(&bp_params, 0, sizeof(struct bman_pool_params));
- +#ifdef CONFIG_FMAN_PFC
- + bp_params.flags = BMAN_POOL_FLAG_THRESH;
- + bp_params.thresholds[0] = bp_params.thresholds[2] =
- + CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
- + bp_params.thresholds[1] = bp_params.thresholds[3] =
- + CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
- +#endif
- +
- + /* If the pool is already specified, we only create one per bpid */
- + if (dpa_bpid2pool_use(dpa_bp->bpid))
- + return 0;
- +
- + if (dpa_bp->bpid == 0)
- + bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
- + else
- + bp_params.bpid = dpa_bp->bpid;
- +
- + dpa_bp->pool = bman_new_pool(&bp_params);
- + if (unlikely(dpa_bp->pool == NULL)) {
- + pr_err("bman_new_pool() failed\n");
- + return -ENODEV;
- + }
- +
- + dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
- +
- + pdev = platform_device_register_simple("dpaa_eth_bpool",
- + dpa_bp->bpid, NULL, 0);
- + if (IS_ERR(pdev)) {
- + err = PTR_ERR(pdev);
- + goto pdev_register_failed;
- + }
- +
- + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
- + if (err)
- + goto pdev_mask_failed;
- + if (!pdev->dev.dma_mask)
- + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- + else {
- + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
- + if (err)
- + goto pdev_mask_failed;
- + }
- +
- +#ifdef CONFIG_FMAN_ARM
- + /* force coherency */
- + pdev->dev.archdata.dma_coherent = true;
- + arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
- +#endif
- +
- + dpa_bp->dev = &pdev->dev;
- +
- + if (dpa_bp->seed_cb) {
- + err = dpa_bp->seed_cb(dpa_bp);
- + if (err)
- + goto pool_seed_failed;
- + }
- +
- + dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
- +
- + return 0;
- +
- +pool_seed_failed:
- +pdev_mask_failed:
- + platform_device_unregister(pdev);
- +pdev_register_failed:
- + bman_free_pool(dpa_bp->pool);
- +
- + return err;
- +}
- +EXPORT_SYMBOL(dpa_bp_alloc);
- +
- +void dpa_bp_drain(struct dpa_bp *bp)
- +{
- + int ret, num = 8;
- +
- + do {
- + struct bm_buffer bmb[8];
- + int i;
- +
- + ret = bman_acquire(bp->pool, bmb, num, 0);
- + if (ret < 0) {
- + if (num == 8) {
- + /* we have less than 8 buffers left;
- + * drain them one by one
- + */
- + num = 1;
- + ret = 1;
- + continue;
- + } else {
- + /* Pool is fully drained */
- + break;
- + }
- + }
- +
- + for (i = 0; i < num; i++) {
- + dma_addr_t addr = bm_buf_addr(&bmb[i]);
- +
- + dma_unmap_single(bp->dev, addr, bp->size,
- + DMA_BIDIRECTIONAL);
- +
- + bp->free_buf_cb(phys_to_virt(addr));
- + }
- + } while (ret > 0);
- +}
- +EXPORT_SYMBOL(dpa_bp_drain);
- +
- +static void __cold __attribute__((nonnull))
- +_dpa_bp_free(struct dpa_bp *dpa_bp)
- +{
- + struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
- +
- + /* the mapping between bpid and dpa_bp is done very late in the
- + * allocation procedure; if something failed before the mapping, the bp
- + * was not configured, therefore we don't need the below instructions
- + */
- + if (!bp)
- + return;
- +
- + if (!atomic_dec_and_test(&bp->refs))
- + return;
- +
- + if (bp->free_buf_cb)
- + dpa_bp_drain(bp);
- +
- + dpa_bp_array[bp->bpid] = NULL;
- + bman_free_pool(bp->pool);
- +
- + if (bp->dev)
- + platform_device_unregister(to_platform_device(bp->dev));
- +}
- +
- +void __cold __attribute__((nonnull))
- +dpa_bp_free(struct dpa_priv_s *priv)
- +{
- + int i;
- +
- + for (i = 0; i < priv->bp_count; i++)
- + _dpa_bp_free(&priv->dpa_bp[i]);
- +}
- +EXPORT_SYMBOL(dpa_bp_free);
- +
- +struct dpa_bp *dpa_bpid2pool(int bpid)
- +{
- + return dpa_bp_array[bpid];
- +}
- +EXPORT_SYMBOL(dpa_bpid2pool);
- +
- +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
- +{
- + dpa_bp_array[bpid] = dpa_bp;
- + atomic_set(&dpa_bp->refs, 1);
- +}
- +
- +bool dpa_bpid2pool_use(int bpid)
- +{
- + if (dpa_bpid2pool(bpid)) {
- + atomic_inc(&dpa_bp_array[bpid]->refs);
- + return true;
- + }
- +
- + return false;
- +}
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
- + void *accel_priv, select_queue_fallback_t fallback)
- +{
- + return dpa_get_queue_mapping(skb);
- +}
- +EXPORT_SYMBOL(dpa_select_queue);
- +#endif
- +
- +struct dpa_fq *dpa_fq_alloc(struct device *dev,
- + u32 fq_start,
- + u32 fq_count,
- + struct list_head *list,
- + enum dpa_fq_type fq_type)
- +{
- + int i;
- + struct dpa_fq *dpa_fq;
- +
- + dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
- + if (dpa_fq == NULL)
- + return NULL;
- +
- + for (i = 0; i < fq_count; i++) {
- + dpa_fq[i].fq_type = fq_type;
- + if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
- + dpa_fq[i].fqid = fq_start ?
- + DPAA_ETH_FQ_DELTA + fq_start + i : 0;
- + else
- + dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
- +
- + list_add_tail(&dpa_fq[i].list, list);
- + }
- +
- +#ifdef CONFIG_FMAN_PFC
- + if (fq_type == FQ_TYPE_TX)
- + for (i = 0; i < fq_count; i++)
- + dpa_fq[i].wq = i / dpa_num_cpus;
- + else
- +#endif
- + for (i = 0; i < fq_count; i++)
- + _dpa_assign_wq(dpa_fq + i);
- +
- + return dpa_fq;
- +}
- +EXPORT_SYMBOL(dpa_fq_alloc);
- +
- +/* Probing of FQs for MACful ports */
- +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
- + struct fm_port_fqs *port_fqs,
- + bool alloc_tx_conf_fqs,
- + enum port_type ptype)
- +{
- + struct fqid_cell *fqids = NULL;
- + const void *fqids_off = NULL;
- + struct dpa_fq *dpa_fq = NULL;
- + struct device_node *np = dev->of_node;
- + int num_ranges;
- + int i, lenp;
- +
- + if (ptype == TX && alloc_tx_conf_fqs) {
- + if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
- + tx_confirm_fqids->count, list,
- + FQ_TYPE_TX_CONF_MQ))
- + goto fq_alloc_failed;
- + }
- +
- + fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
- + if (fqids_off == NULL) {
- + /* No dts definition, so use the defaults. */
- + fqids = default_fqids[ptype];
- + num_ranges = 3;
- + } else {
- + num_ranges = lenp / sizeof(*fqids);
- +
- + fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
- + GFP_KERNEL);
- + if (fqids == NULL)
- + goto fqids_alloc_failed;
- +
- + /* convert to CPU endianess */
- + for (i = 0; i < num_ranges; i++) {
- + fqids[i].start = be32_to_cpup(fqids_off +
- + i * sizeof(*fqids));
- + fqids[i].count = be32_to_cpup(fqids_off +
- + i * sizeof(*fqids) + sizeof(__be32));
- + }
- + }
- +
- + for (i = 0; i < num_ranges; i++) {
- + switch (i) {
- + case 0:
- + /* The first queue is the error queue */
- + if (fqids[i].count != 1)
- + goto invalid_error_queue;
- +
- + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
- + fqids[i].count, list,
- + ptype == RX ?
- + FQ_TYPE_RX_ERROR :
- + FQ_TYPE_TX_ERROR);
- + if (dpa_fq == NULL)
- + goto fq_alloc_failed;
- +
- + if (ptype == RX)
- + port_fqs->rx_errq = &dpa_fq[0];
- + else
- + port_fqs->tx_errq = &dpa_fq[0];
- + break;
- + case 1:
- + /* the second queue is the default queue */
- + if (fqids[i].count != 1)
- + goto invalid_default_queue;
- +
- + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
- + fqids[i].count, list,
- + ptype == RX ?
- + FQ_TYPE_RX_DEFAULT :
- + FQ_TYPE_TX_CONFIRM);
- + if (dpa_fq == NULL)
- + goto fq_alloc_failed;
- +
- + if (ptype == RX)
- + port_fqs->rx_defq = &dpa_fq[0];
- + else
- + port_fqs->tx_defq = &dpa_fq[0];
- + break;
- + default:
- + /* all subsequent queues are either RX* PCD or Tx */
- + if (ptype == RX) {
- + if (!dpa_fq_alloc(dev, fqids[i].start,
- + fqids[i].count, list,
- + FQ_TYPE_RX_PCD) ||
- + !dpa_fq_alloc(dev, fqids[i].start,
- + fqids[i].count, list,
- + FQ_TYPE_RX_PCD_HI_PRIO))
- + goto fq_alloc_failed;
- + } else {
- + if (!dpa_fq_alloc(dev, fqids[i].start,
- + fqids[i].count, list,
- + FQ_TYPE_TX))
- + goto fq_alloc_failed;
- + }
- + break;
- + }
- + }
- +
- + return 0;
- +
- +fq_alloc_failed:
- +fqids_alloc_failed:
- + dev_err(dev, "Cannot allocate memory for frame queues\n");
- + return -ENOMEM;
- +
- +invalid_default_queue:
- +invalid_error_queue:
- + dev_err(dev, "Too many default or error queues\n");
- + return -EINVAL;
- +}
- +EXPORT_SYMBOL(dpa_fq_probe_mac);
- +
- +static u32 rx_pool_channel;
- +static DEFINE_SPINLOCK(rx_pool_channel_init);
- +
- +int dpa_get_channel(void)
- +{
- + spin_lock(&rx_pool_channel_init);
- + if (!rx_pool_channel) {
- + u32 pool;
- + int ret = qman_alloc_pool(&pool);
- + if (!ret)
- + rx_pool_channel = pool;
- + }
- + spin_unlock(&rx_pool_channel_init);
- + if (!rx_pool_channel)
- + return -ENOMEM;
- + return rx_pool_channel;
- +}
- +EXPORT_SYMBOL(dpa_get_channel);
- +
- +void dpa_release_channel(void)
- +{
- + qman_release_pool(rx_pool_channel);
- +}
- +EXPORT_SYMBOL(dpa_release_channel);
- +
- +int dpaa_eth_add_channel(void *__arg)
- +{
- + const cpumask_t *cpus = qman_affine_cpus();
- + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
- + int cpu;
- + struct qman_portal *portal;
- +
- + for_each_cpu(cpu, cpus) {
- + portal = (struct qman_portal *)qman_get_affine_portal(cpu);
- + qman_p_static_dequeue_add(portal, pool);
- + }
- + return 0;
- +}
- +EXPORT_SYMBOL(dpaa_eth_add_channel);
- +
- +/**
- + * Congestion group state change notification callback.
- + * Stops the device's egress queues while they are congested and
- + * wakes them upon exiting congested state.
- + * Also updates some CGR-related stats.
- + */
- +static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
- +
- + int congested)
- +{
- + struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
- + struct dpa_priv_s, cgr_data.cgr);
- +
- + if (congested) {
- + priv->cgr_data.congestion_start_jiffies = jiffies;
- + netif_tx_stop_all_queues(priv->net_dev);
- + priv->cgr_data.cgr_congested_count++;
- + } else {
- + priv->cgr_data.congested_jiffies +=
- + (jiffies - priv->cgr_data.congestion_start_jiffies);
- + netif_tx_wake_all_queues(priv->net_dev);
- + }
- +}
- +
- +int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
- +{
- + struct qm_mcc_initcgr initcgr;
- + u32 cs_th;
- + int err;
- +
- + err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
- + if (err < 0) {
- + pr_err("Error %d allocating CGR ID\n", err);
- + goto out_error;
- + }
- + priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
- +
- + /* Enable Congestion State Change Notifications and CS taildrop */
- + initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
- + initcgr.cgr.cscn_en = QM_CGR_EN;
- +
- + /* Set different thresholds based on the MAC speed.
- + * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
- + * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- + * In such cases, we ought to reconfigure the threshold, too.
- + */
- + if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
- + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
- + else
- + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
- + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- +
- + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
- + initcgr.cgr.cstd_en = QM_CGR_EN;
- +
- + err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
- + &initcgr);
- + if (err < 0) {
- + pr_err("Error %d creating CGR with ID %d\n", err,
- + priv->cgr_data.cgr.cgrid);
- + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- + goto out_error;
- + }
- + pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
- + priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
- + priv->cgr_data.cgr.chan);
- +
- +out_error:
- + return err;
- +}
- +EXPORT_SYMBOL(dpaa_eth_cgr_init);
- +
- +static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
- + struct dpa_fq *fq,
- + const struct qman_fq *template)
- +{
- + fq->fq_base = *template;
- + fq->net_dev = priv->net_dev;
- +
- + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
- + fq->channel = priv->channel;
- +}
- +
- +static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
- + struct dpa_fq *fq,
- + struct fm_port *port,
- + const struct qman_fq *template)
- +{
- + fq->fq_base = *template;
- + fq->net_dev = priv->net_dev;
- +
- + if (port) {
- + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
- + } else {
- + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
- + }
- +}
- +
- +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
- + struct fm_port *tx_port)
- +{
- + struct dpa_fq *fq;
- + uint16_t portals[NR_CPUS];
- + int cpu, portal_cnt = 0, num_portals = 0;
- + uint32_t pcd_fqid, pcd_fqid_hi_prio;
- + const cpumask_t *affine_cpus = qman_affine_cpus();
- + int egress_cnt = 0, conf_cnt = 0;
- +
- + /* Prepare for PCD FQs init */
- + for_each_cpu(cpu, affine_cpus)
- + portals[num_portals++] = qman_affine_channel(cpu);
- + if (num_portals == 0)
- + dev_err(priv->net_dev->dev.parent,
- + "No Qman software (affine) channels found");
- +
- + pcd_fqid = (priv->mac_dev) ?
- + DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
- + pcd_fqid_hi_prio = (priv->mac_dev) ?
- + DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
- +
- + /* Initialize each FQ in the list */
- + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- + switch (fq->fq_type) {
- + case FQ_TYPE_RX_DEFAULT:
- + BUG_ON(!priv->mac_dev);
- + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- + break;
- + case FQ_TYPE_RX_ERROR:
- + BUG_ON(!priv->mac_dev);
- + dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
- + break;
- + case FQ_TYPE_RX_PCD:
- + /* For MACless we can't have dynamic Rx queues */
- + BUG_ON(!priv->mac_dev && !fq->fqid);
- + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- + if (!fq->fqid)
- + fq->fqid = pcd_fqid++;
- + fq->channel = portals[portal_cnt];
- + portal_cnt = (portal_cnt + 1) % num_portals;
- + break;
- + case FQ_TYPE_RX_PCD_HI_PRIO:
- + /* For MACless we can't have dynamic Hi Pri Rx queues */
- + BUG_ON(!priv->mac_dev && !fq->fqid);
- + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- + if (!fq->fqid)
- + fq->fqid = pcd_fqid_hi_prio++;
- + fq->channel = portals[portal_cnt];
- + portal_cnt = (portal_cnt + 1) % num_portals;
- + break;
- + case FQ_TYPE_TX:
- + dpa_setup_egress(priv, fq, tx_port,
- + &fq_cbs->egress_ern);
- + /* If we have more Tx queues than the number of cores,
- + * just ignore the extra ones.
- + */
- + if (egress_cnt < DPAA_ETH_TX_QUEUES)
- + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- + break;
- + case FQ_TYPE_TX_CONFIRM:
- + BUG_ON(!priv->mac_dev);
- + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
- + break;
- + case FQ_TYPE_TX_CONF_MQ:
- + BUG_ON(!priv->mac_dev);
- + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
- + priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- + break;
- + case FQ_TYPE_TX_ERROR:
- + BUG_ON(!priv->mac_dev);
- + dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
- + break;
- + default:
- + dev_warn(priv->net_dev->dev.parent,
- + "Unknown FQ type detected!\n");
- + break;
- + }
- + }
- +
- + /* The number of Tx queues may be smaller than the number of cores, if
- + * the Tx queue range is specified in the device tree instead of being
- + * dynamically allocated.
- + * Make sure all CPUs receive a corresponding Tx queue.
- + */
- + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
- + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- + if (fq->fq_type != FQ_TYPE_TX)
- + continue;
- + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- + if (egress_cnt == DPAA_ETH_TX_QUEUES)
- + break;
- + }
- + }
- +}
- +EXPORT_SYMBOL(dpa_fq_setup);
- +
- +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
- +{
- + int _errno;
- + const struct dpa_priv_s *priv;
- + struct device *dev;
- + struct qman_fq *fq;
- + struct qm_mcc_initfq initfq;
- + struct qman_fq *confq;
- + int queue_id;
- +
- + priv = netdev_priv(dpa_fq->net_dev);
- + dev = dpa_fq->net_dev->dev.parent;
- +
- + if (dpa_fq->fqid == 0)
- + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
- +
- + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
- +
- + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
- + if (_errno) {
- + dev_err(dev, "qman_create_fq() failed\n");
- + return _errno;
- + }
- + fq = &dpa_fq->fq_base;
- +
- + if (dpa_fq->init) {
- + memset(&initfq, 0, sizeof(initfq));
- +
- + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
- + /* FIXME: why would we want to keep an empty FQ in cache? */
- + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
- +
- + /* Try to reduce the number of portal interrupts for
- + * Tx Confirmation FQs.
- + */
- + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
- +
- + /* FQ placement */
- + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
- +
- + initfq.fqd.dest.channel = dpa_fq->channel;
- + initfq.fqd.dest.wq = dpa_fq->wq;
- +
- + /* Put all egress queues in a congestion group of their own.
- + * Sensu stricto, the Tx confirmation queues are Rx FQs,
- + * rather than Tx - but they nonetheless account for the
- + * memory footprint on behalf of egress traffic. We therefore
- + * place them in the netdev's CGR, along with the Tx FQs.
- + */
- + if (dpa_fq->fq_type == FQ_TYPE_TX ||
- + dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
- + dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
- + initfq.we_mask |= QM_INITFQ_WE_CGID;
- + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- + initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
- + /* Set a fixed overhead accounting, in an attempt to
- + * reduce the impact of fixed-size skb shells and the
- + * driver's needed headroom on system memory. This is
- + * especially the case when the egress traffic is
- + * composed of small datagrams.
- + * Unfortunately, QMan's OAL value is capped to an
- + * insufficient value, but even that is better than
- + * no overhead accounting at all.
- + */
- + initfq.we_mask |= QM_INITFQ_WE_OAC;
- + initfq.fqd.oac_init.oac = QM_OAC_CG;
- + initfq.fqd.oac_init.oal =
- + (signed char)(min(sizeof(struct sk_buff) +
- + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
- + }
- +
- + if (td_enable) {
- + initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
- + qm_fqd_taildrop_set(&initfq.fqd.td,
- + DPA_FQ_TD, 1);
- + initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
- + }
- +
- + /* Configure the Tx confirmation queue, now that we know
- + * which Tx queue it pairs with.
- + */
- + if (dpa_fq->fq_type == FQ_TYPE_TX) {
- + queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
- + if (queue_id >= 0) {
- + confq = priv->conf_fqs[queue_id];
- + if (confq) {
- + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- + /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
- + * A2V=1 (contextA A2 field is valid)
- + * A0V=1 (contextA A0 field is valid)
- + * B0V=1 (contextB field is valid)
- + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
- + * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
- + */
- + initfq.fqd.context_a.hi = 0x1e000000;
- + initfq.fqd.context_a.lo = 0x80000000;
- + }
- + }
- + }
- +
- + /* Put all *private* ingress queues in our "ingress CGR". */
- + if (priv->use_ingress_cgr &&
- + (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
- + dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
- + dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
- + dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
- + initfq.we_mask |= QM_INITFQ_WE_CGID;
- + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- + initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
- + /* Set a fixed overhead accounting, just like for the
- + * egress CGR.
- + */
- + initfq.we_mask |= QM_INITFQ_WE_OAC;
- + initfq.fqd.oac_init.oac = QM_OAC_CG;
- + initfq.fqd.oac_init.oal =
- + (signed char)(min(sizeof(struct sk_buff) +
- + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
- + }
- +
- + /* Initialization common to all ingress queues */
- + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- + initfq.fqd.fq_ctrl |=
- + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
- + initfq.fqd.context_a.stashing.exclusive =
- + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
- + QM_STASHING_EXCL_ANNOTATION;
- + initfq.fqd.context_a.stashing.data_cl = 2;
- + initfq.fqd.context_a.stashing.annotation_cl = 1;
- + initfq.fqd.context_a.stashing.context_cl =
- + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
- + }
- +
- + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
- + if (_errno < 0) {
- + dev_err(dev, "qman_init_fq(%u) = %d\n",
- + qman_fq_fqid(fq), _errno);
- + qman_destroy_fq(fq, 0);
- + return _errno;
- + }
- + }
- +
- + dpa_fq->fqid = qman_fq_fqid(fq);
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_fq_init);
- +
- +int __cold __attribute__((nonnull))
- +_dpa_fq_free(struct device *dev, struct qman_fq *fq)
- +{
- + int _errno, __errno;
- + struct dpa_fq *dpa_fq;
- + const struct dpa_priv_s *priv;
- +
- + _errno = 0;
- +
- + dpa_fq = container_of(fq, struct dpa_fq, fq_base);
- + priv = netdev_priv(dpa_fq->net_dev);
- +
- + if (dpa_fq->init) {
- + _errno = qman_retire_fq(fq, NULL);
- + if (unlikely(_errno < 0) && netif_msg_drv(priv))
- + dev_err(dev, "qman_retire_fq(%u) = %d\n",
- + qman_fq_fqid(fq), _errno);
- +
- + __errno = qman_oos_fq(fq);
- + if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
- + dev_err(dev, "qman_oos_fq(%u) = %d\n",
- + qman_fq_fqid(fq), __errno);
- + if (_errno >= 0)
- + _errno = __errno;
- + }
- + }
- +
- + qman_destroy_fq(fq, 0);
- + list_del(&dpa_fq->list);
- +
- + return _errno;
- +}
- +EXPORT_SYMBOL(_dpa_fq_free);
- +
- +int __cold __attribute__((nonnull))
- +dpa_fq_free(struct device *dev, struct list_head *list)
- +{
- + int _errno, __errno;
- + struct dpa_fq *dpa_fq, *tmp;
- +
- + _errno = 0;
- + list_for_each_entry_safe(dpa_fq, tmp, list, list) {
- + __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
- + if (unlikely(__errno < 0) && _errno >= 0)
- + _errno = __errno;
- + }
- +
- + return _errno;
- +}
- +EXPORT_SYMBOL(dpa_fq_free);
- +
- +static void
- +dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
- + struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
- +{
- + struct fm_port_params tx_port_param;
- + bool frag_enabled = false;
- +
- + memset(&tx_port_param, 0, sizeof(tx_port_param));
- + dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
- + buf_layout, frag_enabled);
- +}
- +
- +static void
- +dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
- + struct dpa_fq *errq, struct dpa_fq *defq,
- + struct dpa_buffer_layout_s *buf_layout)
- +{
- + struct fm_port_params rx_port_param;
- + int i;
- + bool frag_enabled = false;
- +
- + memset(&rx_port_param, 0, sizeof(rx_port_param));
- + count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
- + rx_port_param.num_pools = (uint8_t)count;
- + for (i = 0; i < count; i++) {
- + if (i >= rx_port_param.num_pools)
- + break;
- + rx_port_param.pool_param[i].id = bp[i].bpid;
- + rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
- + }
- +
- + dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
- + buf_layout, frag_enabled);
- +}
- +
- +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
- +/* Defined as weak, to be implemented by fman pcd tester. */
- +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
- +__attribute__((weak));
- +
- +int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
- +#else
- +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
- +
- +int dpa_free_pcd_fqids(struct device *, uint32_t);
- +
- +#endif /* CONFIG_FSL_SDK_FMAN_TEST */
- +
- +
- +int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
- + uint8_t alignment, uint32_t *base_fqid)
- +{
- + dev_crit(dev, "callback not implemented!\n");
- +
- + return 0;
- +}
- +
- +int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
- +{
- +
- + dev_crit(dev, "callback not implemented!\n");
- +
- + return 0;
- +}
- +
- +void dpaa_eth_init_ports(struct mac_device *mac_dev,
- + struct dpa_bp *bp, size_t count,
- + struct fm_port_fqs *port_fqs,
- + struct dpa_buffer_layout_s *buf_layout,
- + struct device *dev)
- +{
- + struct fm_port_pcd_param rx_port_pcd_param;
- + struct fm_port *rxport = mac_dev->port_dev[RX];
- + struct fm_port *txport = mac_dev->port_dev[TX];
- +
- + dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
- + port_fqs->tx_defq, &buf_layout[TX]);
- + dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
- + port_fqs->rx_defq, &buf_layout[RX]);
- +
- + rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
- + rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
- + rx_port_pcd_param.dev = dev;
- + fm_port_pcd_bind(rxport, &rx_port_pcd_param);
- +}
- +EXPORT_SYMBOL(dpaa_eth_init_ports);
- +
- +void dpa_release_sgt(struct qm_sg_entry *sgt)
- +{
- + struct dpa_bp *dpa_bp;
- + struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
- + uint8_t i = 0, j;
- +
- + memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
- +
- + do {
- + dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
- + DPA_BUG_ON(!dpa_bp);
- +
- + j = 0;
- + do {
- + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
- + bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
- +
- + j++; i++;
- + } while (j < ARRAY_SIZE(bmb) &&
- + !qm_sg_entry_get_final(&sgt[i-1]) &&
- + qm_sg_entry_get_bpid(&sgt[i-1]) ==
- + qm_sg_entry_get_bpid(&sgt[i]));
- +
- + while (bman_release(dpa_bp->pool, bmb, j, 0))
- + cpu_relax();
- + } while (!qm_sg_entry_get_final(&sgt[i-1]));
- +}
- +EXPORT_SYMBOL(dpa_release_sgt);
- +
- +void __attribute__((nonnull))
- +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
- +{
- + struct qm_sg_entry *sgt;
- + struct dpa_bp *dpa_bp;
- + struct bm_buffer bmb;
- + dma_addr_t addr;
- + void *vaddr;
- +
- + memset(&bmb, 0, sizeof(struct bm_buffer));
- + bm_buffer_set64(&bmb, fd->addr);
- +
- + dpa_bp = dpa_bpid2pool(fd->bpid);
- + DPA_BUG_ON(!dpa_bp);
- +
- + if (fd->format == qm_fd_sg) {
- + vaddr = phys_to_virt(fd->addr);
- + sgt = vaddr + dpa_fd_offset(fd);
- +
- + dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
- + DMA_BIDIRECTIONAL);
- +
- + dpa_release_sgt(sgt);
- + addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
- + DMA_BIDIRECTIONAL);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + return;
- + }
- + bm_buffer_set64(&bmb, addr);
- + }
- +
- + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
- + cpu_relax();
- +}
- +EXPORT_SYMBOL(dpa_fd_release);
- +
- +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_mr_entry *msg)
- +{
- + switch (msg->ern.rc & QM_MR_RC_MASK) {
- + case QM_MR_RC_CGR_TAILDROP:
- + percpu_priv->ern_cnt.cg_tdrop++;
- + break;
- + case QM_MR_RC_WRED:
- + percpu_priv->ern_cnt.wred++;
- + break;
- + case QM_MR_RC_ERROR:
- + percpu_priv->ern_cnt.err_cond++;
- + break;
- + case QM_MR_RC_ORPWINDOW_EARLY:
- + percpu_priv->ern_cnt.early_window++;
- + break;
- + case QM_MR_RC_ORPWINDOW_LATE:
- + percpu_priv->ern_cnt.late_window++;
- + break;
- + case QM_MR_RC_FQ_TAILDROP:
- + percpu_priv->ern_cnt.fq_tdrop++;
- + break;
- + case QM_MR_RC_ORPWINDOW_RETIRED:
- + percpu_priv->ern_cnt.fq_retired++;
- + break;
- + case QM_MR_RC_ORP_ZERO:
- + percpu_priv->ern_cnt.orp_zero++;
- + break;
- + }
- +}
- +EXPORT_SYMBOL(count_ern);
- +
- +/**
- + * Turn on HW checksum computation for this outgoing frame.
- + * If the current protocol is not something we support in this regard
- + * (or if the stack has already computed the SW checksum), we do nothing.
- + *
- + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- + * otherwise.
- + *
- + * Note that this function may modify the fd->cmd field and the skb data buffer
- + * (the Parse Results area).
- + */
- +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
- +{
- + fm_prs_result_t *parse_result;
- + struct iphdr *iph;
- + struct ipv6hdr *ipv6h = NULL;
- + u8 l4_proto;
- + u16 ethertype = ntohs(skb->protocol);
- + int retval = 0;
- +
- + if (skb->ip_summed != CHECKSUM_PARTIAL)
- + return 0;
- +
- + /* Note: L3 csum seems to be already computed in sw, but we can't choose
- + * L4 alone from the FM configuration anyway.
- + */
- +
- + /* Fill in some fields of the Parse Results array, so the FMan
- + * can find them as if they came from the FMan Parser.
- + */
- + parse_result = (fm_prs_result_t *)parse_results;
- +
- + /* If we're dealing with VLAN, get the real Ethernet type */
- + if (ethertype == ETH_P_8021Q) {
- + /* We can't always assume the MAC header is set correctly
- + * by the stack, so reset to beginning of skb->data
- + */
- + skb_reset_mac_header(skb);
- + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- + }
- +
- + /* Fill in the relevant L3 parse result fields
- + * and read the L4 protocol type
- + */
- + switch (ethertype) {
- + case ETH_P_IP:
- + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
- + iph = ip_hdr(skb);
- + DPA_BUG_ON(iph == NULL);
- + l4_proto = iph->protocol;
- + break;
- + case ETH_P_IPV6:
- + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
- + ipv6h = ipv6_hdr(skb);
- + DPA_BUG_ON(ipv6h == NULL);
- + l4_proto = ipv6h->nexthdr;
- + break;
- + default:
- + /* We shouldn't even be here */
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_alert(priv->net_dev,
- + "Can't compute HW csum for L3 proto 0x%x\n",
- + ntohs(skb->protocol));
- + retval = -EIO;
- + goto return_error;
- + }
- +
- + /* Fill in the relevant L4 parse result fields */
- + switch (l4_proto) {
- + case IPPROTO_UDP:
- + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
- + break;
- + case IPPROTO_TCP:
- + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
- + break;
- + default:
- + /* This can as well be a BUG() */
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_alert(priv->net_dev,
- + "Can't compute HW csum for L4 proto 0x%x\n",
- + l4_proto);
- + retval = -EIO;
- + goto return_error;
- + }
- +
- + /* At index 0 is IPOffset_1 as defined in the Parse Results */
- + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
- + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
- +
- + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
- +
- + /* On P1023 and similar platforms fd->cmd interpretation could
- + * be disabled by setting CONTEXT_A bit ICMD; currently this bit
- + * is not set so we do not need to check; in the future, if/when
- + * using context_a we need to check this bit
- + */
- +
- +return_error:
- + return retval;
- +}
- +EXPORT_SYMBOL(dpa_enable_tx_csum);
- +
- +#ifdef CONFIG_FSL_DPAA_CEETM
- +void dpa_enable_ceetm(struct net_device *dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- + priv->ceetm_en = true;
- +}
- +EXPORT_SYMBOL(dpa_enable_ceetm);
- +
- +void dpa_disable_ceetm(struct net_device *dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(dev);
- + priv->ceetm_en = false;
- +}
- +EXPORT_SYMBOL(dpa_disable_ceetm);
- +#endif
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
- @@ -0,0 +1,227 @@
- +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __DPAA_ETH_COMMON_H
- +#define __DPAA_ETH_COMMON_H
- +
- +#include <linux/etherdevice.h> /* struct net_device */
- +#include <linux/fsl_bman.h> /* struct bm_buffer */
- +#include <linux/of_platform.h> /* struct platform_device */
- +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
- +
- +#include "dpaa_eth.h"
- +#include "lnxwrp_fsl_fman.h"
- +
- +#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
- + frag_enabled) \
- +{ \
- + param.errq = errq_id; \
- + param.defq = defq_id; \
- + param.priv_data_size = buf_layout->priv_data_size; \
- + param.parse_results = buf_layout->parse_results; \
- + param.hash_results = buf_layout->hash_results; \
- + param.frag_enable = frag_enabled; \
- + param.time_stamp = buf_layout->time_stamp; \
- + param.manip_extra_space = buf_layout->manip_extra_space; \
- + param.data_align = buf_layout->data_align; \
- + fm_set_##type##_port_params(port, ¶m); \
- +}
- +
- +#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
- +
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- +/* each S/G entry can be divided into two S/G entries */
- +#define DPA_SGT_ENTRIES_THRESHOLD 7
- +#else
- +#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
- +#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
- +
- +
- +#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
- +
- +/* return codes for the dpaa-eth hooks */
- +enum dpaa_eth_hook_result {
- + /* fd/skb was retained by the hook.
- + *
- + * On the Rx path, this means the Ethernet driver will _not_
- + * deliver the skb to the stack. Instead, the hook implementation
- + * is expected to properly dispose of the skb.
- + *
- + * On the Tx path, the Ethernet driver's dpa_tx() function will
- + * immediately return NETDEV_TX_OK. The hook implementation is expected
- + * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
- + * unless you know exactly what you're doing!
- + *
- + * On the confirmation/error paths, the Ethernet driver will _not_
- + * perform any fd cleanup, nor update the interface statistics.
- + */
- + DPAA_ETH_STOLEN,
- + /* fd/skb was returned to the Ethernet driver for regular processing.
- + * The hook is not allowed to, for instance, reallocate the skb (as if
- + * by linearizing, copying, cloning or reallocating the headroom).
- + */
- + DPAA_ETH_CONTINUE
- +};
- +
- +typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
- + struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
- +typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
- + struct sk_buff *skb, struct net_device *net_dev);
- +typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
- + struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
- +
- +/* used in napi related functions */
- +extern u16 qman_portal_max;
- +
- +/* from dpa_ethtool.c */
- +extern const struct ethtool_ops dpa_ethtool_ops;
- +
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- +/* Various hooks used for unit-testing and/or fastpath optimizations.
- + * Currently only one set of such hooks is supported.
- + */
- +struct dpaa_eth_hooks_s {
- + /* Invoked on the Tx private path, immediately after receiving the skb
- + * from the stack.
- + */
- + dpaa_eth_egress_hook_t tx;
- +
- + /* Invoked on the Rx private path, right before passing the skb
- + * up the stack. At that point, the packet's protocol id has already
- + * been set. The skb's data pointer is now at the L3 header, and
- + * skb->mac_header points to the L2 header. skb->len has been adjusted
- + * to be the length of L3+payload (i.e., the length of the
- + * original frame minus the L2 header len).
- + * For more details on what the skb looks like, see eth_type_trans().
- + */
- + dpaa_eth_ingress_hook_t rx_default;
- +
- + /* Driver hook for the Rx error private path. */
- + dpaa_eth_confirm_hook_t rx_error;
- + /* Driver hook for the Tx confirmation private path. */
- + dpaa_eth_confirm_hook_t tx_confirm;
- + /* Driver hook for the Tx error private path. */
- + dpaa_eth_confirm_hook_t tx_error;
- +};
- +
- +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
- +
- +extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
- +#endif
- +
- +int dpa_netdev_init(struct net_device *net_dev,
- + const uint8_t *mac_addr,
- + uint16_t tx_timeout);
- +int __cold dpa_start(struct net_device *net_dev);
- +int __cold dpa_stop(struct net_device *net_dev);
- +void __cold dpa_timeout(struct net_device *net_dev);
- +struct rtnl_link_stats64 * __cold
- +dpa_get_stats64(struct net_device *net_dev,
- + struct rtnl_link_stats64 *stats);
- +int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
- +int dpa_ndo_init(struct net_device *net_dev);
- +int dpa_set_features(struct net_device *dev, netdev_features_t features);
- +netdev_features_t dpa_fix_features(struct net_device *dev,
- + netdev_features_t features);
- +#ifdef CONFIG_FSL_DPAA_TS
- +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
- + enum port_type rx_tx, const void *data);
- +/* Updates the skb shared hw timestamp from the hardware timestamp */
- +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
- + struct skb_shared_hwtstamps *shhwtstamps, const void *data);
- +#endif /* CONFIG_FSL_DPAA_TS */
- +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
- +int __cold dpa_remove(struct platform_device *of_dev);
- +struct mac_device * __cold __must_check
- +__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
- +int dpa_set_mac_address(struct net_device *net_dev, void *addr);
- +void dpa_set_rx_mode(struct net_device *net_dev);
- +void dpa_set_buffers_layout(struct mac_device *mac_dev,
- + struct dpa_buffer_layout_s *layout);
- +int __attribute__((nonnull))
- +dpa_bp_alloc(struct dpa_bp *dpa_bp);
- +void __cold __attribute__((nonnull))
- +dpa_bp_free(struct dpa_priv_s *priv);
- +struct dpa_bp *dpa_bpid2pool(int bpid);
- +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
- +bool dpa_bpid2pool_use(int bpid);
- +void dpa_bp_drain(struct dpa_bp *bp);
- +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
- + void *accel_priv, select_queue_fallback_t fallback);
- +#endif
- +struct dpa_fq *dpa_fq_alloc(struct device *dev,
- + u32 fq_start,
- + u32 fq_count,
- + struct list_head *list,
- + enum dpa_fq_type fq_type);
- +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
- + struct fm_port_fqs *port_fqs,
- + bool tx_conf_fqs_per_core,
- + enum port_type ptype);
- +int dpa_get_channel(void);
- +void dpa_release_channel(void);
- +int dpaa_eth_add_channel(void *__arg);
- +int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
- +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
- + struct fm_port *tx_port);
- +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
- +int __cold __attribute__((nonnull))
- +dpa_fq_free(struct device *dev, struct list_head *list);
- +void dpaa_eth_init_ports(struct mac_device *mac_dev,
- + struct dpa_bp *bp, size_t count,
- + struct fm_port_fqs *port_fqs,
- + struct dpa_buffer_layout_s *buf_layout,
- + struct device *dev);
- +void dpa_release_sgt(struct qm_sg_entry *sgt);
- +void __attribute__((nonnull))
- +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
- +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_mr_entry *msg);
- +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
- +#ifdef CONFIG_FSL_DPAA_CEETM
- +void dpa_enable_ceetm(struct net_device *dev);
- +void dpa_disable_ceetm(struct net_device *dev);
- +#endif
- +struct proxy_device {
- + struct mac_device *mac_dev;
- +};
- +
- +/* mac device control functions exposed by proxy interface*/
- +int dpa_proxy_start(struct net_device *net_dev);
- +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
- +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
- + struct net_device *net_dev);
- +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
- + struct net_device *net_dev);
- +
- +#endif /* __DPAA_ETH_COMMON_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
- @@ -0,0 +1,1735 @@
- +/* Copyright 2013-2015 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/kthread.h>
- +#include <linux/of_net.h>
- +#include <linux/if_vlan.h>
- +#include <linux/ip.h>
- +#include <linux/ipv6.h>
- +#include <linux/percpu.h>
- +
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_eth_base.h"
- +#include "dpaa_eth_generic.h"
- +
- +#define DPA_DEFAULT_TX_HEADROOM 64
- +#define DPA_GENERIC_SKB_COPY_MAX_SIZE 256
- +#define DPA_GENERIC_NAPI_WEIGHT 64
- +#define DPA_GENERIC_DESCRIPTION "FSL DPAA Generic Ethernet driver"
- +#define DPA_GENERIC_BUFFER_QUOTA 4
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +MODULE_DESCRIPTION(DPA_GENERIC_DESCRIPTION);
- +
- +static uint8_t generic_debug = -1;
- +module_param(generic_debug, byte, S_IRUGO);
- +MODULE_PARM_DESC(generic_debug, "Module/Driver verbosity level");
- +
- +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
- +static uint16_t tx_timeout = 1000;
- +module_param(tx_timeout, ushort, S_IRUGO);
- +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
- +
- +struct rtnl_link_stats64 *__cold
- +dpa_generic_get_stats64(struct net_device *netdev,
- + struct rtnl_link_stats64 *stats);
- +static int dpa_generic_set_mac_address(struct net_device *net_dev,
- + void *addr);
- +static int __cold dpa_generic_start(struct net_device *netdev);
- +static int __cold dpa_generic_stop(struct net_device *netdev);
- +static int dpa_generic_eth_probe(struct platform_device *_of_dev);
- +static int dpa_generic_remove(struct platform_device *of_dev);
- +static void dpa_generic_ern(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_mr_entry *msg);
- +static int __hot dpa_generic_tx(struct sk_buff *skb,
- + struct net_device *netdev);
- +static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf);
- +static void dpa_generic_drain_sg_bp(struct dpa_bp *sg_bp, u8 nbuf);
- +
- +static const struct net_device_ops dpa_generic_ops = {
- + .ndo_open = dpa_generic_start,
- + .ndo_start_xmit = dpa_generic_tx,
- + .ndo_stop = dpa_generic_stop,
- + .ndo_set_mac_address = dpa_generic_set_mac_address,
- + .ndo_tx_timeout = dpa_timeout,
- + .ndo_get_stats64 = dpa_generic_get_stats64,
- + .ndo_init = dpa_ndo_init,
- + .ndo_set_features = dpa_set_features,
- + .ndo_fix_features = dpa_fix_features,
- + .ndo_change_mtu = dpa_change_mtu,
- +};
- +
- +static void dpa_generic_draining_timer(unsigned long arg)
- +{
- + struct dpa_generic_priv_s *priv = (struct dpa_generic_priv_s *)arg;
- +
- + dpa_generic_drain_bp(priv->draining_tx_bp, DPA_GENERIC_BUFFER_QUOTA);
- + dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp,
- + DPA_GENERIC_BUFFER_QUOTA);
- +
- + if (priv->net_dev->flags & IFF_UP)
- + mod_timer(&(priv->timer), jiffies + 1);
- +}
- +
- +struct rtnl_link_stats64 *__cold
- +dpa_generic_get_stats64(struct net_device *netdev,
- + struct rtnl_link_stats64 *stats)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
- + u64 *cpustats;
- + u64 *netstats = (u64 *)stats;
- + int i, j;
- + struct dpa_percpu_priv_s *percpu_priv;
- + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
- +
- + for_each_online_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + cpustats = (u64 *)&percpu_priv->stats;
- +
- + for (j = 0; j < numstats; j++)
- + netstats[j] += cpustats[j];
- + }
- +
- + return stats;
- +}
- +
- +static int dpa_generic_set_mac_address(struct net_device *net_dev,
- + void *addr)
- +{
- + const struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
- + int _errno;
- +
- + _errno = eth_mac_addr(net_dev, addr);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
- + return _errno;
- + }
- +
- + return 0;
- +}
- +
- +static const struct of_device_id dpa_generic_match[] = {
- + {
- + .compatible = "fsl,dpa-ethernet-generic"
- + },
- + {}
- +};
- +
- +MODULE_DEVICE_TABLE(of, dpa_generic_match);
- +
- +static struct platform_driver dpa_generic_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME,
- + .of_match_table = dpa_generic_match,
- + .owner = THIS_MODULE,
- + },
- + .probe = dpa_generic_eth_probe,
- + .remove = dpa_generic_remove
- +};
- +
- +static int get_port_ref(struct device_node *dev_node,
- + struct fm_port **port)
- +{
- + struct platform_device *port_of_dev = NULL;
- + struct device *op_dev = NULL;
- + struct device_node *port_node = NULL;
- +
- + port_node = of_parse_phandle(dev_node, "fsl,fman-oh-port", 0);
- + if (port_node == NULL)
- + return -EINVAL;
- +
- + port_of_dev = of_find_device_by_node(port_node);
- + of_node_put(port_node);
- +
- + if (port_of_dev == NULL)
- + return -EINVAL;
- +
- + /* get the reference to oh port from FMD */
- + op_dev = &port_of_dev->dev;
- + *port = fm_port_bind(op_dev);
- +
- + if (*port == NULL)
- + return -EINVAL;
- +
- + return 0;
- +}
- +
- +static void dpaa_generic_napi_enable(struct dpa_generic_priv_s *priv)
- +{
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, j;
- +
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + for (j = 0; j < qman_portal_max; j++)
- + napi_enable(&percpu_priv->np[j].napi);
- + }
- +}
- +
- +static void dpaa_generic_napi_disable(struct dpa_generic_priv_s *priv)
- +{
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, j;
- +
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + for (j = 0; j < qman_portal_max; j++)
- + napi_disable(&percpu_priv->np[j].napi);
- + }
- +}
- +
- +static struct device_node *get_rx_op_port_node(struct platform_device *_of_dev)
- +{
- + struct device *dev = &_of_dev->dev;
- + struct device_node *port_node = NULL;
- + struct device_node *onic_node = NULL;
- + int num_ports = 0;
- +
- + onic_node = dev->of_node;
- +
- + num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
- + if (num_ports != 2) {
- + dev_err(dev, "There should be two O/H port handles in the device tree\n");
- + return ERR_PTR(-EINVAL);
- + }
- +
- + port_node = of_parse_phandle(onic_node, "fsl,oh-ports", 0);
- + if (port_node == NULL) {
- + dev_err(dev, "Cannot find O/H port node in the device tree\n");
- + return ERR_PTR(-EFAULT);
- + }
- +
- + return port_node;
- +}
- +
- +static int __cold dpa_generic_start(struct net_device *netdev)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
- +
- + /* seed default buffer pool */
- + dpa_bp_priv_seed(priv->rx_bp);
- +
- + dpaa_generic_napi_enable(priv);
- + netif_tx_start_all_queues(netdev);
- +
- + mod_timer(&priv->timer, jiffies + 100);
- +
- + return 0;
- +}
- +
- +static int __cold dpa_generic_stop(struct net_device *netdev)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
- +
- + netif_tx_stop_all_queues(netdev);
- + dpaa_generic_napi_disable(priv);
- +
- + return 0;
- +}
- +
- +static enum qman_cb_dqrr_result __hot
- +dpa_generic_rx_err_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *netdev;
- + struct dpa_generic_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + const struct qm_fd *fd;
- + int *countptr;
- +
- + netdev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(netdev);
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- + countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
- + fd = &dq->fd;
- +
- + /* TODO: extract bpid from the fd; when multiple bps are supported
- + * there won't be a default bp
- + */
- +
- + if (dpaa_eth_napi_schedule(percpu_priv, portal))
- + return qman_cb_dqrr_stop;
- +
- + if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
- + /* Unable to refill the buffer pool due to insufficient
- + * system memory. Just release the frame back into the pool,
- + * otherwise we'll soon end up with an empty buffer pool.
- + */
- + dpa_fd_release(netdev, fd);
- + goto qman_consume;
- + }
- +
- + /* limit common, possibly innocuous Rx FIFO Overflow errors'
- + * interference with zero-loss convergence benchmark results.
- + */
- + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
- + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
- + else
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_err(netdev, "Err FD status 2 = 0x%08x\n",
- + fd->status & FM_FD_STAT_RX_ERRORS);
- +
- +
- + percpu_priv->stats.rx_errors++;
- +
- + if (fd->status & FM_PORT_FRM_ERR_DMA)
- + percpu_priv->rx_errors.dme++;
- + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
- + percpu_priv->rx_errors.fpe++;
- + if (fd->status & FM_PORT_FRM_ERR_SIZE)
- + percpu_priv->rx_errors.fse++;
- + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
- + percpu_priv->rx_errors.phe++;
- +
- + /* TODO dpa_csum_validation */
- +
- + dpa_fd_release(netdev, fd);
- +
- +qman_consume:
- + return qman_cb_dqrr_consume;
- +}
- +
- +
- +static enum qman_cb_dqrr_result __hot
- +dpa_generic_rx_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *netdev;
- + struct dpa_generic_priv_s *priv;
- + struct dpa_bp *bp;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct sk_buff **skbh;
- + struct sk_buff *skb;
- + const struct qm_fd *fd = &dq->fd;
- + unsigned int skb_len;
- + u32 fd_status = fd->status;
- + u64 pad;
- + dma_addr_t addr = qm_fd_addr(fd);
- + unsigned int data_start;
- + unsigned long skb_addr;
- + int *countptr;
- +
- + netdev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(netdev);
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- + countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
- +
- + /* This is needed for TCP traffic as draining only on TX is not
- + * enough
- + */
- + dpa_generic_drain_bp(priv->draining_tx_bp, 1);
- + dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp, 1);
- +
- + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
- + return qman_cb_dqrr_stop;
- +
- + if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
- + /* Unable to refill the buffer pool due to insufficient
- + * system memory. Just release the frame back into the pool,
- + * otherwise we'll soon end up with an empty buffer pool.
- + */
- + dpa_fd_release(netdev, fd);
- + goto qman_consume;
- + }
- +
- + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), -1);
- +
- + if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(netdev, "FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_RX_ERRORS);
- +
- + percpu_priv->stats.rx_errors++;
- + dpa_fd_release(netdev, fd);
- + goto qman_consume;
- + }
- + if (unlikely(fd->format != qm_fd_contig)) {
- + percpu_priv->stats.rx_dropped++;
- + if (netif_msg_rx_status(priv) && net_ratelimit())
- + netdev_warn(netdev, "Dropping a SG frame\n");
- + dpa_fd_release(netdev, fd);
- + goto qman_consume;
- + }
- +
- + bp = dpa_bpid2pool(fd->bpid);
- +
- + /* find out the pad */
- + skb_addr = virt_to_phys(skb->head);
- + pad = addr - skb_addr;
- +
- + dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
- +
- + countptr = raw_cpu_ptr(bp->percpu_count);
- + (*countptr)--;
- +
- + /* The skb is currently pointed at head + headroom. The packet
- + * starts at skb->head + pad + fd offset.
- + */
- + data_start = (unsigned int)(pad + dpa_fd_offset(fd) -
- + skb_headroom(skb));
- + skb_put(skb, dpa_fd_length(fd) + data_start);
- + skb_pull(skb, data_start);
- + skb->protocol = eth_type_trans(skb, netdev);
- + if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) {
- + percpu_priv->stats.rx_dropped++;
- + dev_kfree_skb(skb);
- + goto qman_consume;
- + }
- +
- + skb_len = skb->len;
- +
- + if (fd->status & FM_FD_STAT_L4CV)
- + skb->ip_summed = CHECKSUM_UNNECESSARY;
- + else
- + skb->ip_summed = CHECKSUM_NONE;
- +
- + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- + goto qman_consume;
- +
- + percpu_priv->stats.rx_packets++;
- + percpu_priv->stats.rx_bytes += skb_len;
- +
- +qman_consume:
- + return qman_cb_dqrr_consume;
- +}
- +
- +static void dpa_generic_drain_sg_bp(struct dpa_bp *sgbp, u8 nbuf)
- +{
- + int ret;
- + struct bm_buffer bmb[8];
- +
- + do {
- + ret = bman_acquire(sgbp->pool, bmb, nbuf, 0);
- + } while (ret >= 0);
- +}
- +
- +inline void dpa_release_sg(struct sk_buff *skb, dma_addr_t addr,
- + struct dpa_bp *bp)
- +{
- + struct qm_sg_entry *sgt = phys_to_virt(addr + DPA_DEFAULT_TX_HEADROOM);
- + int nr_frags = skb_shinfo(skb)->nr_frags;
- + dma_addr_t sg_addr;
- + int j;
- +
- + dma_unmap_single(bp->dev, addr, DPA_DEFAULT_TX_HEADROOM +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags),
- + DMA_BIDIRECTIONAL);
- +
- + for (j = 0; j <= nr_frags; j++) {
- + DPA_BUG_ON(sgt[j].extension);
- + sg_addr = qm_sg_addr(&sgt[j]);
- + dma_unmap_page(bp->dev, sg_addr,
- + sgt[j].length, DMA_BIDIRECTIONAL);
- + }
- +
- + dev_kfree_skb_any(skb);
- +}
- +
- +inline void dpa_release_contig(struct sk_buff *skb, dma_addr_t addr,
- + struct dpa_bp *bp)
- +{
- + dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
- + dev_kfree_skb_any(skb);
- +}
- +
- +static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf)
- +{
- + int ret, i;
- + struct bm_buffer bmb[8];
- + dma_addr_t addr;
- + int *countptr = raw_cpu_ptr(bp->percpu_count);
- + int count = *countptr;
- + struct sk_buff **skbh;
- +
- + do {
- + /* bman_acquire will fail if nbuf > 8 */
- + ret = bman_acquire(bp->pool, bmb, nbuf, 0);
- + if (ret > 0) {
- + for (i = 0; i < nbuf; i++) {
- + addr = bm_buf_addr(&bmb[i]);
- + skbh = (struct sk_buff **)phys_to_virt(addr);
- + dma_unmap_single(bp->dev, addr, bp->size,
- + DMA_TO_DEVICE);
- +
- + if (skb_is_nonlinear(*skbh))
- + dpa_release_sg(*skbh, addr, bp);
- + else
- + dpa_release_contig(*skbh, addr, bp);
- + }
- + count -= i;
- + }
- + } while (ret > 0);
- +
- + *countptr = count;
- +}
- +
- +/**
- + * Turn on HW checksum computation for this outgoing frame.
- + * If the current protocol is not something we support in this regard
- + * (or if the stack has already computed the SW checksum), we do nothing.
- + *
- + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- + * otherwise.
- + *
- + * Note that this function may modify the fd->cmd field and the skb data buffer
- + * (the Parse Results area).
- + */
- +static int dpa_generic_tx_csum(struct dpa_generic_priv_s *priv,
- + struct sk_buff *skb,
- + struct qm_fd *fd,
- + char *parse_results)
- +{
- + fm_prs_result_t *parse_result;
- + struct iphdr *iph;
- + struct ipv6hdr *ipv6h = NULL;
- + int l4_proto;
- + int ethertype = ntohs(skb->protocol);
- + int retval = 0;
- +
- + if (skb->ip_summed != CHECKSUM_PARTIAL)
- + return 0;
- +
- + /* Note: L3 csum seems to be already computed in sw, but we can't choose
- + * L4 alone from the FM configuration anyway.
- + */
- +
- + /* Fill in some fields of the Parse Results array, so the FMan
- + * can find them as if they came from the FMan Parser.
- + */
- + parse_result = (fm_prs_result_t *)parse_results;
- +
- + /* If we're dealing with VLAN, get the real Ethernet type */
- + if (ethertype == ETH_P_8021Q) {
- + /* We can't always assume the MAC header is set correctly
- + * by the stack, so reset to beginning of skb->data
- + */
- + skb_reset_mac_header(skb);
- + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- + }
- +
- + /* Fill in the relevant L3 parse result fields
- + * and read the L4 protocol type
- + */
- + switch (ethertype) {
- + case ETH_P_IP:
- + parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
- + iph = ip_hdr(skb);
- + BUG_ON(iph == NULL);
- + l4_proto = iph->protocol;
- + break;
- + case ETH_P_IPV6:
- + parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
- + ipv6h = ipv6_hdr(skb);
- + BUG_ON(ipv6h == NULL);
- + l4_proto = ipv6h->nexthdr;
- + break;
- + default:
- + /* We shouldn't even be here */
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_alert(priv->net_dev,
- + "Can't compute HW csum for L3 proto 0x%x\n",
- + ntohs(skb->protocol));
- + retval = -EIO;
- + goto return_error;
- + }
- +
- + /* Fill in the relevant L4 parse result fields */
- + switch (l4_proto) {
- + case IPPROTO_UDP:
- + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
- + break;
- + case IPPROTO_TCP:
- + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
- + break;
- + default:
- + /* This can as well be a BUG() */
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_alert(priv->net_dev,
- + "Can't compute HW csum for L4 proto 0x%x\n",
- + l4_proto);
- + retval = -EIO;
- + goto return_error;
- + }
- +
- + /* At index 0 is IPOffset_1 as defined in the Parse Results */
- + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
- + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
- +
- + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
- +
- + /* On P1023 and similar platforms fd->cmd interpretation could
- + * be disabled by setting CONTEXT_A bit ICMD; currently this bit
- + * is not set so we do not need to check; in the future, if/when
- + * using context_a we need to check this bit
- + */
- +
- +return_error:
- + return retval;
- +}
- +
- +static inline int generic_skb_to_sg_fd(struct dpa_generic_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd)
- +{
- + struct dpa_bp *dpa_bp = priv->draining_tx_bp;
- + struct dpa_bp *dpa_sg_bp = priv->draining_tx_sg_bp;
- + dma_addr_t addr;
- + struct sk_buff **skbh;
- + struct net_device *net_dev = priv->net_dev;
- + int err;
- +
- + struct qm_sg_entry *sgt;
- + void *sgt_buf;
- + void *buffer_start;
- + skb_frag_t *frag;
- + int i, j;
- + const enum dma_data_direction dma_dir = DMA_BIDIRECTIONAL;
- + const int nr_frags = skb_shinfo(skb)->nr_frags;
- +
- + memset(fd, 0, sizeof(*fd));
- + fd->format = qm_fd_sg;
- +
- + /* get a page frag to store the SGTable */
- + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags));
- + if (unlikely(!sgt_buf)) {
- + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
- + return -ENOMEM;
- + }
- +
- + memset(sgt_buf, 0, priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags));
- +
- + /* do this before dma_map_single(DMA_TO_DEVICE), because we may need to
- + * write into the skb.
- + */
- + err = dpa_generic_tx_csum(priv, skb, fd,
- + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
- + if (unlikely(err < 0)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev, "HW csum error: %d\n", err);
- + goto csum_failed;
- + }
- +
- + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
- + sgt[0].bpid = dpa_sg_bp->bpid;
- + sgt[0].offset = 0;
- + sgt[0].length = skb_headlen(skb);
- + sgt[0].extension = 0;
- + sgt[0].final = 0;
- +
- + addr = dma_map_single(dpa_sg_bp->dev, skb->data, sgt[0].length,
- + dma_dir);
- + if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
- + dev_err(dpa_sg_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sg0_map_failed;
- + }
- +
- + sgt[0].addr_hi = (uint8_t)upper_32_bits(addr);
- + sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
- +
- + /* populate the rest of SGT entries */
- + for (i = 1; i <= nr_frags; i++) {
- + frag = &skb_shinfo(skb)->frags[i - 1];
- + sgt[i].bpid = dpa_sg_bp->bpid;
- + sgt[i].offset = 0;
- + sgt[i].length = frag->size;
- + sgt[i].extension = 0;
- + sgt[i].final = 0;
- +
- + DPA_BUG_ON(!skb_frag_page(frag));
- + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
- + dma_dir);
- + if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
- + dev_err(dpa_sg_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sg_map_failed;
- + }
- +
- + /* keep the offset in the address */
- + sgt[i].addr_hi = (uint8_t)upper_32_bits(addr);
- + sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
- + }
- + sgt[i - 1].final = 1;
- +
- + fd->length20 = skb->len;
- + fd->offset = priv->tx_headroom;
- +
- + /* DMA map the SGT page */
- + buffer_start = (void *)sgt - dpa_fd_offset(fd);
- + /* Can't write at "negative" offset in buffer_start, because this skb
- + * may not have been allocated by us.
- + */
- + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
- +
- + addr = dma_map_single(dpa_bp->dev, buffer_start,
- + priv->tx_headroom + sizeof(struct qm_sg_entry) * (1 + nr_frags),
- + dma_dir);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sgt_map_failed;
- + }
- +
- + fd->bpid = dpa_bp->bpid;
- + fd->addr_hi = (uint8_t)upper_32_bits(addr);
- + fd->addr_lo = lower_32_bits(addr);
- +
- + return 0;
- +
- +sgt_map_failed:
- +sg_map_failed:
- + for (j = 0; j < i; j++)
- + dma_unmap_page(dpa_sg_bp->dev, qm_sg_addr(&sgt[j]),
- + be32_to_cpu(sgt[j].length), dma_dir);
- +sg0_map_failed:
- +csum_failed:
- + put_page(virt_to_head_page(sgt_buf));
- +
- + return err;
- +}
- +
- +static int __hot dpa_generic_tx(struct sk_buff *skb, struct net_device *netdev)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
- + struct dpa_percpu_priv_s *percpu_priv =
- + raw_cpu_ptr(priv->percpu_priv);
- + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
- + struct dpa_bp *bp = priv->draining_tx_bp;
- + struct dpa_bp *sg_bp = priv->draining_tx_sg_bp;
- + struct sk_buff **skbh = NULL;
- + dma_addr_t addr;
- + struct qm_fd fd;
- + int queue_mapping;
- + struct qman_fq *egress_fq;
- + const bool nonlinear = skb_is_nonlinear(skb);
- + int i = 0, err = 0;
- + int *countptr;
- +
- + if (nonlinear && skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES) {
- + err = generic_skb_to_sg_fd(priv, skb, &fd);
- + if (unlikely(err < 0))
- + goto sg_failed;
- + percpu_priv->tx_frag_skbuffs++;
- + addr = qm_fd_addr(&fd);
- + } else {
- + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
- + struct sk_buff *skb_new;
- +
- + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
- + if (unlikely(!skb_new)) {
- + percpu_stats->tx_errors++;
- + kfree_skb(skb);
- + goto done;
- + }
- +
- + kfree_skb(skb);
- + skb = skb_new;
- + }
- +
- + clear_fd(&fd);
- +
- + /* store skb backpointer to release the skb later */
- + skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
- + *skbh = skb;
- +
- + /* do this before dma_map_single(), because we may need to write
- + * into the skb.
- + */
- + err = dpa_generic_tx_csum(priv, skb, &fd,
- + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
- + if (unlikely(err < 0)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(netdev, "HW csum error: %d\n", err);
- + return err;
- + }
- +
- + addr = dma_map_single(bp->dev, skbh,
- + skb->len + priv->tx_headroom, DMA_TO_DEVICE);
- + if (unlikely(dma_mapping_error(bp->dev, addr))) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(netdev, "dma_map_single() failed\n");
- + goto dma_mapping_failed;
- + }
- +
- + fd.format = qm_fd_contig;
- + fd.length20 = skb->len;
- + fd.offset = priv->tx_headroom;
- + fd.addr_hi = (uint8_t)upper_32_bits(addr);
- + fd.addr_lo = lower_32_bits(addr);
- + /* fd.cmd |= FM_FD_CMD_FCO; */
- + fd.bpid = bp->bpid;
- + }
- +
- + dpa_generic_drain_bp(bp, 1);
- + dpa_generic_drain_sg_bp(sg_bp, 1);
- +
- + queue_mapping = dpa_get_queue_mapping(skb);
- + egress_fq = priv->egress_fqs[queue_mapping];
- +
- + for (i = 0; i < 100000; i++) {
- + err = qman_enqueue(egress_fq, &fd, 0);
- + if (err != -EBUSY)
- + break;
- + }
- +
- + if (unlikely(err < 0)) {
- + percpu_stats->tx_fifo_errors++;
- + goto xmit_failed;
- + }
- +
- + countptr = raw_cpu_ptr(bp->percpu_count);
- + (*countptr)++;
- +
- + percpu_stats->tx_packets++;
- + percpu_stats->tx_bytes += fd.length20;
- + netdev->trans_start = jiffies;
- +
- + goto done;
- +
- +xmit_failed:
- + dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE);
- +sg_failed:
- +dma_mapping_failed:
- + percpu_stats->tx_errors++;
- + dev_kfree_skb(skb);
- +done:
- + return NETDEV_TX_OK;
- +}
- +
- +static int dpa_generic_napi_add(struct net_device *net_dev)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, cpu;
- +
- + for_each_possible_cpu(cpu) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- +
- + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
- + qman_portal_max * sizeof(struct dpa_napi_portal),
- + GFP_KERNEL);
- +
- + if (unlikely(percpu_priv->np == NULL)) {
- + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + for (i = 0; i < qman_portal_max; i++)
- + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
- + dpaa_eth_poll, DPA_GENERIC_NAPI_WEIGHT);
- + }
- +
- + return 0;
- +}
- +
- +static void dpa_generic_napi_del(struct net_device *net_dev)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
- + struct dpa_percpu_priv_s *percpu_priv;
- + int i, cpu;
- +
- + for_each_possible_cpu(cpu) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- +
- + if (percpu_priv->np) {
- + for (i = 0; i < qman_portal_max; i++)
- + netif_napi_del(&percpu_priv->np[i].napi);
- +
- + devm_kfree(net_dev->dev.parent, percpu_priv->np);
- + }
- + }
- +}
- +
- +
- +static int dpa_generic_netdev_init(struct device_node *dpa_node,
- + struct net_device *netdev)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
- + struct device *dev = netdev->dev.parent;
- + const uint8_t *mac_addr;
- + int err;
- +
- + netdev->netdev_ops = &dpa_generic_ops;
- +
- + mac_addr = of_get_mac_address(dpa_node);
- + if (mac_addr == NULL) {
- + if (netif_msg_probe(priv))
- + dev_err(dev, "No virtual MAC address found!\n");
- + return -EINVAL;
- + }
- +
- + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG;
- + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- + netdev->features |= netdev->hw_features;
- + netdev->vlan_features = netdev->features;
- +
- + memcpy(netdev->perm_addr, mac_addr, netdev->addr_len);
- + memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
- +
- + netdev->ethtool_ops = &dpa_generic_ethtool_ops;
- +
- + netdev->needed_headroom = priv->tx_headroom;
- + netdev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
- +
- + err = register_netdev(netdev);
- + if (err < 0) {
- + dev_err(dev, "register_netdev() = %d\n", err);
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static struct dpa_fq_cbs_t generic_fq_cbs = {
- + .rx_defq = { .cb = { .dqrr = dpa_generic_rx_dqrr } },
- + .rx_errq = { .cb = { .dqrr = dpa_generic_rx_err_dqrr } },
- + .egress_ern = { .cb = { .ern = dpa_generic_ern } }
- +};
- +
- +static struct fqid_cell *__fq_alloc(struct device *dev,
- + int num_ranges,
- + const void *fqids_off)
- +{
- + struct fqid_cell *fqids;
- + int i;
- +
- + fqids = kzalloc(sizeof(*fqids) * num_ranges, GFP_KERNEL);
- + if (fqids == NULL)
- + return NULL;
- +
- + /* convert to CPU endianess */
- + for (i = 0; i < num_ranges; i++) {
- + fqids[i].start = be32_to_cpup(fqids_off +
- + i * sizeof(*fqids));
- + fqids[i].count = be32_to_cpup(fqids_off +
- + i * sizeof(*fqids) + sizeof(__be32));
- + }
- +
- + return fqids;
- +}
- +
- +static struct list_head *dpa_generic_fq_probe(struct platform_device *_of_dev,
- + struct fm_port *tx_port)
- +{
- + struct device *dev = &_of_dev->dev;
- + struct device_node *oh_node = NULL;
- + struct device_node *onic_node = NULL;
- + struct fqid_cell *fqids;
- + const void *fqids_off;
- + struct dpa_fq *fq, *tmp;
- + struct list_head *list;
- + int num_ranges;
- + int i, lenp;
- +
- + onic_node = dev->of_node;
- +
- + list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- + if (!list) {
- + dev_err(dev, "Cannot allocate space for frame queues list\n");
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + INIT_LIST_HEAD(list);
- +
- + /* RX queues (RX error, RX default) are specified in Rx O/H port node */
- + oh_node = get_rx_op_port_node(_of_dev);
- + fqids_off = of_get_property(oh_node, "fsl,qman-frame-queues-oh", &lenp);
- + if (fqids_off == NULL) {
- + dev_err(dev, "Need Rx FQ definition in dts for generic devices\n");
- + return ERR_PTR(-EINVAL);
- + }
- + of_node_put(oh_node);
- +
- + num_ranges = lenp / sizeof(*fqids);
- + if (num_ranges != 2) {
- + dev_err(dev, "Need 2 Rx FQ definitions in dts for generic devices\n");
- + return ERR_PTR(-EINVAL);
- + }
- +
- + fqids = __fq_alloc(dev, num_ranges, fqids_off);
- + if (!dpa_fq_alloc(dev, fqids[0].start, fqids[0].count, list,
- + FQ_TYPE_RX_ERROR) ||
- + !dpa_fq_alloc(dev, fqids[1].start, fqids[1].count,
- + list, FQ_TYPE_RX_DEFAULT)) {
- + dev_err(dev, "Cannot allocate space for default frame queues\n");
- + return ERR_PTR(-ENOMEM);
- + }
- + kfree(fqids);
- +
- + /* TX queues */
- + fqids_off = of_get_property(onic_node, "fsl,qman-frame-queues-tx",
- + &lenp);
- + if (fqids_off == NULL) {
- + dev_err(dev, "Need Tx FQ definition in dts for generic devices\n");
- + return ERR_PTR(-EINVAL);
- + }
- +
- + num_ranges = lenp / sizeof(*fqids);
- + fqids = __fq_alloc(dev, num_ranges, fqids_off);
- + for (i = 0; i < num_ranges; i++) {
- + if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
- + FQ_TYPE_TX)) {
- + dev_err(dev, "_dpa_fq_alloc() failed\n");
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- + kfree(fqids);
- +
- + /* optional RX PCD queues */
- + lenp = 0;
- + fqids_off = of_get_property(onic_node,
- + "fsl,qman-frame-queues-rx", &lenp);
- + num_ranges = lenp / sizeof(*fqids);
- + fqids = __fq_alloc(dev, num_ranges, fqids_off);
- + for (i = 0; i < num_ranges; i++) {
- + if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
- + FQ_TYPE_RX_PCD)) {
- + dev_err(dev, "_dpa_fq_alloc() failed\n");
- + return ERR_PTR(-ENOMEM);
- + }
- + }
- + kfree(fqids);
- +
- + list_for_each_entry_safe(fq, tmp, list, list) {
- + if (fq->fq_type == FQ_TYPE_TX)
- + fq->channel = fm_get_tx_port_channel(tx_port);
- + }
- +
- + return list;
- +}
- +
- +static void dpa_generic_ern(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_mr_entry *msg)
- +{
- + struct net_device *netdev;
- + const struct dpa_generic_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct qm_fd fd = msg->ern.fd;
- +
- + netdev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(netdev);
- + /* Non-migratable context, safe to use raw_cpu_ptr */
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- + percpu_priv->stats.tx_dropped++;
- + percpu_priv->stats.tx_fifo_errors++;
- + count_ern(percpu_priv, msg);
- +
- + /* release this buffer into the draining buffer pool */
- + dpa_fd_release(netdev, &fd);
- +}
- +
- +static int dpa_generic_rx_bp_probe(struct platform_device *_of_dev,
- + struct fm_port *rx_port,
- + int *rx_bp_count,
- + struct dpa_bp **rx_bp,
- + struct dpa_buffer_layout_s **rx_buf_layout)
- +{
- + struct device *dev = &_of_dev->dev;
- + struct fm_port_params params;
- + struct dpa_bp *bp = NULL;
- + int bp_count = 0;
- + int bpid;
- + const __be32 *bpool_cfg = NULL;
- + struct device_node *dev_node = NULL;
- + struct device_node *oh_node = NULL;
- + struct dpa_buffer_layout_s *buf_layout = NULL;
- + int lenp = 0;
- + int na = 0, ns = 0;
- + int err = 0, i = 0;
- +
- + oh_node = get_rx_op_port_node(_of_dev);
- +
- + bp_count = of_count_phandle_with_args(oh_node,
- + "fsl,bman-buffer-pools", NULL);
- + if (bp_count <= 0) {
- + dev_err(dev, "Missing buffer pool handles from onic node from device tree\n");
- + return -EINVAL;
- + }
- +
- + bp = devm_kzalloc(dev, bp_count * sizeof(*bp), GFP_KERNEL);
- + if (unlikely(bp == NULL)) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + err = -ENOMEM;
- + goto _return_of_node_put;
- + }
- +
- + dev_node = of_find_node_by_path("/");
- + if (unlikely(dev_node == NULL)) {
- + dev_err(dev, "of_find_node_by_path(/) failed\n");
- + err = -EINVAL;
- + goto _return_of_node_put;
- + }
- +
- + na = of_n_addr_cells(dev_node);
- + ns = of_n_size_cells(dev_node);
- +
- + of_node_put(dev_node);
- +
- + for (i = 0; i < bp_count; i++) {
- + dev_node = of_parse_phandle(oh_node,
- + "fsl,bman-buffer-pools", i);
- + if (dev_node == NULL) {
- + dev_err(dev, "Cannot find buffer pool node in the device tree\n");
- + err = -EINVAL;
- + goto _return_of_node_put;
- + }
- +
- + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
- + if (err) {
- + dev_err(dev, "Cannot find buffer pool ID in the buffer pool node in the device tree\n");
- + goto _return_of_node_put;
- + }
- +
- + bp[i].bpid = (uint8_t)bpid;
- +
- + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
- + &lenp);
- + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
- + bp[i].config_count = (int)of_read_number(bpool_cfg, ns);
- + bp[i].size = of_read_number(bpool_cfg + ns, ns);
- + bp[i].paddr = 0;
- + bp[i].seed_pool = false;
- + } else {
- + dev_err(dev, "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
- + dev_node->full_name);
- + err = -EINVAL;
- + goto _return_of_node_put;
- + }
- +
- + bp[i].percpu_count = devm_alloc_percpu(dev,
- + *bp[i].percpu_count);
- + }
- +
- + of_node_put(oh_node);
- +
- + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
- + if (!buf_layout) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + err = -ENOMEM;
- + goto _return_of_node_put;
- + }
- +
- + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
- + buf_layout->parse_results = false;
- + buf_layout->hash_results = false;
- + buf_layout->time_stamp = false;
- + fm_port_get_buff_layout_ext_params(rx_port, ¶ms);
- + buf_layout->manip_extra_space = params.manip_extra_space;
- + /* a value of zero for data alignment means "don't care", so align to
- + * a non-zero value to prevent FMD from using its own default
- + */
- + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
- +
- + *rx_buf_layout = buf_layout;
- + *rx_bp = bp;
- + *rx_bp_count = bp_count;
- +
- + return 0;
- +
- +_return_of_node_put:
- + if (dev_node)
- + of_node_put(dev_node);
- +
- + return err;
- +}
- +
- +static int dpa_generic_tx_bp_probe(struct platform_device *_of_dev,
- + struct fm_port *tx_port,
- + struct dpa_bp **draining_tx_bp,
- + struct dpa_bp **draining_tx_sg_bp,
- + struct dpa_buffer_layout_s **tx_buf_layout)
- +{
- + struct device *dev = &_of_dev->dev;
- + struct fm_port_params params;
- + struct dpa_bp *bp = NULL;
- + struct dpa_bp *bp_sg = NULL;
- + struct dpa_buffer_layout_s *buf_layout = NULL;
- +
- + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
- + if (!buf_layout) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
- + buf_layout->parse_results = true;
- + buf_layout->hash_results = true;
- + buf_layout->time_stamp = false;
- +
- + fm_port_get_buff_layout_ext_params(tx_port, &params);
- + buf_layout->manip_extra_space = params.manip_extra_space;
- + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
- +
- + bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL);
- + if (unlikely(bp == NULL)) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + bp->size = dpa_bp_size(buf_layout);
- + bp->percpu_count = devm_alloc_percpu(dev, *bp->percpu_count);
- + bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
- +
- + *draining_tx_bp = bp;
- +
- + bp_sg = devm_kzalloc(dev, sizeof(*bp_sg), GFP_KERNEL);
- + if (unlikely(bp_sg == NULL)) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + bp_sg->size = dpa_bp_size(buf_layout);
- + bp_sg->percpu_count = alloc_percpu(*bp_sg->percpu_count);
- + bp_sg->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
- +
- + *draining_tx_sg_bp = bp_sg;
- +
- + *tx_buf_layout = buf_layout;
- +
- + return 0;
- +}
- +
- +static int dpa_generic_buff_dealloc_probe(struct platform_device *_of_dev,
- + int *disable_buff_dealloc)
- +{
- + struct device *dev = &_of_dev->dev;
- + const phandle *disable_handle = NULL;
- + int lenp = 0;
- + int err = 0;
- +
- + disable_handle = of_get_property(dev->of_node,
- + "fsl,disable_buff_dealloc", &lenp);
- + if (disable_handle != NULL)
- + *disable_buff_dealloc = 1;
- +
- + return err;
- +}
- +
- +static int dpa_generic_port_probe(struct platform_device *_of_dev,
- + struct fm_port **rx_port,
- + struct fm_port **tx_port)
- +{
- + struct device *dev = &_of_dev->dev;
- + struct device_node *dev_node = NULL;
- + struct device_node *onic_node = NULL;
- + int num_ports = 0;
- + int err = 0;
- +
- + onic_node = dev->of_node;
- +
- + num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
- + if (num_ports != 2) {
- + dev_err(dev, "There should be two OH ports in device tree (one for RX, one for TX\n");
- + return -EINVAL;
- + }
- +
- + dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", RX);
- + if (dev_node == NULL) {
- + dev_err(dev, "Cannot find Rx OH port node in device tree\n");
- + return err;
- + }
- +
- + err = get_port_ref(dev_node, rx_port);
- + if (err) {
- + dev_err(dev, "Cannot read Rx OH port node in device tree\n");
- + return err;
- + }
- +
- + dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", TX);
- + if (dev_node == NULL) {
- + dev_err(dev, "Cannot find Tx OH port node in device tree\n");
- + return -EFAULT;
- + }
- +
- + err = get_port_ref(dev_node, tx_port);
- + if (err) {
- + dev_err(dev, "Cannot read Tx OH port node in device tree\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static inline void dpa_generic_setup_ingress(
- + const struct dpa_generic_priv_s *priv,
- + struct dpa_fq *fq,
- + const struct qman_fq *template)
- +{
- + fq->fq_base = *template;
- + fq->net_dev = priv->net_dev;
- +
- + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
- + fq->channel = priv->channel;
- +}
- +
- +static inline void dpa_generic_setup_egress(
- + const struct dpa_generic_priv_s *priv,
- + struct dpa_fq *fq,
- + struct fm_port *port,
- + const struct qman_fq *template)
- +{
- + fq->fq_base = *template;
- + fq->net_dev = priv->net_dev;
- +
- + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- + fq->channel = fm_get_tx_port_channel(port);
- +}
- +
- +static void dpa_generic_fq_setup(struct dpa_generic_priv_s *priv,
- + const struct dpa_fq_cbs_t *fq_cbs,
- + struct fm_port *tx_port)
- +{
- + struct dpa_fq *fq;
- + int egress_cnt = 0;
- +
- + /* Initialize each FQ in the list */
- + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- + switch (fq->fq_type) {
- + case FQ_TYPE_RX_DEFAULT:
- + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- + break;
- + case FQ_TYPE_RX_ERROR:
- + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_errq);
- + break;
- + case FQ_TYPE_RX_PCD:
- + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
- + break;
- + case FQ_TYPE_TX:
- + dpa_generic_setup_egress(priv, fq,
- + tx_port, &fq_cbs->egress_ern);
- + /* If we have more Tx queues than the number of cores,
- + * just ignore the extra ones.
- + */
- + if (egress_cnt < DPAA_ETH_TX_QUEUES)
- + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- + break;
- + default:
- + dev_warn(priv->net_dev->dev.parent,
- + "Unknown FQ type detected!\n");
- + break;
- + }
- + }
- +
- + /* The number of Tx queues may be smaller than the number of cores, if
- + * the Tx queue range is specified in the device tree instead of being
- + * dynamically allocated.
- + * Make sure all CPUs receive a corresponding Tx queue.
- + */
- + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
- + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
- + if (fq->fq_type != FQ_TYPE_TX)
- + continue;
- + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- + if (egress_cnt == DPAA_ETH_TX_QUEUES)
- + break;
- + }
- + }
- +}
- +
- +static int dpa_generic_fq_init(struct dpa_fq *dpa_fq, int disable_buff_dealloc)
- +{
- + int _errno;
- + struct device *dev;
- + struct qman_fq *fq;
- + struct qm_mcc_initfq initfq;
- +
- + dev = dpa_fq->net_dev->dev.parent;
- +
- + if (dpa_fq->fqid == 0)
- + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
- +
- + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
- + if (_errno) {
- + dev_err(dev, "qman_create_fq() failed\n");
- + return _errno;
- + }
- + fq = &dpa_fq->fq_base;
- +
- + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
- + /* FIXME: why would we want to keep an empty FQ in cache? */
- + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
- +
- + /* FQ placement */
- + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
- +
- + initfq.fqd.dest.channel = dpa_fq->channel;
- + initfq.fqd.dest.wq = dpa_fq->wq;
- +
- + if (dpa_fq->fq_type == FQ_TYPE_TX && !disable_buff_dealloc) {
- + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- + /* ContextA: A2V=1 (contextA A2 field is valid)
- + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
- + */
- + initfq.fqd.context_a.hi = 0x10000000;
- + initfq.fqd.context_a.lo = 0x80000000;
- + }
- +
- + /* Initialization common to all ingress queues */
- + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- + initfq.fqd.fq_ctrl |=
- + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
- + initfq.fqd.context_a.stashing.exclusive =
- + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
- + QM_STASHING_EXCL_ANNOTATION;
- + initfq.fqd.context_a.stashing.data_cl = 2;
- + initfq.fqd.context_a.stashing.annotation_cl = 1;
- + initfq.fqd.context_a.stashing.context_cl =
- + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
- + }
- +
- + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
- + if (_errno < 0) {
- + dev_err(dev, "qman_init_fq(%u) = %d\n",
- + qman_fq_fqid(fq), _errno);
- + qman_destroy_fq(fq, 0);
- + return _errno;
- + }
- +
- + dpa_fq->fqid = qman_fq_fqid(fq);
- +
- + return 0;
- +}
- +
- +static int dpa_generic_fq_create(struct net_device *netdev,
- + struct list_head *dpa_fq_list,
- + struct fm_port *tx_port)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
- + struct dpa_fq *fqs = NULL, *tmp = NULL;
- + struct task_struct *kth;
- + int err = 0;
- + int channel;
- +
- + INIT_LIST_HEAD(&priv->dpa_fq_list);
- +
- + list_replace_init(dpa_fq_list, &priv->dpa_fq_list);
- +
- + channel = dpa_get_channel();
- + if (channel < 0)
- + return channel;
- + priv->channel = (uint16_t)channel;
- +
- + /* Start a thread that will walk the cpus with affine portals
- + * and add this pool channel to each's dequeue mask.
- + */
- + kth = kthread_run(dpaa_eth_add_channel,
- + (void *)(unsigned long)priv->channel,
- + "dpaa_%p:%d", netdev, priv->channel);
- + if (!kth)
- + return -ENOMEM;
- +
- + dpa_generic_fq_setup(priv, &generic_fq_cbs, tx_port);
- +
- + /* Add the FQs to the interface, and make them active */
- + list_for_each_entry_safe(fqs, tmp, &priv->dpa_fq_list, list) {
- + err = dpa_generic_fq_init(fqs, priv->disable_buff_dealloc);
- + if (err)
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int dpa_generic_bp_create(struct net_device *net_dev,
- + int rx_bp_count,
- + struct dpa_bp *rx_bp,
- + struct dpa_buffer_layout_s *rx_buf_layout,
- + struct dpa_bp *draining_tx_bp,
- + struct dpa_bp *draining_tx_sg_bp,
- + struct dpa_buffer_layout_s *tx_buf_layout)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
- + int err = 0;
- +
- + /* TODO: multiple Rx bps */
- + priv->rx_bp_count = rx_bp_count;
- + priv->rx_bp = rx_bp;
- + priv->rx_buf_layout = rx_buf_layout;
- + priv->draining_tx_bp = draining_tx_bp;
- + priv->draining_tx_sg_bp = draining_tx_sg_bp;
- + priv->tx_buf_layout = tx_buf_layout;
- +
- + err = dpa_bp_alloc(priv->rx_bp);
- + if (err < 0) {
- + priv->rx_bp = NULL;
- + return err;
- + }
- +
- + err = dpa_bp_alloc(priv->draining_tx_bp);
- + if (err < 0) {
- + priv->draining_tx_bp = NULL;
- + return err;
- + }
- +
- + err = dpa_bp_alloc(priv->draining_tx_sg_bp);
- + if (err < 0) {
- + priv->draining_tx_sg_bp = NULL;
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static void dpa_generic_relase_bp(struct dpa_bp *bp)
- +{
- + if (!bp)
- + return;
- +
- + if (!atomic_dec_and_test(&bp->refs))
- + return;
- +
- + if (bp->free_buf_cb)
- + dpa_bp_drain(bp);
- +
- + bman_free_pool(bp->pool);
- +
- + if (bp->dev)
- + platform_device_unregister(to_platform_device(bp->dev));
- +}
- +
- +static void dpa_generic_bp_free(struct dpa_generic_priv_s *priv)
- +{
- + int i = 0;
- +
- + /* release the rx bpools */
- + for (i = 0; i < priv->rx_bp_count; i++)
- + dpa_generic_relase_bp(&priv->rx_bp[i]);
- +
- + /* release the tx draining bpools */
- + dpa_generic_relase_bp(priv->draining_tx_bp);
- + dpa_generic_relase_bp(priv->draining_tx_sg_bp);
- +}
- +
- +static int dpa_generic_remove(struct platform_device *of_dev)
- +{
- + int err;
- + struct device *dev;
- + struct net_device *net_dev;
- + struct dpa_generic_priv_s *priv;
- +
- + dev = &of_dev->dev;
- + net_dev = dev_get_drvdata(dev);
- + priv = netdev_priv(net_dev);
- +
- + dpaa_eth_generic_sysfs_remove(dev);
- +
- + dev_set_drvdata(dev, NULL);
- + unregister_netdev(net_dev);
- +
- + err = dpa_fq_free(dev, &priv->dpa_fq_list);
- +
- + dpa_generic_napi_del(net_dev);
- +
- + dpa_generic_bp_free(priv);
- +
- + free_netdev(net_dev);
- +
- + return err;
- +}
- +
- +static int dpa_generic_eth_probe(struct platform_device *_of_dev)
- +{
- + struct device *dev = &_of_dev->dev;
- + struct device_node *dpa_node = dev->of_node;
- + struct net_device *netdev = NULL;
- + struct dpa_generic_priv_s *priv;
- + struct fm_port *rx_port = NULL;
- + struct fm_port *tx_port = NULL;
- + struct dpa_percpu_priv_s *percpu_priv;
- + int rx_bp_count = 0;
- + int disable_buff_dealloc = 0;
- + struct dpa_bp *rx_bp = NULL, *draining_tx_bp = NULL;
- + struct dpa_bp *draining_tx_sg_bp = NULL;
- + struct dpa_buffer_layout_s *rx_buf_layout = NULL, *tx_buf_layout = NULL;
- + struct list_head *dpa_fq_list;
- + static u8 generic_idx;
- + int err = 0;
- + int i = 0;
- +
- + if (!of_device_is_available(dpa_node))
- + return -ENODEV;
- +
- + err = dpa_generic_port_probe(_of_dev, &tx_port, &rx_port);
- + if (err < 0)
- + return err;
- +
- + err = dpa_generic_rx_bp_probe(_of_dev, rx_port, &rx_bp_count,
- + &rx_bp, &rx_buf_layout);
- + if (err < 0)
- + return err;
- +
- + err = dpa_generic_tx_bp_probe(_of_dev, tx_port, &draining_tx_bp,
- + &draining_tx_sg_bp, &tx_buf_layout);
- + if (err < 0)
- + return err;
- +
- + dpa_fq_list = dpa_generic_fq_probe(_of_dev, tx_port);
- + if (IS_ERR(dpa_fq_list))
- + return PTR_ERR(dpa_fq_list);
- +
- + err = dpa_generic_buff_dealloc_probe(_of_dev, &disable_buff_dealloc);
- + if (err < 0)
- + return err;
- +
- + /* just one queue for now */
- + netdev = alloc_etherdev_mq(sizeof(*priv), 1);
- + if (!netdev) {
- + dev_err(dev, "alloc_etherdev_mq() failed\n");
- + return -ENOMEM;
- + }
- +
- + SET_NETDEV_DEV(netdev, dev);
- + dev_set_drvdata(dev, netdev);
- + priv = netdev_priv(netdev);
- + priv->net_dev = netdev;
- + sprintf(priv->if_type, "generic%d", generic_idx++);
- + priv->msg_enable = netif_msg_init(generic_debug, -1);
- + priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
- +
- + init_timer(&priv->timer);
- + priv->timer.data = (unsigned long)priv;
- + priv->timer.function = dpa_generic_draining_timer;
- +
- + err = dpa_generic_bp_create(netdev, rx_bp_count, rx_bp, rx_buf_layout,
- + draining_tx_bp, draining_tx_sg_bp, tx_buf_layout);
- + if (err < 0)
- + goto bp_create_failed;
- +
- + priv->disable_buff_dealloc = disable_buff_dealloc;
- +
- + err = dpa_generic_fq_create(netdev, dpa_fq_list, rx_port);
- + if (err < 0)
- + goto fq_create_failed;
- +
- + priv->tx_headroom = dpa_get_headroom(tx_buf_layout);
- + priv->rx_headroom = dpa_get_headroom(rx_buf_layout);
- + priv->rx_port = rx_port;
- + priv->tx_port = tx_port;
- + priv->mac_dev = NULL;
- +
- +
- + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
- + if (priv->percpu_priv == NULL) {
- + dev_err(dev, "devm_alloc_percpu() failed\n");
- + err = -ENOMEM;
- + goto alloc_percpu_failed;
- + }
- + for_each_online_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- + memset(percpu_priv, 0, sizeof(*percpu_priv));
- + }
- +
- + /* Initialize NAPI */
- + err = dpa_generic_napi_add(netdev);
- + if (err < 0)
- + goto napi_add_failed;
- +
- + err = dpa_generic_netdev_init(dpa_node, netdev);
- + if (err < 0)
- + goto netdev_init_failed;
- +
- + dpaa_eth_generic_sysfs_init(&netdev->dev);
- +
- + pr_info("fsl_dpa_generic: Probed %s interface as %s\n",
- + priv->if_type, netdev->name);
- +
- + return 0;
- +
- +netdev_init_failed:
- +napi_add_failed:
- + dpa_generic_napi_del(netdev);
- +alloc_percpu_failed:
- + if (netdev)
- + dpa_fq_free(dev, &priv->dpa_fq_list);
- +fq_create_failed:
- +bp_create_failed:
- + if (netdev)
- + dpa_generic_bp_free(priv);
- + dev_set_drvdata(dev, NULL);
- + if (netdev)
- + free_netdev(netdev);
- +
- + return err;
- +}
- +
- +static int __init __cold dpa_generic_load(void)
- +{
- + int _errno;
- +
- + pr_info(KBUILD_MODNAME ": " DPA_GENERIC_DESCRIPTION "\n");
- +
- + /* initialise dpaa_eth mirror values */
- + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
- + dpa_max_frm = fm_get_max_frm();
- +
- + _errno = platform_driver_register(&dpa_generic_driver);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + }
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + return _errno;
- +}
- +
- +/* waiting for all referenced ports to be initialized
- + * by other kernel modules (proxy ethernet, offline_port)
- + */
- +late_initcall(dpa_generic_load);
- +
- +static void __exit __cold dpa_generic_unload(void)
- +{
- + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + platform_driver_unregister(&dpa_generic_driver);
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +}
- +module_exit(dpa_generic_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
- @@ -0,0 +1,90 @@
- +/* Copyright 2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __DPA_ETH_GENERIC_H
- +#define __DPA_ETH_GENERIC_H
- +
- +#include "lnxwrp_fsl_fman.h"
- +#include "dpaa_eth.h"
- +
- +struct dpa_generic_priv_s {
- + struct net_device *net_dev;
- + /* use the same percpu_priv as other DPAA Ethernet drivers */
- + struct dpa_percpu_priv_s __percpu *percpu_priv;
- +
- + /* up to 4 bps supported for RX */
- + int rx_bp_count;
- + struct dpa_bp *rx_bp;
- + struct dpa_buffer_layout_s *rx_buf_layout;
- +
- + struct dpa_bp *draining_tx_bp;
- + struct dpa_bp *draining_tx_sg_bp;
- + struct dpa_buffer_layout_s *tx_buf_layout;
- +
- + /* Store here the needed Tx headroom for convenience and speed
- + * (even though it can be computed based on the fields of buf_layout)
- + */
- + uint16_t tx_headroom;
- + uint16_t rx_headroom;
- +
- + /* In some scenarios, when VSP are not enabled on the Tx O/H port,
- + * the buffers will be released by other hardware modules
- + */
- + int disable_buff_dealloc;
- +
- + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
- +
- + struct fm_port *rx_port;
- + struct fm_port *tx_port;
- +
- + /* oNIC can have limited control capabilities over a MAC device */
- + struct mac_device *mac_dev;
- +
- + uint16_t channel; /* "fsl,qman-channel-id" */
- + struct list_head dpa_fq_list;
- +
- + uint32_t msg_enable; /* net_device message level */
- +
- + struct dpa_buffer_layout_s *buf_layout;
- + char if_type[30];
- +
- + /* periodic drain */
- + struct timer_list timer;
- +};
- +
- +extern const struct ethtool_ops dpa_generic_ethtool_ops;
- +
- +void dpaa_eth_generic_sysfs_init(struct device *dev);
- +void dpaa_eth_generic_sysfs_remove(struct device *dev);
- +int __init dpa_generic_debugfs_module_init(void);
- +void __exit dpa_generic_debugfs_module_exit(void);
- +
- +#endif /* __DPA_ETH_GENERIC_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
- @@ -0,0 +1,201 @@
- +/* Copyright 2014 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/kthread.h>
- +#include <linux/io.h>
- +#include <linux/of_net.h>
- +
- +#include "dpaa_eth_generic.h"
- +#include "mac.h" /* struct mac_device */
- +
- +static ssize_t dpaa_eth_generic_show_addr(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct mac_device *mac_dev = priv->mac_dev;
- +
- + if (mac_dev)
- + return sprintf(buf, "%llx\n",
- + (unsigned long long)mac_dev->res->start);
- + else
- + return sprintf(buf, "none\n");
- +}
- +
- +static ssize_t dpaa_eth_generic_show_type(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + ssize_t res = 0;
- + res = sprintf(buf, "generic\n");
- +
- + return res;
- +}
- +
- +static ssize_t dpaa_eth_generic_show_fqids(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
- + ssize_t bytes = 0;
- + int i = 0;
- + char *str;
- + struct dpa_fq *fq;
- + struct dpa_fq *tmp;
- + struct dpa_fq *prev = NULL;
- + u32 first_fqid = 0;
- + u32 last_fqid = 0;
- + char *prevstr = NULL;
- +
- + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
- + switch (fq->fq_type) {
- + case FQ_TYPE_RX_DEFAULT:
- + str = "Rx default";
- + break;
- + case FQ_TYPE_RX_ERROR:
- + str = "Rx error";
- + break;
- + case FQ_TYPE_RX_PCD:
- + str = "Rx PCD";
- + break;
- + case FQ_TYPE_TX_CONFIRM:
- + str = "Tx default confirmation";
- + break;
- + case FQ_TYPE_TX_CONF_MQ:
- + str = "Tx confirmation (mq)";
- + break;
- + case FQ_TYPE_TX_ERROR:
- + str = "Tx error";
- + break;
- + case FQ_TYPE_TX:
- + str = "Tx";
- + break;
- + default:
- + str = "Unknown";
- + }
- +
- + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
- + str != prevstr)) {
- + if (last_fqid == first_fqid)
- + bytes += sprintf(buf + bytes,
- + "%s: %d\n", prevstr, prev->fqid);
- + else
- + bytes += sprintf(buf + bytes,
- + "%s: %d - %d\n", prevstr,
- + first_fqid, last_fqid);
- + }
- +
- + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
- + last_fqid = fq->fqid;
- + else
- + first_fqid = last_fqid = fq->fqid;
- +
- + prev = fq;
- + prevstr = str;
- + i++;
- + }
- +
- + if (prev) {
- + if (last_fqid == first_fqid)
- + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
- + prev->fqid);
- + else
- + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
- + first_fqid, last_fqid);
- + }
- +
- + return bytes;
- +}
- +
- +static ssize_t dpaa_eth_generic_show_bpids(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + ssize_t bytes = 0;
- + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct dpa_bp *rx_bp = priv->rx_bp;
- + struct dpa_bp *draining_tx_bp = priv->draining_tx_bp;
- + int i = 0;
- +
- + bytes += snprintf(buf + bytes, PAGE_SIZE, "Rx buffer pools:\n");
- + for (i = 0; i < priv->rx_bp_count; i++)
- + bytes += snprintf(buf + bytes, PAGE_SIZE, "%u ",
- + rx_bp[i].bpid);
- +
- + bytes += snprintf(buf + bytes, PAGE_SIZE, "\n");
- + bytes += snprintf(buf + bytes, PAGE_SIZE, "Draining buffer pool:\n");
- + bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n", draining_tx_bp->bpid);
- +
- + return bytes;
- +}
- +
- +static ssize_t dpaa_eth_generic_show_mac_regs(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct mac_device *mac_dev = priv->mac_dev;
- + int n = 0;
- +
- + if (mac_dev)
- + n = fm_mac_dump_regs(mac_dev, buf, n);
- + else
- + return sprintf(buf, "no mac control\n");
- +
- + return n;
- +}
- +
- +static struct device_attribute dpaa_eth_generic_attrs[] = {
- + __ATTR(device_addr, S_IRUGO, dpaa_eth_generic_show_addr, NULL),
- + __ATTR(device_type, S_IRUGO, dpaa_eth_generic_show_type, NULL),
- + __ATTR(fqids, S_IRUGO, dpaa_eth_generic_show_fqids, NULL),
- + __ATTR(bpids, S_IRUGO, dpaa_eth_generic_show_bpids, NULL),
- + __ATTR(mac_regs, S_IRUGO, dpaa_eth_generic_show_mac_regs, NULL),
- +};
- +
- +void dpaa_eth_generic_sysfs_init(struct device *dev)
- +{
- + int i;
- +
- + for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
- + if (device_create_file(dev, &dpaa_eth_generic_attrs[i])) {
- + dev_err(dev, "Error creating sysfs file\n");
- + while (i > 0)
- + device_remove_file(dev,
- + &dpaa_eth_generic_attrs[--i]);
- + return;
- + }
- +}
- +
- +void dpaa_eth_generic_sysfs_remove(struct device *dev)
- +{
- + int i;
- +
- + for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
- + device_remove_file(dev, &dpaa_eth_generic_attrs[i]);
- +}
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
- @@ -0,0 +1,499 @@
- +/* Copyright 2008-2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_platform.h>
- +#include <linux/of_net.h>
- +#include <linux/etherdevice.h>
- +#include <linux/kthread.h>
- +#include <linux/percpu.h>
- +#include <linux/highmem.h>
- +#include <linux/fsl_qman.h>
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_eth_base.h"
- +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
- +#include "mac.h"
- +
- +/* For MAC-based interfaces, we compute the tx needed headroom from the
- + * associated Tx port's buffer layout settings.
- + * For MACless interfaces just use a default value.
- + */
- +#define DPA_DEFAULT_TX_HEADROOM 64
- +
- +#define DPA_DESCRIPTION "FSL DPAA MACless Ethernet driver"
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +
- +MODULE_DESCRIPTION(DPA_DESCRIPTION);
- +
- +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
- +static uint16_t macless_tx_timeout = 1000;
- +module_param(macless_tx_timeout, ushort, S_IRUGO);
- +MODULE_PARM_DESC(macless_tx_timeout, "The MACless Tx timeout in ms");
- +
- +/* forward declarations */
- +static int __cold dpa_macless_start(struct net_device *net_dev);
- +static int __cold dpa_macless_stop(struct net_device *net_dev);
- +static int __cold dpa_macless_set_address(struct net_device *net_dev,
- + void *addr);
- +static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev);
- +
- +static int dpaa_eth_macless_probe(struct platform_device *_of_dev);
- +static netdev_features_t
- +dpa_macless_fix_features(struct net_device *dev, netdev_features_t features);
- +
- +static const struct net_device_ops dpa_macless_ops = {
- + .ndo_open = dpa_macless_start,
- + .ndo_start_xmit = dpa_shared_tx,
- + .ndo_stop = dpa_macless_stop,
- + .ndo_tx_timeout = dpa_timeout,
- + .ndo_get_stats64 = dpa_get_stats64,
- + .ndo_set_mac_address = dpa_macless_set_address,
- + .ndo_set_rx_mode = dpa_macless_set_rx_mode,
- + .ndo_validate_addr = eth_validate_addr,
- +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- + .ndo_select_queue = dpa_select_queue,
- +#endif
- + .ndo_change_mtu = dpa_change_mtu,
- + .ndo_init = dpa_ndo_init,
- + .ndo_set_features = dpa_set_features,
- + .ndo_fix_features = dpa_macless_fix_features,
- +};
- +
- +static const struct of_device_id dpa_macless_match[] = {
- + {
- + .compatible = "fsl,dpa-ethernet-macless"
- + },
- + {}
- +};
- +MODULE_DEVICE_TABLE(of, dpa_macless_match);
- +
- +static struct platform_driver dpa_macless_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME "-macless",
- + .of_match_table = dpa_macless_match,
- + .owner = THIS_MODULE,
- + },
- + .probe = dpaa_eth_macless_probe,
- + .remove = dpa_remove
- +};
- +
- +static const char macless_frame_queues[][25] = {
- + [RX] = "fsl,qman-frame-queues-rx",
- + [TX] = "fsl,qman-frame-queues-tx"
- +};
- +
- +static int __cold dpa_macless_start(struct net_device *net_dev)
- +{
- + const struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
- +
- + netif_tx_start_all_queues(net_dev);
- +
- + if (proxy_dev)
- + dpa_proxy_start(net_dev);
- +
- +
- + return 0;
- +}
- +
- +static int __cold dpa_macless_stop(struct net_device *net_dev)
- +{
- + const struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
- +
- + netif_tx_stop_all_queues(net_dev);
- +
- + if (proxy_dev)
- + dpa_proxy_stop(proxy_dev, net_dev);
- +
- + return 0;
- +}
- +
- +static int dpa_macless_set_address(struct net_device *net_dev, void *addr)
- +{
- + const struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
- + int _errno;
- +
- + _errno = eth_mac_addr(net_dev, addr);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
- + return _errno;
- + }
- +
- + if (proxy_dev) {
- + _errno = dpa_proxy_set_mac_address(proxy_dev, net_dev);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev, "proxy_set_mac_address() = %d\n",
- + _errno);
- + return _errno;
- + }
- + }
- +
- + return 0;
- +}
- +
- +static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev)
- +{
- + const struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
- +
- + if (proxy_dev)
- + dpa_proxy_set_rx_mode(proxy_dev, net_dev);
- +}
- +
- +static netdev_features_t
- +dpa_macless_fix_features(struct net_device *dev, netdev_features_t features)
- +{
- + netdev_features_t unsupported_features = 0;
- +
- + /* In theory we should never be requested to enable features that
- + * we didn't set in netdev->features and netdev->hw_features at probe
- + * time, but double check just to be on the safe side.
- + */
- + unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- + /* We don't support enabling Rx csum through ethtool yet */
- + unsupported_features |= NETIF_F_RXCSUM;
- +
- + features &= ~unsupported_features;
- +
- + return features;
- +}
- +
- +static int dpa_macless_netdev_init(struct device_node *dpa_node,
- + struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
- + struct device *dev = net_dev->dev.parent;
- + const uint8_t *mac_addr;
- +
- + net_dev->netdev_ops = &dpa_macless_ops;
- +
- + if (proxy_dev) {
- + struct mac_device *mac_dev = proxy_dev->mac_dev;
- + net_dev->mem_start = mac_dev->res->start;
- + net_dev->mem_end = mac_dev->res->end;
- +
- + return dpa_netdev_init(net_dev, mac_dev->addr,
- + macless_tx_timeout);
- + } else {
- + /* Get the MAC address from device tree */
- + mac_addr = of_get_mac_address(dpa_node);
- +
- + if (mac_addr == NULL) {
- + if (netif_msg_probe(priv))
- + dev_err(dev, "No MAC address found!\n");
- + return -EINVAL;
- + }
- +
- + return dpa_netdev_init(net_dev, mac_addr,
- + macless_tx_timeout);
- + }
- +}
- +
- +/* Probing of FQs for MACless ports */
- +static int dpa_fq_probe_macless(struct device *dev, struct list_head *list,
- + enum port_type ptype)
- +{
- + struct device_node *np = dev->of_node;
- + const struct fqid_cell *fqids;
- + int num_ranges;
- + int i, lenp;
- +
- + fqids = of_get_property(np, macless_frame_queues[ptype], &lenp);
- + if (fqids == NULL) {
- + dev_err(dev, "Need FQ definition in dts for MACless devices\n");
- + return -EINVAL;
- + }
- +
- + num_ranges = lenp / sizeof(*fqids);
- +
- + /* All ranges defined in the device tree are used as Rx/Tx queues */
- + for (i = 0; i < num_ranges; i++) {
- + if (!dpa_fq_alloc(dev, be32_to_cpu(fqids[i].start),
- + be32_to_cpu(fqids[i].count), list,
- + ptype == RX ? FQ_TYPE_RX_PCD : FQ_TYPE_TX)) {
- + dev_err(dev, "_dpa_fq_alloc() failed\n");
- + return -ENOMEM;
- + }
- + }
- +
- + return 0;
- +}
- +
- + static struct proxy_device *
- +dpa_macless_proxy_probe(struct platform_device *_of_dev)
- +{
- + struct device *dev;
- + const phandle *proxy_prop;
- + struct proxy_device *proxy_dev;
- + struct device_node *proxy_node;
- + struct platform_device *proxy_pdev;
- + int lenp;
- +
- + dev = &_of_dev->dev;
- +
- + proxy_prop = of_get_property(dev->of_node, "proxy", &lenp);
- + if (!proxy_prop)
- + return NULL;
- +
- + proxy_node = of_find_node_by_phandle(*proxy_prop);
- + if (!proxy_node) {
- + dev_err(dev, "Cannot find proxy node\n");
- + return NULL;
- + }
- +
- + proxy_pdev = of_find_device_by_node(proxy_node);
- + if (!proxy_pdev) {
- + of_node_put(proxy_node);
- + dev_err(dev, "Cannot find device represented by proxy node\n");
- + return NULL;
- + }
- +
- + proxy_dev = dev_get_drvdata(&proxy_pdev->dev);
- +
- + of_node_put(proxy_node);
- +
- + return proxy_dev;
- +}
- +
- +static int dpaa_eth_macless_probe(struct platform_device *_of_dev)
- +{
- + int err = 0, i, channel;
- + struct device *dev;
- + struct device_node *dpa_node;
- + struct dpa_bp *dpa_bp;
- + struct dpa_fq *dpa_fq, *tmp;
- + size_t count;
- + struct net_device *net_dev = NULL;
- + struct dpa_priv_s *priv = NULL;
- + struct dpa_percpu_priv_s *percpu_priv;
- + static struct proxy_device *proxy_dev;
- + struct task_struct *kth;
- + static u8 macless_idx;
- +
- + dev = &_of_dev->dev;
- +
- + dpa_node = dev->of_node;
- +
- + if (!of_device_is_available(dpa_node))
- + return -ENODEV;
- +
- + /* Get the buffer pools assigned to this interface */
- + dpa_bp = dpa_bp_probe(_of_dev, &count);
- + if (IS_ERR(dpa_bp))
- + return PTR_ERR(dpa_bp);
- +
- + for (i = 0; i < count; i++)
- + dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
- +
- + proxy_dev = dpa_macless_proxy_probe(_of_dev);
- +
- +
- + /* Allocate this early, so we can store relevant information in
- + * the private area (needed by 1588 code in dpa_mac_probe)
- + */
- + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
- + if (!net_dev) {
- + dev_err(dev, "alloc_etherdev_mq() failed\n");
- + return -ENOMEM;
- + }
- +
- + /* Do this here, so we can be verbose early */
- + SET_NETDEV_DEV(net_dev, dev);
- + dev_set_drvdata(dev, net_dev);
- +
- + priv = netdev_priv(net_dev);
- + priv->net_dev = net_dev;
- + sprintf(priv->if_type, "macless%d", macless_idx++);
- +
- + priv->msg_enable = netif_msg_init(advanced_debug, -1);
- +
- + priv->peer = NULL;
- + priv->mac_dev = NULL;
- + if (proxy_dev) {
- + /* This is a temporary solution for the need of
- + * having main driver upstreamability: adjust_link
- + * is a general function that should work for both
- + * private driver and macless driver with MAC device
- + * control capabilities even if the last will not be
- + * upstreamable.
- + * TODO: find a convenient solution (wrapper over
- + * main priv structure, etc.)
- + */
- + priv->mac_dev = proxy_dev->mac_dev;
- +
- + /* control over proxy's mac device */
- + priv->peer = (void *)proxy_dev;
- + }
- +
- + INIT_LIST_HEAD(&priv->dpa_fq_list);
- +
- + err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, RX);
- + if (!err)
- + err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list,
- + TX);
- + if (err < 0)
- + goto fq_probe_failed;
- +
- + /* bp init */
- + priv->bp_count = count;
- + err = dpa_bp_create(net_dev, dpa_bp, count);
- + if (err < 0)
- + goto bp_create_failed;
- +
- + channel = dpa_get_channel();
- +
- + if (channel < 0) {
- + err = channel;
- + goto get_channel_failed;
- + }
- +
- + priv->channel = (uint16_t)channel;
- +
- + /* Start a thread that will walk the cpus with affine portals
- + * and add this pool channel to each's dequeue mask.
- + */
- + kth = kthread_run(dpaa_eth_add_channel,
- + (void *)(unsigned long)priv->channel,
- + "dpaa_%p:%d", net_dev, priv->channel);
- + if (!kth) {
- + err = -ENOMEM;
- + goto add_channel_failed;
- + }
- +
- + dpa_fq_setup(priv, &shared_fq_cbs, NULL);
- +
- + /* Add the FQs to the interface, and make them active */
- + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
- + /* For MAC-less devices we only get here for RX frame queues
- + * initialization, which are the TX queues of the other
- + * partition.
- + * It is safe to rely on one partition to set the FQ taildrop
- + * threshold for the TX queues of the other partition
- + * because the ERN notifications will be received by the
- + * partition doing qman_enqueue.
- + */
- + err = dpa_fq_init(dpa_fq, true);
- + if (err < 0)
- + goto fq_alloc_failed;
- + }
- +
- + priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
- +
- + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
- +
- + if (priv->percpu_priv == NULL) {
- + dev_err(dev, "devm_alloc_percpu() failed\n");
- + err = -ENOMEM;
- + goto alloc_percpu_failed;
- + }
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- + memset(percpu_priv, 0, sizeof(*percpu_priv));
- + }
- +
- + err = dpa_macless_netdev_init(dpa_node, net_dev);
- + if (err < 0)
- + goto netdev_init_failed;
- +
- + dpaa_eth_sysfs_init(&net_dev->dev);
- +
- + pr_info("fsl_dpa_macless: Probed %s interface as %s\n",
- + priv->if_type, net_dev->name);
- +
- + return 0;
- +
- +netdev_init_failed:
- +alloc_percpu_failed:
- +fq_alloc_failed:
- + if (net_dev)
- + dpa_fq_free(dev, &priv->dpa_fq_list);
- +add_channel_failed:
- +get_channel_failed:
- + if (net_dev)
- + dpa_bp_free(priv);
- +bp_create_failed:
- +fq_probe_failed:
- + dev_set_drvdata(dev, NULL);
- + if (net_dev)
- + free_netdev(net_dev);
- +
- + return err;
- +}
- +
- +static int __init __cold dpa_macless_load(void)
- +{
- + int _errno;
- +
- + pr_info(DPA_DESCRIPTION "\n");
- +
- + /* Initialize dpaa_eth mirror values */
- + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
- + dpa_max_frm = fm_get_max_frm();
- +
- + _errno = platform_driver_register(&dpa_macless_driver);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + }
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + return _errno;
- +}
- +module_init(dpa_macless_load);
- +
- +static void __exit __cold dpa_macless_unload(void)
- +{
- + platform_driver_unregister(&dpa_macless_driver);
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +}
- +module_exit(dpa_macless_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
- @@ -0,0 +1,2156 @@
- +/* Copyright 2015 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/kernel.h>
- +#include <linux/moduleparam.h>
- +
- +#include <net/sock.h>
- +#include <linux/netlink.h>
- +#include <linux/skbuff.h>
- +
- +#include "dpaa_eth_macsec.h"
- +#include "dpaa_eth_common.h"
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- +#include "dpaa_1588.h"
- +#endif
- +
- +static struct sock *nl_sk;
- +static struct macsec_priv_s *macsec_priv[FM_MAX_NUM_OF_MACS];
- +static char *macsec_ifs[FM_MAX_NUM_OF_MACS];
- +static int macsec_ifs_cnt;
- +
- +static char ifs[MAX_LEN];
- +const struct ethtool_ops *dpa_ethtool_ops_prev;
- +static struct ethtool_ops dpa_macsec_ethtool_ops;
- +
- +module_param_string(ifs, ifs, MAX_LEN, 0000);
- +MODULE_PARM_DESC(ifs, "Comma separated interface list");
- +
- +struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev)
- +{
- + return macsec_priv[net_dev->ifindex - 1];
- +}
- +
- +static void macsec_setup_ethtool_ops(struct net_device *net_dev)
- +{
- + /* remember private driver's ethtool ops just once */
- + if (!dpa_ethtool_ops_prev) {
- + dpa_ethtool_ops_prev = net_dev->ethtool_ops;
- +
- + memcpy(&dpa_macsec_ethtool_ops, net_dev->ethtool_ops,
- + sizeof(struct ethtool_ops));
- + dpa_macsec_ethtool_ops.get_sset_count =
- + dpa_macsec_get_sset_count;
- + dpa_macsec_ethtool_ops.get_ethtool_stats =
- + dpa_macsec_get_ethtool_stats;
- + dpa_macsec_ethtool_ops.get_strings =
- + dpa_macsec_get_strings;
- + }
- +
- + net_dev->ethtool_ops = &dpa_macsec_ethtool_ops;
- +}
- +
- +static void macsec_restore_ethtool_ops(struct net_device *net_dev)
- +{
- + net_dev->ethtool_ops = dpa_ethtool_ops_prev;
- +}
- +
- +
- +static int ifname_to_id(char *ifname)
- +{
- + int i;
- +
- + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
- + if (macsec_priv[i]->net_dev &&
- + (strcmp(ifname, macsec_priv[i]->net_dev->name) == 0)) {
- + return i;
- + }
- + }
- +
- + return -1;
- +}
- +
- +static void deinit_macsec(int macsec_id)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + int i;
- +
- + selected_macsec_priv = macsec_priv[macsec_id];
- +
- + if (selected_macsec_priv->en_state == SECY_ENABLED) {
- + for (i = 0; i < NUM_OF_RX_SC; i++) {
- + if (!selected_macsec_priv->rx_sc_dev[i])
- + continue;
- + fm_macsec_secy_rxsa_disable_receive(
- + selected_macsec_priv->fm_ms_secy,
- + selected_macsec_priv->rx_sc_dev[i],
- + selected_macsec_priv->an);
- + pr_debug("disable rx_sa done\n");
- +
- + fm_macsec_secy_delete_rx_sa(
- + selected_macsec_priv->fm_ms_secy,
- + selected_macsec_priv->rx_sc_dev[i],
- + selected_macsec_priv->an);
- + pr_debug("delete rx_sa done\n");
- +
- + fm_macsec_secy_delete_rxsc(
- + selected_macsec_priv->fm_ms_secy,
- + selected_macsec_priv->rx_sc_dev[i]);
- + pr_debug("delete rx_sc done\n");
- + }
- +
- + fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
- + selected_macsec_priv->an);
- + pr_debug("delete tx_sa done\n");
- +
- + fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
- + selected_macsec_priv->fm_ms_secy = NULL;
- + pr_debug("secy free done\n");
- + }
- +
- + if (selected_macsec_priv->en_state != MACSEC_DISABLED) {
- + fm_macsec_disable(selected_macsec_priv->fm_macsec);
- + fm_macsec_free(selected_macsec_priv->fm_macsec);
- + selected_macsec_priv->fm_macsec = NULL;
- + pr_debug("macsec disable and free done\n");
- + }
- +}
- +
- +static void parse_ifs(void)
- +{
- + char *token, *strpos = ifs;
- +
- + while ((token = strsep(&strpos, ","))) {
- + if (strlen(token) == 0)
- + return;
- + else
- + macsec_ifs[macsec_ifs_cnt] = token;
- + macsec_ifs_cnt++;
- + }
- +}
- +
- +static void macsec_exception(handle_t _macsec_priv_s,
- + fm_macsec_exception exception)
- +{
- + struct macsec_priv_s *priv;
- + priv = (struct macsec_priv_s *)_macsec_priv_s;
- +
- + switch (exception) {
- + case (SINGLE_BIT_ECC):
- + dev_warn(priv->mac_dev->dev, "%s:%s SINGLE_BIT_ECC exception\n",
- + KBUILD_BASENAME".c", __func__);
- + break;
- + case (MULTI_BIT_ECC):
- + dev_warn(priv->mac_dev->dev, "%s:%s MULTI_BIT_ECC exception\n",
- + KBUILD_BASENAME".c", __func__);
- + break;
- + default:
- + dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
- + KBUILD_BASENAME".c", __func__, exception);
- + break;
- + }
- +}
- +
- +
- +static void macsec_secy_exception(handle_t _macsec_priv_s,
- + fm_macsec_secy_exception exception)
- +{
- + struct macsec_priv_s *priv;
- + priv = (struct macsec_priv_s *)_macsec_priv_s;
- +
- + switch (exception) {
- + case (SECY_EX_FRAME_DISCARDED):
- + dev_warn(priv->mac_dev->dev,
- + "%s:%s SECY_EX_FRAME_DISCARDED exception\n",
- + KBUILD_BASENAME".c", __func__);
- + break;
- + default:
- + dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
- + KBUILD_BASENAME".c", __func__, exception);
- + break;
- + }
- +}
- +
- +static void macsec_secy_events(handle_t _macsec_priv_s,
- + fm_macsec_secy_event event)
- +{
- + struct macsec_priv_s *priv;
- + priv = (struct macsec_priv_s *)_macsec_priv_s;
- +
- + switch (event) {
- + case (SECY_EV_NEXT_PN):
- + dev_dbg(priv->mac_dev->dev, "%s:%s SECY_EV_NEXT_PN event\n",
- + KBUILD_BASENAME".c", __func__);
- + break;
- + default:
- + dev_dbg(priv->mac_dev->dev, "%s:%s event %d\n",
- + KBUILD_BASENAME".c", __func__, event);
- + break;
- + }
- +}
- +
- +static struct qman_fq *macsec_get_tx_conf_queue(
- + const struct macsec_priv_s *macsec_priv,
- + struct qman_fq *tx_fq)
- +{
- + int i;
- +
- + for (i = 0; i < MACSEC_ETH_TX_QUEUES; i++)
- + if (macsec_priv->egress_fqs[i] == tx_fq)
- + return macsec_priv->conf_fqs[i];
- + return NULL;
- +}
- +
- +/* Initialize qman fqs. Still need to set context_a, specifically the bits
- + * that identify the secure channel.
- + */
- +static int macsec_fq_init(struct dpa_fq *dpa_fq)
- +{
- + struct qman_fq *fq;
- + struct device *dev;
- + struct qm_mcc_initfq initfq;
- + uint32_t sc_phys_id;
- + int _errno, macsec_id;
- +
- + dev = dpa_fq->net_dev->dev.parent;
- + macsec_id = dpa_fq->net_dev->ifindex - 1;
- +
- + if (dpa_fq->fqid == 0)
- + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
- +
- + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
- + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
- +
- + if (_errno) {
- + dev_err(dev, "qman_create_fq() failed\n");
- + return _errno;
- + }
- +
- + fq = &dpa_fq->fq_base;
- +
- + if (dpa_fq->init) {
- + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
- + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
- +
- + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
- +
- + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
- +
- + initfq.fqd.dest.channel = dpa_fq->channel;
- + initfq.fqd.dest.wq = dpa_fq->wq;
- +
- + if (dpa_fq->fq_type == FQ_TYPE_TX) {
- + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- +
- + /* Obtain the TX scId from fman */
- + _errno = fm_macsec_secy_get_txsc_phys_id(
- + macsec_priv[macsec_id]->fm_ms_secy,
- + &sc_phys_id);
- + if (unlikely(_errno < 0)) {
- + dev_err(dev, "fm_macsec_secy_get_txsc_phys_id = %d\n",
- + _errno);
- + return _errno;
- + }
- +
- + /* Write the TX SC-ID in the context of the FQ.
- + * A2V=1 (use the A2 field)
- + * A0V=1 (use the A0 field)
- + * OVOM=1
- + * MCV=1 (MACsec controlled frames)
- + * MACCMD=the TX scId
- + */
- + initfq.fqd.context_a.hi = 0x1a100000 |
- + sc_phys_id << 16;
- + initfq.fqd.context_a.lo = 0x80000000;
- + }
- +
- + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
- + if (_errno < 0) {
- + dev_err(dev, "qman_init_fq(%u) = %d\n",
- + qman_fq_fqid(fq), _errno);
- + qman_destroy_fq(fq, 0);
- + return _errno;
- + }
- + }
- +
- + dpa_fq->fqid = qman_fq_fqid(fq);
- +
- + return 0;
- +}
- +
- +/* Configure and enable secy. */
- +static int enable_secy(struct generic_msg *gen, int *macsec_id)
- +{
- + struct enable_secy *sec;
- + int _errno;
- + struct fm_macsec_secy_params secy_params;
- + struct dpa_fq *dpa_fq, *tmp;
- + struct macsec_priv_s *selected_macsec_priv;
- +
- + sec = &gen->payload.secy;
- +
- + if (sec->macsec_id < 0 || sec->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + _errno = -EINVAL;
- + goto _return;
- + }
- + *macsec_id = sec->macsec_id;
- + selected_macsec_priv = macsec_priv[sec->macsec_id];
- +
- + if (selected_macsec_priv->fm_ms_secy) {
- + pr_err("Secy has already been enabled\n");
- + return -EINVAL;
- + }
- +
- + memset(&secy_params, 0, sizeof(secy_params));
- + secy_params.fm_macsec_h = selected_macsec_priv->fm_macsec;
- + secy_params.num_receive_channels = NUM_OF_RX_SC;
- + secy_params.tx_sc_params.sci = sec->sci;
- +
- + /* Set encryption method */
- + secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_128;
- +#if (DPAA_VERSION >= 11)
- + secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_256;
- +#endif /* (DPAA_VERSION >= 11) */
- + secy_params.exception_f = macsec_secy_exception;
- + secy_params.event_f = macsec_secy_events;
- + secy_params.app_h = selected_macsec_priv;
- +
- + selected_macsec_priv->fm_ms_secy =
- + fm_macsec_secy_config(&secy_params);
- +
- + if (unlikely(selected_macsec_priv->fm_ms_secy == NULL)) {
- + _errno = -EINVAL;
- + goto _return;
- + }
- +
- + /* Configure the insertion mode */
- + if (sec->config_insertion_mode) {
- + _errno = fm_macsec_secy_config_sci_insertion_mode(
- + selected_macsec_priv->fm_ms_secy,
- + sec->sci_insertion_mode);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Configure the frame protection */
- + if (sec->config_protect_frames) {
- + _errno = fm_macsec_secy_config_protect_frames(
- + selected_macsec_priv->fm_ms_secy,
- + sec->protect_frames);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Configure the replay window */
- + if (sec->config_replay_window) {
- + _errno = fm_macsec_secy_config_replay_window(
- + selected_macsec_priv->fm_ms_secy,
- + sec->replay_protect,
- + sec->replay_window);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Configure the validation mode */
- + if (sec->config_validation_mode) {
- + _errno = fm_macsec_secy_config_validation_mode(
- + selected_macsec_priv->fm_ms_secy,
- + sec->validate_frames);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Select the exceptions that will be signaled */
- + if (sec->config_exception) {
- + _errno = fm_macsec_secy_config_exception(
- + selected_macsec_priv->fm_ms_secy,
- + sec->exception,
- + sec->enable_exception);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Select the events that will be signaled */
- + if (sec->config_event) {
- + _errno = fm_macsec_secy_config_event(
- + selected_macsec_priv->fm_ms_secy,
- + sec->event,
- + sec->enable_event);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Configure a point-to-point connection */
- + if (sec->config_point_to_point) {
- + _errno = fm_macsec_secy_config_point_to_point(
- + selected_macsec_priv->fm_ms_secy);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + /* Configure the connection's confidentiality state */
- + if (sec->config_confidentiality) {
- + _errno = fm_macsec_secy_config_confidentiality(
- + selected_macsec_priv->fm_ms_secy,
- + sec->confidentiality_enable,
- + sec->confidentiality_offset);
- + if (unlikely(_errno < 0))
- + goto _return;
- + }
- +
- + _errno = fm_macsec_secy_init(selected_macsec_priv->fm_ms_secy);
- + if (unlikely(_errno < 0))
- + goto _return_fm_macsec_secy_free;
- +
- + list_for_each_entry_safe(dpa_fq,
- + tmp,
- + &selected_macsec_priv->dpa_fq_list,
- + list) {
- + _errno = macsec_fq_init(dpa_fq);
- + if (_errno < 0)
- + goto _return;
- + }
- +
- + return 0;
- +
- +_return_fm_macsec_secy_free:
- + fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
- + selected_macsec_priv->fm_ms_secy = NULL;
- +_return:
- + return _errno;
- +}
- +
- +static int set_macsec_exception(struct generic_msg *gen)
- +{
- + struct set_exception *set_ex;
- + struct macsec_priv_s *selected_macsec_priv;
- + int rv;
- +
- + set_ex = &(gen->payload.set_ex);
- +
- + selected_macsec_priv = macsec_priv[set_ex->macsec_id];
- +
- + rv = fm_macsec_set_exception(selected_macsec_priv->fm_macsec,
- + set_ex->exception,
- + set_ex->enable_exception);
- + if (unlikely(rv < 0))
- + pr_err("error when setting the macsec exception mask\n");
- +
- + return rv;
- +}
- +
- +static int create_tx_sa(struct generic_msg *gen)
- +{
- + struct create_tx_sa *c_tx_sa;
- + macsec_sa_key_t sa_key;
- + int rv;
- + struct macsec_priv_s *selected_macsec_priv;
- +
- + c_tx_sa = &(gen->payload.c_tx_sa);
- +
- + if (c_tx_sa->macsec_id < 0 ||
- + c_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + kfree(c_tx_sa);
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[c_tx_sa->macsec_id];
- +
- + /* set macsec_priv field */
- + selected_macsec_priv->an = c_tx_sa->an;
- +
- + /* because of the algorithms used */
- + if (unlikely(c_tx_sa->sak_len > 32)) {
- + pr_warn("size of secure key is greater than 32 bytes!\n");
- + kfree(c_tx_sa);
- + return -EINVAL;
- + }
- +
- + rv = copy_from_user(&sa_key,
- + c_tx_sa->sak,
- + c_tx_sa->sak_len);
- + if (unlikely(rv != 0)) {
- + pr_err("copy_from_user could not copy %i bytes\n", rv);
- + return -EFAULT;
- + }
- +
- + rv = fm_macsec_secy_create_tx_sa(selected_macsec_priv->fm_ms_secy,
- + c_tx_sa->an,
- + sa_key);
- + if (unlikely(rv < 0))
- + pr_err("error when creating tx sa\n");
- +
- + return rv;
- +}
- +
- +static int modify_tx_sa_key(struct generic_msg *gen)
- +{
- + struct modify_tx_sa_key *tx_sa_key;
- + struct macsec_priv_s *selected_macsec_priv;
- + macsec_sa_key_t sa_key;
- + int rv;
- +
- + tx_sa_key = &(gen->payload.modify_tx_sa_key);
- +
- + if (tx_sa_key->macsec_id < 0 ||
- + tx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- + selected_macsec_priv = macsec_priv[tx_sa_key->macsec_id];
- +
- + /* set macsec_priv field */
- + selected_macsec_priv->an = tx_sa_key->an;
- +
- + if (unlikely(tx_sa_key->sak_len > 32)) {
- + pr_warn("size of secure key is greater than 32 bytes!\n");
- + kfree(tx_sa_key);
- + return -EINVAL;
- + }
- +
- + rv = copy_from_user(&sa_key,
- + tx_sa_key->sak,
- + tx_sa_key->sak_len);
- + if (unlikely(rv != 0)) {
- + pr_err("copy_from_user could not copy %i bytes\n", rv);
- + return -EFAULT;
- + }
- +
- + rv = fm_macsec_secy_txsa_modify_key(selected_macsec_priv->fm_ms_secy,
- + tx_sa_key->an,
- + sa_key);
- + if (unlikely(rv < 0))
- + pr_err("error while modifying the tx sa key\n");
- +
- + return rv;
- +}
- +
- +static int activate_tx_sa(struct generic_msg *gen)
- +{
- + struct activate_tx_sa *a_tx_sa;
- + struct macsec_priv_s *selected_macsec_priv;
- + int rv;
- +
- + a_tx_sa = &(gen->payload.a_tx_sa);
- +
- + if (a_tx_sa->macsec_id < 0 ||
- + a_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + kfree(a_tx_sa);
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[a_tx_sa->macsec_id];
- +
- + rv = fm_macsec_secy_txsa_set_active(selected_macsec_priv->fm_ms_secy,
- + a_tx_sa->an);
- + if (unlikely(rv < 0))
- + pr_err("error when creating tx sa\n");
- +
- + return rv;
- +}
- +
- +static int get_tx_sa_an(struct generic_msg *gen, macsec_an_t *an)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- +
- + if (gen->payload.macsec_id < 0 ||
- + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- +
- + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
- +
- + fm_macsec_secy_txsa_get_active(selected_macsec_priv->fm_ms_secy, an);
- +
- + return 0;
- +}
- +
- +static int create_rx_sc(struct generic_msg *gen)
- +{
- + struct fm_macsec_secy_sc_params params;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *rx_sc_dev;
- + uint32_t sc_phys_id;
- + int i;
- +
- + if (gen->payload.c_rx_sc.macsec_id < 0 ||
- + gen->payload.c_rx_sc.macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- + selected_macsec_priv = macsec_priv[gen->payload.c_rx_sc.macsec_id];
- +
- + for (i = 0; i < NUM_OF_RX_SC; i++)
- + if (!selected_macsec_priv->rx_sc_dev[i])
- + break;
- + if (i == NUM_OF_RX_SC) {
- + pr_err("number of maximum RX_SC's has been reached\n");
- + return -EINVAL;
- + }
- +
- + params.sci = gen->payload.c_rx_sc.sci;
- + params.cipher_suite = SECY_GCM_AES_128;
- +#if (DPAA_VERSION >= 11)
- + params.cipher_suite = SECY_GCM_AES_256;
- +#endif /* (DPAA_VERSION >= 11) */
- +
- + rx_sc_dev = fm_macsec_secy_create_rxsc(selected_macsec_priv->fm_ms_secy,
- + ¶ms);
- +
- + fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
- + rx_sc_dev,
- + &sc_phys_id);
- +
- + selected_macsec_priv->rx_sc_dev[sc_phys_id] = rx_sc_dev;
- +
- + return sc_phys_id;
- +}
- +
- +static int create_rx_sa(struct generic_msg *gen)
- +{
- + struct create_rx_sa *c_rx_sa;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + macsec_sa_key_t sak;
- + int rv;
- +
- + c_rx_sa = &(gen->payload.c_rx_sa);
- +
- + if (unlikely(c_rx_sa->sak_len > 32)) {
- + pr_warn("size of secure key is greater than 32 bytes!\n");
- + return -EINVAL;
- + }
- + rv = copy_from_user(&sak,
- + c_rx_sa->sak,
- + c_rx_sa->sak_len);
- + if (unlikely(rv != 0)) {
- + pr_err("copy_from_user could not copy %i bytes\n", rv);
- + return -EFAULT;
- + }
- +
- + if (c_rx_sa->macsec_id < 0 ||
- + c_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- +
- + selected_macsec_priv = macsec_priv[c_rx_sa->macsec_id];
- +
- + if (c_rx_sa->rx_sc_id < 0 || c_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- +
- + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[c_rx_sa->rx_sc_id];
- +
- + rv = fm_macsec_secy_create_rx_sa(selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + c_rx_sa->an,
- + c_rx_sa->lpn,
- + sak);
- + if (unlikely(rv < 0)) {
- + pr_err("fm_macsec_secy_create_rx_sa failed\n");
- + return -EBUSY;
- + }
- +
- + return 0;
- +}
- +
- +static int modify_rx_sa_key(struct generic_msg *gen)
- +{
- + struct modify_rx_sa_key *rx_sa_key;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc;
- + macsec_sa_key_t sa_key;
- + int rv;
- +
- + rx_sa_key = &(gen->payload.modify_rx_sa_key);
- +
- + if (rx_sa_key->macsec_id < 0 ||
- + rx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- + selected_macsec_priv = macsec_priv[rx_sa_key->macsec_id];
- +
- + if (rx_sa_key->rx_sc_id < 0 || rx_sa_key->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc = selected_macsec_priv->rx_sc_dev[rx_sa_key->rx_sc_id];
- +
- + /* set macsec_priv field */
- + selected_macsec_priv->an = rx_sa_key->an;
- +
- + if (unlikely(rx_sa_key->sak_len > 32)) {
- + pr_warn("size of secure key is greater than 32 bytes!\n");
- + kfree(rx_sa_key);
- + return -EINVAL;
- + }
- +
- + rv = copy_from_user(&sa_key,
- + rx_sa_key->sak,
- + rx_sa_key->sak_len);
- + if (unlikely(rv != 0)) {
- + pr_err("copy_from_user could not copy %i bytes\n", rv);
- + return -EFAULT;
- + }
- +
- + rv = fm_macsec_secy_rxsa_modify_key(selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc,
- + rx_sa_key->an,
- + sa_key);
- + if (unlikely(rv < 0))
- + pr_err("error while modifying the rx sa key\n");
- +
- + return rv;
- +}
- +
- +static int update_npn(struct generic_msg *gen)
- +{
- + struct update_npn *update_npn;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + update_npn = &(gen->payload.update_npn);
- +
- + if (update_npn->macsec_id < 0 ||
- + update_npn->macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- + selected_macsec_priv = macsec_priv[update_npn->macsec_id];
- +
- + if (update_npn->rx_sc_id < 0 || update_npn->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- +
- + selected_rx_sc_dev =
- + selected_macsec_priv->rx_sc_dev[update_npn->rx_sc_id];
- +
- + err = fm_macsec_secy_rxsa_update_next_pn(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + update_npn->an,
- + update_npn->pn);
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_rxsa_update_next_pn failed\n");
- + return -EBUSY;
- + }
- +
- + return 0;
- +}
- +
- +static int update_lpn(struct generic_msg *gen)
- +{
- + struct update_lpn *update_lpn;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + update_lpn = &(gen->payload.update_lpn);
- +
- + if (update_lpn->macsec_id < 0 ||
- + update_lpn->macsec_id >= FM_MAX_NUM_OF_MACS)
- + return -EINVAL;
- + selected_macsec_priv = macsec_priv[update_lpn->macsec_id];
- +
- + if (update_lpn->rx_sc_id < 0 || update_lpn->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc_dev =
- + selected_macsec_priv->rx_sc_dev[update_lpn->rx_sc_id];
- +
- + err = fm_macsec_secy_rxsa_update_lowest_pn(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + update_lpn->an,
- + update_lpn->pn);
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_rxsa_update_lowest_pn failed\n");
- + return -EBUSY;
- + }
- +
- + return 0;
- +}
- +
- +static int activate_rx_sa(struct generic_msg *gen)
- +{
- + struct activate_rx_sa *a_rx_sa;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + a_rx_sa = &(gen->payload.a_rx_sa);
- +
- + if (a_rx_sa->macsec_id < 0 ||
- + a_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[a_rx_sa->macsec_id];
- +
- + if (a_rx_sa->rx_sc_id < 0 || a_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[a_rx_sa->rx_sc_id];
- +
- + err = fm_macsec_secy_rxsa_enable_receive(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + a_rx_sa->an);
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_rxsa_enable_receive failed\n");
- + return -EBUSY;
- + }
- +
- + return 0;
- +}
- +
- +static int get_tx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + int err;
- +
- + if (gen->payload.macsec_id < 0 ||
- + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
- +
- + err = fm_macsec_secy_get_txsc_phys_id(selected_macsec_priv->fm_ms_secy,
- + sc_id);
- +
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_get_txsc_phys_id failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int get_rx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
- +{
- + struct get_rx_sc_id *get_rx_sc_id;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + get_rx_sc_id = &(gen->payload.get_rx_sc_id);
- +
- + if (get_rx_sc_id->macsec_id < 0 ||
- + get_rx_sc_id->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[get_rx_sc_id->macsec_id];
- +
- + if (get_rx_sc_id->rx_sc_id < 0 ||
- + get_rx_sc_id->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc_dev =
- + selected_macsec_priv->rx_sc_dev[get_rx_sc_id->rx_sc_id];
- +
- + err = fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + sc_id);
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_get_rxsc_phys_id failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int get_macsec_revision(struct generic_msg *gen, int *macsec_revision)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + int err;
- +
- + if (gen->payload.macsec_id < 0 ||
- + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
- +
- + err = fm_macsec_get_revision(selected_macsec_priv->fm_macsec,
- + macsec_revision);
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_get_revision failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int rx_sa_disable(struct generic_msg *gen)
- +{
- + struct disable_rx_sa *disable_rx_sa;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + disable_rx_sa = &(gen->payload.d_rx_sa);
- +
- + if (disable_rx_sa->macsec_id < 0 ||
- + disable_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[disable_rx_sa->macsec_id];
- +
- + if (disable_rx_sa->rx_sc_id < 0 ||
- + disable_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc_dev =
- + selected_macsec_priv->rx_sc_dev[disable_rx_sa->rx_sc_id];
- +
- + err = fm_macsec_secy_rxsa_disable_receive(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + selected_macsec_priv->an);
- +
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_rxsa_disable_receive failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int rx_sa_delete(struct generic_msg *gen)
- +{
- + struct delete_rx_sa *delete_rx_sa;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + delete_rx_sa = &(gen->payload.del_rx_sa);
- +
- + if (delete_rx_sa->macsec_id < 0 ||
- + delete_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[delete_rx_sa->macsec_id];
- +
- + if (delete_rx_sa->rx_sc_id < 0 ||
- + delete_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc_dev =
- + selected_macsec_priv->rx_sc_dev[delete_rx_sa->rx_sc_id];
- +
- + err = fm_macsec_secy_delete_rx_sa(selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + selected_macsec_priv->an);
- +
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_delete_rx_sa failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int rx_sc_delete(struct generic_msg *gen)
- +{
- + struct delete_rx_sc *delete_rx_sc;
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err;
- +
- + delete_rx_sc = &(gen->payload.del_rx_sc);
- +
- + if (delete_rx_sc->macsec_id < 0 ||
- + delete_rx_sc->macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[delete_rx_sc->macsec_id];
- +
- + if (delete_rx_sc->rx_sc_id < 0 ||
- + delete_rx_sc->rx_sc_id >= NUM_OF_RX_SC)
- + return -EINVAL;
- + selected_rx_sc_dev =
- + selected_macsec_priv->rx_sc_dev[delete_rx_sc->rx_sc_id];
- +
- + err = fm_macsec_secy_delete_rxsc(selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev);
- +
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_delete_rxsc failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int tx_sa_delete(struct generic_msg *gen)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + int err;
- +
- + if (gen->payload.del_tx_sa.macsec_id < 0 ||
- + gen->payload.del_tx_sa.macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[gen->payload.del_tx_sa.macsec_id];
- +
- + err = fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
- + selected_macsec_priv->an);
- +
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_delete_tx_sa failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int disable_secy(struct generic_msg *gen, int *macsec_id)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + int err;
- +
- + if (gen->payload.macsec_id < 0 ||
- + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
- + *macsec_id = gen->payload.macsec_id;
- +
- + err = fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
- + selected_macsec_priv->fm_ms_secy = NULL;
- +
- + if (unlikely(err < 0)) {
- + pr_err("fm_macsec_secy_free failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static int disable_macsec(struct generic_msg *gen, int *macsec_id)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + int err;
- +
- + if (gen->payload.macsec_id < 0 ||
- + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- +
- + selected_macsec_priv =
- + macsec_priv[gen->payload.macsec_id];
- + *macsec_id = gen->payload.macsec_id;
- +
- + err = fm_macsec_disable(selected_macsec_priv->fm_macsec);
- + err += fm_macsec_free(selected_macsec_priv->fm_macsec);
- + selected_macsec_priv->fm_macsec = NULL;
- +
- + if (unlikely(err < 0)) {
- + pr_err("macsec disable failed\n");
- + return err;
- + }
- +
- + return 0;
- +
- +}
- +
- +static int disable_all(struct generic_msg *gen, int *macsec_id)
- +{
- + struct macsec_priv_s *selected_macsec_priv;
- + struct rx_sc_dev *selected_rx_sc_dev;
- + int err = 0, i;
- +
- + if (gen->payload.macsec_id < 0 ||
- + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
- + return -EINVAL;
- + }
- +
- + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
- + *macsec_id = gen->payload.macsec_id;
- +
- + for (i = 0; i < NUM_OF_RX_SC; i++) {
- + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[i];
- +
- + if (!selected_rx_sc_dev)
- + continue;
- +
- + err += fm_macsec_secy_rxsa_disable_receive(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + selected_macsec_priv->an);
- +
- + err += fm_macsec_secy_delete_rx_sa(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev,
- + selected_macsec_priv->an);
- +
- + err += fm_macsec_secy_delete_rxsc(
- + selected_macsec_priv->fm_ms_secy,
- + selected_rx_sc_dev);
- + }
- +
- + err += fm_macsec_secy_delete_tx_sa(
- + selected_macsec_priv->fm_ms_secy,
- + selected_macsec_priv->an);
- +
- + err += fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
- + selected_macsec_priv->fm_ms_secy = NULL;
- +
- + err += fm_macsec_disable(selected_macsec_priv->fm_macsec);
- +
- + err += fm_macsec_free(selected_macsec_priv->fm_macsec);
- + selected_macsec_priv->fm_macsec = NULL;
- +
- + if (unlikely(err < 0)) {
- + pr_err("macsec disable failed\n");
- + return err;
- + }
- +
- + return 0;
- +}
- +
- +static inline void macsec_setup_ingress(struct macsec_priv_s *macsec_priv,
- + struct dpa_fq *fq,
- + const struct qman_fq *template)
- +{
- + fq->fq_base = *template;
- + fq->net_dev = macsec_priv->net_dev;
- +
- + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
- + fq->channel = macsec_priv->channel;
- +}
- +
- +static inline void macsec_setup_egress(struct macsec_priv_s *macsec_priv,
- + struct dpa_fq *fq,
- + struct fm_port *port,
- + const struct qman_fq *template)
- +{
- + fq->fq_base = *template;
- + fq->net_dev = macsec_priv->net_dev;
- +
- + if (port) {
- + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
- + } else {
- + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
- + }
- +}
- +
- +/* At the moment, we don't create recycle queues. */
- +static void macsec_fq_setup(struct macsec_priv_s *macsec_priv,
- + const struct dpa_fq_cbs_t *fq_cbs,
- + struct fm_port *tx_port)
- +{
- + struct dpa_fq *fq;
- + int egress_cnt = 0, conf_cnt = 0;
- +
- + /* Initialize each FQ in the list */
- + list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
- + switch (fq->fq_type) {
- + /* Normal TX queues */
- + case FQ_TYPE_TX:
- + macsec_setup_egress(macsec_priv, fq, tx_port,
- + &fq_cbs->egress_ern);
- + /* If we have more Tx queues than the number of cores,
- + * just ignore the extra ones.
- + */
- + if (egress_cnt < MACSEC_ETH_TX_QUEUES)
- + macsec_priv->egress_fqs[egress_cnt++] =
- + &fq->fq_base;
- + break;
- + case FQ_TYPE_TX_CONFIRM:
- + BUG_ON(!macsec_priv->mac_dev);
- + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
- + break;
- + /* TX confirm multiple queues */
- + case FQ_TYPE_TX_CONF_MQ:
- + BUG_ON(!macsec_priv->mac_dev);
- + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
- + macsec_priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- + break;
- + case FQ_TYPE_TX_ERROR:
- + BUG_ON(!macsec_priv->mac_dev);
- + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_errq);
- + break;
- + default:
- + dev_warn(macsec_priv->net_dev->dev.parent,
- + "Unknown FQ type detected!\n");
- + break;
- + }
- + }
- +
- + /* The number of Tx queues may be smaller than the number of cores, if
- + * the Tx queue range is specified in the device tree instead of being
- + * dynamically allocated.
- + * Make sure all CPUs receive a corresponding Tx queue.
- + */
- + while (egress_cnt < MACSEC_ETH_TX_QUEUES) {
- + list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
- + if (fq->fq_type != FQ_TYPE_TX)
- + continue;
- + macsec_priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- + if (egress_cnt == MACSEC_ETH_TX_QUEUES)
- + break;
- + }
- + }
- +
- +}
- +
- +static const struct fqid_cell tx_fqids[] = {
- + {0, MACSEC_ETH_TX_QUEUES}
- +};
- +
- +static const struct fqid_cell tx_confirm_fqids[] = {
- + {0, MACSEC_ETH_TX_QUEUES}
- +};
- +
- +/* Allocate percpu priv. This is used to keep track of rx and tx packets on
- + * each cpu (take into consideration that the number of queues is equal to the
- + * number of cpus, so there is one queue/cpu).
- + */
- +static void alloc_priv(struct macsec_percpu_priv_s *percpu_priv,
- + struct macsec_priv_s *macsec_priv, struct device *dev)
- +{
- + int i, err;
- +
- + macsec_priv->percpu_priv = alloc_percpu(*macsec_priv->percpu_priv);
- +
- + if (unlikely(macsec_priv->percpu_priv == NULL)) {
- + dev_err(dev, "alloc_percpu() failed\n");
- + err = -ENOMEM;
- + dpa_fq_free(dev, &macsec_priv->dpa_fq_list);
- + }
- +
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(macsec_priv->percpu_priv, i);
- + memset(percpu_priv, 0, sizeof(*percpu_priv));
- + }
- +
- +}
- +
- +/* On RX, we only need to retain the information about frames, if they were
- + * encrypted or not. Statistics regarding this will be printed in a log file.
- + */
- +static int macsec_rx_hook(void *ptr, struct net_device *net_dev, u32 fqid)
- +{
- +
- + struct qm_fd *rx_fd = (struct qm_fd *)ptr;
- + struct macsec_percpu_priv_s *percpu_priv_m;
- + struct macsec_priv_s *selected_macsec_priv;
- +
- + selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
- +
- + percpu_priv_m = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
- +
- + if ((rx_fd->status & FM_FD_STAT_RX_MACSEC) != 0) {
- + if (netif_msg_hw(selected_macsec_priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%u\n",
- + rx_fd->status & FM_FD_STAT_RX_MACSEC);
- + percpu_priv_m->rx_macsec++;
- + }
- +
- + return DPAA_ETH_CONTINUE;
- +}
- +
- +/* Split TX traffic. If encryption enabled, send packets on specific QMAN frame
- + * queues. Otherwise, let them be handled by dpa eth. Also, keep track of the
- + * number of packets that are walking away through "macsec" queues.
- + */
- +static enum dpaa_eth_hook_result macsec_tx_hook(struct sk_buff *skb,
- + struct net_device *net_dev)
- +{
- + struct dpa_priv_s *dpa_priv;
- + struct qm_fd fd;
- + struct macsec_percpu_priv_s *macsec_percpu_priv;
- + struct dpa_percpu_priv_s *dpa_percpu_priv;
- + int i, err = 0;
- + int *countptr, offset = 0;
- + const bool nonlinear = skb_is_nonlinear(skb);
- + struct qman_fq *egress_fq;
- + struct macsec_priv_s *selected_macsec_priv;
- +
- + selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
- +
- + if (!selected_macsec_priv->net_dev ||
- + (selected_macsec_priv->en_state != SECY_ENABLED) ||
- + (ntohs(skb->protocol) == ETH_P_PAE))
- + return DPAA_ETH_CONTINUE;
- +
- + dpa_priv = netdev_priv(net_dev);
- + /* Non-migratable context, safe to use raw_cpu_ptr */
- + macsec_percpu_priv = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
- + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
- +
- + countptr = raw_cpu_ptr(dpa_priv->dpa_bp->percpu_count);
- +
- + clear_fd(&fd);
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (dpa_priv->tsu && dpa_priv->tsu->valid &&
- + dpa_priv->tsu->hwts_tx_en_ioctl)
- + fd.cmd |= FM_FD_CMD_UPD;
- +#endif
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (unlikely(dpa_priv->ts_tx_en &&
- + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- + fd.cmd |= FM_FD_CMD_UPD;
- + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
- + * we don't feed FMan with more fragments than it supports.
- + * Btw, we're using the first sgt entry to store the linear part of
- + * the skb, so we're one extra frag short.
- + */
- + if (nonlinear &&
- + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
- + /* Just create a S/G fd based on the skb */
- + err = skb_to_sg_fd(dpa_priv, skb, &fd);
- + dpa_percpu_priv->tx_frag_skbuffs++;
- + } else {
- + /* Make sure we have enough headroom to accommodate private
- + * data, parse results, etc. Normally this shouldn't happen if
- + * we're here via the standard kernel stack.
- + */
- + if (unlikely(skb_headroom(skb) < dpa_priv->tx_headroom)) {
- + struct sk_buff *skb_new;
- +
- + skb_new = skb_realloc_headroom(skb,
- + dpa_priv->tx_headroom);
- + if (unlikely(!skb_new)) {
- + dev_kfree_skb(skb);
- + dpa_percpu_priv->stats.tx_errors++;
- + return DPAA_ETH_STOLEN;
- + }
- + dev_kfree_skb(skb);
- + skb = skb_new;
- + }
- +
- + /* We're going to store the skb backpointer at the beginning
- + * of the data buffer, so we need a privately owned skb
- + */
- +
- + /* Code borrowed from skb_unshare(). */
- + if (skb_cloned(skb)) {
- + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
- + kfree_skb(skb);
- + skb = nskb;
- + /* skb_copy() has now linearized the skbuff. */
- + } else if (unlikely(nonlinear)) {
- + /* We are here because the egress skb contains
- + * more fragments than we support. In this case,
- + * we have no choice but to linearize it ourselves.
- + */
- + err = __skb_linearize(skb);
- + }
- + if (unlikely(!skb || err < 0)) {
- + /* Common out-of-memory error path */
- + goto enomem;
- + }
- +
- + /* Finally, create a contig FD from this skb */
- + err = skb_to_contig_fd(dpa_priv, skb, &fd, countptr, &offset);
- + }
- + if (unlikely(err < 0))
- + goto skb_to_fd_failed;
- +
- + if (fd.bpid != 0xff) {
- + skb_recycle(skb);
- + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
- + * but we need the skb to look as if returned by build_skb().
- + * We need to manually adjust the tailptr as well.
- + */
- + skb->data = skb->head + offset;
- + skb_reset_tail_pointer(skb);
- +
- + (*countptr)++;
- + dpa_percpu_priv->tx_returned++;
- + }
- +
- + egress_fq = selected_macsec_priv->egress_fqs[smp_processor_id()];
- + if (fd.bpid == 0xff)
- + fd.cmd |= qman_fq_fqid(macsec_get_tx_conf_queue(
- + selected_macsec_priv,
- + egress_fq));
- +
- + for (i = 0; i < 100000; i++) {
- + err = qman_enqueue(egress_fq, &fd, 0);
- + if (err != -EBUSY)
- + break;
- + }
- +
- + if (unlikely(err < 0)) {
- + dpa_percpu_priv->stats.tx_errors++;
- + dpa_percpu_priv->stats.tx_fifo_errors++;
- + goto xmit_failed;
- + }
- +
- + macsec_percpu_priv->tx_macsec++;
- + dpa_percpu_priv->stats.tx_packets++;
- + dpa_percpu_priv->stats.tx_bytes += dpa_fd_length(&fd);
- +
- + net_dev->trans_start = jiffies;
- + return DPAA_ETH_STOLEN;
- +
- +xmit_failed:
- + if (fd.bpid != 0xff) {
- + (*countptr)--;
- + dpa_percpu_priv->tx_returned--;
- + dpa_fd_release(net_dev, &fd);
- + dpa_percpu_priv->stats.tx_errors++;
- + return DPAA_ETH_STOLEN;
- + }
- + _dpa_cleanup_tx_fd(dpa_priv, &fd);
- +skb_to_fd_failed:
- +enomem:
- + dpa_percpu_priv->stats.tx_errors++;
- + dev_kfree_skb(skb);
- + return DPAA_ETH_STOLEN;
- +}
- +
- +/* Allocate and initialize macsec priv and fqs. Also, create debugfs entry for
- + * a specific interface. Iterate through existing devices in order to find the
- + * one we want to have macsec for.
- + */
- +static int macsec_setup(void)
- +{
- + struct net_device *net_dev;
- + struct macsec_percpu_priv_s *percpu_priv = NULL;
- + struct dpa_priv_s *dpa_priv = NULL;
- + struct dpa_fq *dpa_fq;
- + struct device *dev = NULL;
- + int err, i, j, macsec_id;
- +
- + pr_debug("Entering: %s\n", __func__);
- +
- + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
- + macsec_priv[i] = kzalloc(sizeof(*(macsec_priv[i])), GFP_KERNEL);
- +
- + if (unlikely(macsec_priv[i] == NULL)) {
- + int j;
- + for (j = 0; j < i; j++)
- + kfree(macsec_priv[j]);
- + pr_err("could not allocate\n");
- + return -ENOMEM;
- + }
- + }
- +
- + for (i = 0; i < macsec_ifs_cnt; i++) {
- + net_dev = first_net_device(&init_net);
- + macsec_id = net_dev->ifindex - 1;
- + while (net_dev) {
- + macsec_id = net_dev->ifindex - 1;
- +
- + /* to maintain code readability and less than
- + * 80 characters per line
- + */
- + if (strcmp(net_dev->name, macsec_ifs[i]) != 0) {
- + net_dev = next_net_device(net_dev);
- + continue;
- + }
- +
- + /* strcmp(net_dev->name, macsec_ifs[i]) == 0 */
- + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
- + macsec_priv[macsec_id]->net_dev = net_dev;
- + dpa_priv = netdev_priv(net_dev);
- + macsec_priv[macsec_id]->mac_dev = dpa_priv->mac_dev;
- + macsec_priv[macsec_id]->channel = dpa_priv->channel;
- + dev = net_dev->dev.parent;
- +
- + INIT_LIST_HEAD(&macsec_priv[macsec_id]->dpa_fq_list);
- +
- + dpa_fq = dpa_fq_alloc(dev,
- + tx_fqids->start, tx_fqids->count,
- + &macsec_priv[macsec_id]->dpa_fq_list,
- + FQ_TYPE_TX);
- + if (unlikely(dpa_fq == NULL)) {
- + dev_err(dev, "dpa_fq_alloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + dpa_fq = dpa_fq_alloc(dev,
- + tx_confirm_fqids->start,
- + tx_confirm_fqids->count,
- + &macsec_priv[macsec_id]->dpa_fq_list,
- + FQ_TYPE_TX_CONF_MQ);
- + if (unlikely(dpa_fq == NULL)) {
- + dev_err(dev, "dpa_fq_alloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + macsec_fq_setup(macsec_priv[macsec_id], &private_fq_cbs,
- + macsec_priv[macsec_id]->mac_dev->port_dev[TX]);
- +
- + alloc_priv(percpu_priv, macsec_priv[macsec_id], dev);
- +
- + break;
- + }
- + if (macsec_priv[macsec_id]->net_dev == NULL) {
- + pr_err("Interface unknown\n");
- + err = -EINVAL;
- + goto _error;
- + }
- +
- + /* setup specific ethtool ops for macsec */
- + macsec_setup_ethtool_ops(net_dev);
- + }
- + return 0;
- +
- +_error:
- + for (j = 0; j < i; i++) {
- + net_dev = first_net_device(&init_net);
- + while (net_dev) {
- + macsec_id = net_dev->ifindex - 1;
- + if (strcmp(net_dev->name, macsec_ifs[j]) != 0) {
- + net_dev = next_net_device(net_dev);
- + continue;
- + }
- + dpa_fq_free(net_dev->dev.parent,
- + &macsec_priv[macsec_id]->dpa_fq_list);
- + break;
- + }
- + macsec_restore_ethtool_ops(macsec_priv[j]->net_dev);
- + kfree(macsec_priv[j]);
- + }
- + for (j = i; j < FM_MAX_NUM_OF_MACS; j++)
- + kfree(macsec_priv[j]);
- + return err;
- +}
- +
- +static int enable_macsec(struct generic_msg *gen)
- +{
- + struct fm_macsec_params macsec_params;
- + int rv, macsec_id;
- + void __iomem *mac_dev_base_addr;
- + uintptr_t macsec_reg_addr;
- + struct macsec_data *mdata;
- + char if_name[IFNAMSIZ];
- + struct macsec_priv_s *selected_macsec_priv;
- +
- + mdata = &gen->payload.en_macsec;
- +
- + if (unlikely(mdata->if_name_length > IFNAMSIZ)) {
- + pr_err("interface name too long\n");
- + return -EINVAL;
- + }
- +
- + rv = copy_from_user(if_name, mdata->if_name, mdata->if_name_length);
- + if (unlikely(rv != 0)) {
- + pr_err("copy_from_user could not copy %i bytes\n", rv);
- + return -EFAULT;
- + }
- +
- + macsec_id = ifname_to_id(if_name);
- + if (macsec_id < 0 || macsec_id >= FM_MAX_NUM_OF_MACS) {
- + pr_err("error on converting to macsec_id\n");
- + return -ENXIO;
- + }
- +
- + selected_macsec_priv = macsec_priv[macsec_id];
- +
- + if (selected_macsec_priv->fm_macsec) {
- + pr_err("macsec has already been configured\n");
- + return -EINVAL;
- + }
- +
- + mac_dev_base_addr = selected_macsec_priv->mac_dev->vaddr;
- +
- + macsec_reg_addr = (uintptr_t)(mac_dev_base_addr + MACSEC_REG_OFFSET);
- +
- + memset(&macsec_params, 0, sizeof(macsec_params));
- + macsec_params.fm_h = (handle_t)selected_macsec_priv->mac_dev->fm;
- + macsec_params.guest_mode = FALSE;
- + /* The MACsec offset relative to the memory mapped MAC device */
- + macsec_params.non_guest_params.base_addr = macsec_reg_addr;
- + macsec_params.non_guest_params.fm_mac_h =
- + (handle_t)selected_macsec_priv->mac_dev->get_mac_handle(
- + selected_macsec_priv->mac_dev);
- + macsec_params.non_guest_params.exception_f = macsec_exception;
- + macsec_params.non_guest_params.app_h = selected_macsec_priv->mac_dev;
- +
- + selected_macsec_priv->fm_macsec = fm_macsec_config(&macsec_params);
- + if (unlikely(selected_macsec_priv->fm_macsec == NULL))
- + return -EINVAL;
- +
- + if (mdata->config_unknown_sci_treatment) {
- + rv = fm_macsec_config_unknown_sci_frame_treatment(
- + selected_macsec_priv->fm_macsec,
- + mdata->unknown_sci_treatment);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_invalid_tag_treatment) {
- + rv = fm_macsec_config_invalid_tags_frame_treatment(
- + selected_macsec_priv->fm_macsec,
- + mdata->deliver_uncontrolled);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_kay_frame_treatment) {
- + rv = fm_macsec_config_kay_frame_treatment(
- + selected_macsec_priv->fm_macsec,
- + mdata->discard_uncontrolled);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_untag_treatment) {
- + rv = fm_macsec_config_untag_frame_treatment(
- + selected_macsec_priv->fm_macsec,
- + mdata->untag_treatment);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_pn_exhaustion_threshold) {
- + rv = fm_macsec_config_pn_exhaustion_threshold(
- + selected_macsec_priv->fm_macsec,
- + mdata->pn_threshold);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_keys_unreadable) {
- + rv = fm_macsec_config_keys_unreadable(
- + selected_macsec_priv->fm_macsec);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_sectag_without_sci) {
- + rv = fm_macsec_config_sectag_without_sci(
- + selected_macsec_priv->fm_macsec);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + if (mdata->config_exception) {
- + rv = fm_macsec_config_exception(selected_macsec_priv->fm_macsec,
- + mdata->exception,
- + mdata->enable_exception);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- + }
- +
- + rv = fm_macsec_init(selected_macsec_priv->fm_macsec);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- +
- + rv = fm_macsec_enable(selected_macsec_priv->fm_macsec);
- + if (unlikely(rv < 0))
- + goto _return_fm_macsec_free;
- +
- + return macsec_id;
- +
- +_return_fm_macsec_free:
- + fm_macsec_free(selected_macsec_priv->fm_macsec);
- + selected_macsec_priv->fm_macsec = NULL;
- + return rv;
- +}
- +
- +static int send_result(struct nlmsghdr *nlh, int pid, int result)
- +{
- + int res;
- + struct sk_buff *skb_out;
- + size_t msg_size = sizeof(result);
- +
- + skb_out = nlmsg_new(msg_size, 0);
- + if (unlikely(!skb_out)) {
- + pr_err("Failed to allocate new skb\n");
- + goto _ret_err;
- + }
- +
- + nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
- + if (unlikely(!nlh)) {
- + pr_err("Failed to send\n");
- + goto _ret_err;
- + }
- +
- + NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
- + memcpy(nlmsg_data(nlh), &result, msg_size);
- +
- + res = nlmsg_unicast(nl_sk, skb_out, pid);
- + if (unlikely(res < 0)) {
- + pr_err("Error while sending back to user\n");
- + goto _ret_err;
- + }
- +
- + return 0;
- +
- +_ret_err:
- + return -1;
- +}
- +
- +/* Kernel communicates with user space through netlink sockets. This function
- + * implements the responses of the kernel. The generic struct is used for
- + * easier handling of the code, which otherwise would have been duplicated.
- + */
- +static void switch_messages(struct sk_buff *skb)
- +{
- + struct nlmsghdr *nlh;
- + int pid, rv;
- + enum msg_type cmd;
- +
- + struct dpa_fq *dpa_fq, *tmp;
- + struct device *dev;
- +
- + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
- +
- + struct generic_msg *check;
- + int macsec_id = 0;
- + uint32_t sc_id, macsec_revision;
- + macsec_an_t ret_an;
- + int i;
- +
- + pr_debug("Entering: %s\n", __func__);
- +
- + if (unlikely(!skb)) {
- + pr_err("skb null\n");
- + return;
- + }
- +
- + nlh = (struct nlmsghdr *)skb->data;
- + check = kmalloc(sizeof(*check), GFP_KERNEL);
- + memcpy(check, nlmsg_data(nlh), sizeof(*check));
- + pid = nlh->nlmsg_pid; /*pid of sending process */
- + cmd = check->chf;
- +
- + switch (cmd) {
- + case ENABLE_MACSEC:
- + pr_debug("ENABLE_MACSEC\n");
- +
- + macsec_id = enable_macsec(check);
- +
- + if (macsec_id >= 0)
- + macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
- +
- + rv = send_result(nlh, pid, (macsec_id < 0) ? NACK : macsec_id);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case SET_EXCEPTION:
- + pr_debug("SET_EXCEPTION\n");
- +
- + rv = set_macsec_exception(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case ENABLE_SECY:
- + pr_debug("ENABLE_SECY\n");
- +
- + rv = enable_secy(check, &macsec_id);
- +
- + if (rv == 0)
- + macsec_priv[macsec_id]->en_state = SECY_ENABLED;
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case GET_REVISION:
- + pr_debug("GET_REVISION\n");
- +
- + rv = get_macsec_revision(check, &macsec_revision);
- +
- + rv = send_result(nlh, pid,
- + (rv < 0) ? NACK : (int)macsec_revision);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case GET_TXSC_PHYS_ID:
- + pr_debug("GET_TXSC_PHYS_ID\n");
- +
- + rv = get_tx_sc_phys_id(check, &sc_id);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case TX_SA_CREATE:
- + pr_debug("TX_SA_CREATE\n");
- +
- + rv = create_tx_sa(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case MODIFY_TXSA_KEY:
- + pr_debug("MODIFY_TXSA_KEY\n");
- +
- + rv = modify_tx_sa_key(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case TX_SA_ACTIVATE:
- + pr_debug("TX_SA_ACTIVATE\n");
- +
- + rv = activate_tx_sa(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case GET_TXSA_AN:
- + pr_debug("GET_TXSA_AN\n");
- +
- + rv = get_tx_sa_an(check, &ret_an);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)ret_an);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case RX_SC_CREATE:
- + pr_debug("RX_SC_CREATE\n");
- +
- + sc_id = create_rx_sc(check);
- +
- + rv = send_result(nlh, pid, (sc_id < 0) ? NACK : (int)sc_id);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case GET_RXSC_PHYS_ID:
- + pr_debug("GET_RXSC_PHYS_ID\n");
- +
- + rv = get_rx_sc_phys_id(check, &sc_id);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case RX_SA_CREATE:
- + pr_debug("RX_SA_CREATE\n");
- +
- + rv = create_rx_sa(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case MODIFY_RXSA_KEY:
- + pr_debug("MODIFY_RXSA_KEY\n");
- +
- + rv = modify_rx_sa_key(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case UPDATE_NPN:
- + pr_debug("UPDATE_NPN\n");
- +
- + rv = update_npn(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case UPDATE_LPN:
- + pr_debug("UPDATE_LPN\n");
- +
- + rv = update_lpn(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case RX_SA_ACTIVATE:
- + pr_debug("RX_SA_ACTIVATE\n");
- +
- + rv = activate_rx_sa(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case RX_SA_DISABLE:
- + pr_debug("RX_SA_DISABLE\n");
- +
- + rv = rx_sa_disable(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case RX_SA_DELETE:
- + pr_debug("RX_SA_DELETE\n");
- +
- + rv = rx_sa_delete(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case RX_SC_DELETE:
- + pr_debug("RX_SC_DELETE\n");
- +
- + rv = rx_sc_delete(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case TX_SA_DELETE:
- + pr_debug("TX_SA_DELETE\n");
- +
- + rv = tx_sa_delete(check);
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case DISABLE_SECY:
- + pr_debug("DISABLE_SECY\n");
- +
- + rv = disable_secy(check, &macsec_id);
- +
- + if (unlikely(rv < 0))
- + macsec_priv[macsec_id]->en_state = SECY_ENABLED;
- + else
- + macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case DISABLE_MACSEC:
- + pr_debug("DISABLE_MACSEC\n");
- +
- + rv = disable_macsec(check, &macsec_id);
- +
- + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- +
- + break;
- +
- + case DISABLE_ALL:
- + pr_debug("DISABLE_ALL\n");
- +
- + rv = disable_all(check, &macsec_id);
- +
- + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
- +
- + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
- + if (unlikely(rv < 0))
- + goto _release;
- + break;
- +
- + default:
- + /* should never get here */
- + pr_err("not a state\n");
- + break;
- + }
- +
- + return;
- +
- +_release:
- + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++)
- + deinit_macsec(i);
- +
- + /* Reset the TX hooks */
- + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
- + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
- +
- + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
- +
- + if (!macsec_priv[i]->net_dev)
- + continue;
- +
- + free_percpu(macsec_priv[i]->percpu_priv);
- +
- + /* Delete the fman queues */
- + list_for_each_entry_safe(dpa_fq,
- + tmp,
- + &macsec_priv[i]->dpa_fq_list,
- + list) {
- + dev = dpa_fq->net_dev->dev.parent;
- + rv = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
- + if (unlikely(rv < 0))
- +				pr_err("_dpa_fq_free=%d\n", rv);
- + }
- +
- + macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
- + kfree(macsec_priv[i]);
- + macsec_priv[i] = NULL;
- + }
- +
- + kfree(check);
- +
- + netlink_kernel_release(nl_sk);
- +}
- +
- +struct netlink_kernel_cfg ms_cfg = {
- + .groups = 1,
- + .input = switch_messages,
- +};
- +
- +static int __init macsec_init(void)
- +{
- + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
- + int ret, i;
- +
- + pr_debug("Entering: %s\n", __func__);
- +
- + /* If there is no interface we want macsec on, just exit. */
- + parse_ifs();
- + for (i = 0; i < macsec_ifs_cnt; i++) {
- + if (!macsec_ifs[i]) {
- + pr_err("Interface unknown\n");
- + return -EINVAL;
- + }
- + }
- +
- + /* Actually send the info to the user through a given socket. */
- + nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &ms_cfg);
- + if (unlikely(!nl_sk)) {
- + pr_err("Error creating socket.\n");
- + ret = -ENOMEM;
- + goto _release;
- + }
- +
- + ret = macsec_setup();
- + if (unlikely(ret != 0)) {
- + pr_err("Setup of macsec failed\n");
- + goto _release;
- + }
- +
- + /* set dpaa hooks for default queues */
- + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
- + macsec_dpaa_eth_hooks.tx = (dpaa_eth_egress_hook_t)(macsec_tx_hook);
- + macsec_dpaa_eth_hooks.rx_default =
- + (dpaa_eth_ingress_hook_t)(macsec_rx_hook);
- +
- + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
- +
- + return 0;
- +
- +_release:
- + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
- + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
- + netlink_kernel_release(nl_sk);
- + return ret;
- +}
- +
- +static void __exit macsec_exit(void)
- +{
- + int _errno;
- + struct dpa_fq *dpa_fq, *tmp;
- + struct device *dev;
- + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
- + int i;
- +
- + pr_debug("exiting macsec module\n");
- +
- + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
- + /* release has already been done, due to errors,
- + * in switch_messages we will return to exit the module properly
- + */
- + if (!macsec_priv[i]->net_dev) {
- + pr_debug("no release needed\n");
- + continue;
- + }
- + deinit_macsec(i);
- + }
- +
- + /* Reset the TX hooks before exiting */
- + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
- + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
- +
- + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
- +
- + if (!macsec_priv[i]->net_dev) {
- + pr_debug("no release needed\n");
- + continue;
- + }
- +
- + free_percpu(macsec_priv[i]->percpu_priv);
- +
- + /* Delete the fman queues */
- + list_for_each_entry_safe(dpa_fq, tmp,
- + &macsec_priv[i]->dpa_fq_list, list) {
- + if (dpa_fq) {
- + dev = dpa_fq->net_dev->dev.parent;
- + _errno = _dpa_fq_free(dev,
- + (struct qman_fq *)dpa_fq);
- + if (unlikely(_errno < 0))
- +					pr_err("_dpa_fq_free=%d\n", _errno);
- + }
- + }
- +
- + /* restore ethtool ops to the previous private ones */
- + macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
- +
- + kfree(macsec_priv[i]);
- + }
- +
- + netlink_kernel_release(nl_sk);
- +
- + pr_debug("exited macsec module\n");
- +}
- +
- +module_init(macsec_init);
- +module_exit(macsec_exit);
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
- @@ -0,0 +1,294 @@
- +/* Copyright 2015 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __DPAA_ETH_MACSEC_H
- +#define __DPAA_ETH_MACSEC_H
- +
- +#include "mac.h"
- +
- +#define NETLINK_USER 31
- +#define MAX_NUM_OF_SECY 1
- +#define MAX_LEN 100
- +#define FM_FD_STAT_RX_MACSEC 0x00800000
- +#define MACSEC_ETH_TX_QUEUES NR_CPUS
- +#define MACSEC_REG_OFFSET 0x800
- +#define ACK 0
- +#define NACK -1
- +
- +extern const struct dpa_fq_cbs_t private_fq_cbs;
- +
- +extern int dpa_macsec_get_sset_count(struct net_device *net_dev, int type);
- +extern void
- +dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
- + struct ethtool_stats *stats, u64 *data);
- +extern void
- +dpa_macsec_get_strings(struct net_device *net_dev,
- + u32 stringset, u8 *data);
- +
- +enum msg_type {ENABLE_MACSEC,
- + SET_EXCEPTION,
- + ENABLE_SECY,
- + TX_SA_CREATE,
- + TX_SA_ACTIVATE,
- + RX_SC_CREATE,
- + RX_SA_CREATE,
- + RX_SA_ACTIVATE,
- + RX_SA_DISABLE,
- + RX_SA_DELETE,
- + RX_SC_DELETE,
- + TX_SA_DELETE,
- + DISABLE_MACSEC,
- + DISABLE_SECY,
- + DISABLE_ALL,
- + GET_REVISION,
- + UPDATE_NPN,
- + UPDATE_LPN,
- + GET_TXSC_PHYS_ID,
- + GET_RXSC_PHYS_ID,
- + GET_TXSA_AN,
- + MODIFY_TXSA_KEY,
- + MODIFY_RXSA_KEY,
- +};
- +
- +enum macsec_enablement {MACSEC_DISABLED, MACSEC_ENABLED, SECY_ENABLED};
- +
- +struct enable_secy {
- + int macsec_id;
- +
- + u64 sci; /* MAC address(48b) + port_id(16b) */
- +
- + bool config_insertion_mode;
- + fm_macsec_sci_insertion_mode sci_insertion_mode;
- +
- + bool config_protect_frames;
- + bool protect_frames;
- +
- + bool config_replay_window;
- + bool replay_protect;
- + uint32_t replay_window;
- +
- + bool config_validation_mode;
- + fm_macsec_valid_frame_behavior validate_frames;
- +
- + bool config_confidentiality;
- + bool confidentiality_enable;
- + uint32_t confidentiality_offset;
- +
- + bool config_point_to_point;
- +
- + bool config_exception;
- + bool enable_exception;
- + fm_macsec_secy_exception exception;
- +
- + bool config_event;
- + bool enable_event;
- + fm_macsec_secy_event event;
- +};
- +
- +struct macsec_data {
- + char *if_name;
- + size_t if_name_length; /* including string terminator */
- +
- + bool config_unknown_sci_treatment;
- + fm_macsec_unknown_sci_frame_treatment unknown_sci_treatment;
- +
- + bool config_invalid_tag_treatment;
- + bool deliver_uncontrolled;
- +
- + bool config_kay_frame_treatment;
- + bool discard_uncontrolled;
- +
- + bool config_untag_treatment;
- + fm_macsec_untag_frame_treatment untag_treatment;
- +
- + bool config_pn_exhaustion_threshold;
- + uint32_t pn_threshold;
- +
- + bool config_keys_unreadable;
- +
- + bool config_sectag_without_sci;
- +
- + bool config_exception;
- + bool enable_exception;
- + fm_macsec_exception exception;
- +};
- +
- +struct set_exception {
- + int macsec_id;
- + bool enable_exception;
- + fm_macsec_exception exception;
- +};
- +
- +struct create_tx_sa {
- + int macsec_id;
- + u8 an; /* association number */
- + u8 *sak; /* secure assoc key */
- + u32 sak_len; /* assoc key length */
- +};
- +
- +struct modify_tx_sa_key {
- + int macsec_id;
- + u8 an; /* association number */
- + u8 *sak; /* secure assoc key */
- + u32 sak_len; /* assoc key length */
- +};
- +
- +struct activate_tx_sa {
- + int macsec_id;
- + u8 an; /* association number */
- +};
- +
- +struct create_rx_sc {
- + int macsec_id;
- + u64 sci;
- +};
- +
- +struct delete_rx_sc {
- + int macsec_id;
- + u32 rx_sc_id;
- +};
- +
- +struct get_rx_sc_id {
- + int macsec_id;
- + u32 rx_sc_id;
- +};
- +
- +struct create_rx_sa {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- + u32 lpn;
- + u8 *sak;
- + u32 sak_len;
- +};
- +
- +struct activate_rx_sa {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- +};
- +
- +struct disable_rx_sa {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- +};
- +
- +struct delete_rx_sa {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- +};
- +
- +struct delete_tx_sa {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- +};
- +
- +struct update_npn {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- + u32 pn;
- +};
- +
- +struct update_lpn {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- + u32 pn;
- +};
- +
- +struct modify_rx_sa_key {
- + int macsec_id;
- + u32 rx_sc_id;
- + u8 an;
- + u8 *sak;
- + u32 sak_len;
- +};
- +
- +struct generic_msg {
- + enum msg_type chf;
- + union {
- + int macsec_id;
- + struct macsec_data en_macsec;
- + struct enable_secy secy;
- + struct create_tx_sa c_tx_sa;
- + struct activate_tx_sa a_tx_sa;
- + struct create_rx_sc c_rx_sc;
- + struct get_rx_sc_id get_rx_sc_id;
- + struct create_rx_sa c_rx_sa;
- + struct activate_rx_sa a_rx_sa;
- + struct disable_rx_sa d_rx_sa;
- + struct delete_rx_sa del_rx_sa;
- + struct delete_rx_sc del_rx_sc;
- + struct delete_tx_sa del_tx_sa;
- + struct update_npn update_npn;
- + struct update_lpn update_lpn;
- + struct modify_tx_sa_key modify_tx_sa_key;
- + struct modify_rx_sa_key modify_rx_sa_key;
- + struct set_exception set_ex;
- + } payload;
- +};
- +
- +struct macsec_percpu_priv_s {
- + u64 rx_macsec;
- + u64 tx_macsec;
- +};
- +
- +struct macsec_priv_s {
- + struct macsec_percpu_priv_s __percpu *percpu_priv;
- +
- + struct net_device *net_dev;
- + struct mac_device *mac_dev;
- +
- + struct qman_fq *egress_fqs[MACSEC_ETH_TX_QUEUES];
- + struct qman_fq *conf_fqs[MACSEC_ETH_TX_QUEUES];
- + struct list_head dpa_fq_list;
- + uint32_t msg_enable; /* net_device message level */
- + uint16_t channel;
- + struct fm_macsec_dev *fm_macsec;
- +
- + struct fm_macsec_secy_dev *fm_ms_secy;
- + uint8_t an;
- +
- + struct rx_sc_dev *rx_sc_dev[NUM_OF_RX_SC];
- + uint8_t *sa_key;
- + enum macsec_enablement en_state;
- +
- + uintptr_t vaddr;
- + struct resource *fman_resource;
- +};
- +
- +struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev);
- +
- +#endif /* __DPAA_ETH_MACSEC_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
- @@ -0,0 +1,381 @@
- +/* Copyright 2008-2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_platform.h>
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_eth_base.h"
- +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
- +#include "mac.h"
- +
- +#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +
- +MODULE_DESCRIPTION(DPA_DESCRIPTION);
- +
- +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
- +#ifdef CONFIG_PM
- +
- +static int proxy_suspend(struct device *dev)
- +{
- + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
- + struct mac_device *mac_dev = proxy_dev->mac_dev;
- + int err = 0;
- +
- + err = fm_port_suspend(mac_dev->port_dev[RX]);
- + if (err)
- + goto port_suspend_failed;
- +
- + err = fm_port_suspend(mac_dev->port_dev[TX]);
- + if (err)
- + err = fm_port_resume(mac_dev->port_dev[RX]);
- +
- +port_suspend_failed:
- + return err;
- +}
- +
- +static int proxy_resume(struct device *dev)
- +{
- + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
- + struct mac_device *mac_dev = proxy_dev->mac_dev;
- + int err = 0;
- +
- + err = fm_port_resume(mac_dev->port_dev[TX]);
- + if (err)
- + goto port_resume_failed;
- +
- + err = fm_port_resume(mac_dev->port_dev[RX]);
- + if (err)
- + err = fm_port_suspend(mac_dev->port_dev[TX]);
- +
- +port_resume_failed:
- + return err;
- +}
- +
- +static const struct dev_pm_ops proxy_pm_ops = {
- + .suspend = proxy_suspend,
- + .resume = proxy_resume,
- +};
- +
- +#define PROXY_PM_OPS (&proxy_pm_ops)
- +
- +#else /* CONFIG_PM */
- +
- +#define PROXY_PM_OPS NULL
- +
- +#endif /* CONFIG_PM */
- +
- +static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
- +{
- + int err = 0, i;
- + struct device *dev;
- + struct device_node *dpa_node;
- + struct dpa_bp *dpa_bp;
- + struct list_head proxy_fq_list;
- + size_t count;
- + struct fm_port_fqs port_fqs;
- + struct dpa_buffer_layout_s *buf_layout = NULL;
- + struct mac_device *mac_dev;
- + struct proxy_device *proxy_dev;
- +
- + dev = &_of_dev->dev;
- +
- + dpa_node = dev->of_node;
- +
- + if (!of_device_is_available(dpa_node))
- + return -ENODEV;
- +
- + /* Get the buffer pools assigned to this interface */
- + dpa_bp = dpa_bp_probe(_of_dev, &count);
- + if (IS_ERR(dpa_bp))
- + return PTR_ERR(dpa_bp);
- +
- + mac_dev = dpa_mac_probe(_of_dev);
- + if (IS_ERR(mac_dev))
- + return PTR_ERR(mac_dev);
- +
- + proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
- + if (!proxy_dev) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- +
- + proxy_dev->mac_dev = mac_dev;
- + dev_set_drvdata(dev, proxy_dev);
- +
- + /* We have physical ports, so we need to establish
- + * the buffer layout.
- + */
- + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
- + GFP_KERNEL);
- + if (!buf_layout) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + return -ENOMEM;
- + }
- + dpa_set_buffers_layout(mac_dev, buf_layout);
- +
- + INIT_LIST_HEAD(&proxy_fq_list);
- +
- + memset(&port_fqs, 0, sizeof(port_fqs));
- +
- + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
- + if (!err)
- + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
- + TX);
- + if (err < 0) {
- + devm_kfree(dev, buf_layout);
- + return err;
- + }
- +
- + /* Proxy initializer - Just configures the MAC on behalf of
- + * another partition.
- + */
- + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
- + buf_layout, dev);
- +
- + /* Proxy interfaces need to be started, and the allocated
- + * memory freed
- + */
- + devm_kfree(dev, buf_layout);
- + devm_kfree(dev, dpa_bp);
- +
- + /* Free FQ structures */
- + devm_kfree(dev, port_fqs.rx_defq);
- + devm_kfree(dev, port_fqs.rx_errq);
- + devm_kfree(dev, port_fqs.tx_defq);
- + devm_kfree(dev, port_fqs.tx_errq);
- +
- + for_each_port_device(i, mac_dev->port_dev) {
- + err = fm_port_enable(mac_dev->port_dev[i]);
- + if (err)
- + goto port_enable_fail;
- + }
- +
- + dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
- + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
- + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
- +
- + return 0; /* Proxy interface initialization ended */
- +
- +port_enable_fail:
- + for_each_port_device(i, mac_dev->port_dev)
- + fm_port_disable(mac_dev->port_dev[i]);
- + dpa_eth_proxy_remove(_of_dev);
- +
- + return err;
- +}
- +
- +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
- + struct net_device *net_dev)
- +{
- + struct mac_device *mac_dev;
- + int _errno;
- +
- + mac_dev = proxy_dev->mac_dev;
- +
- + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
- + net_dev->dev_addr);
- + if (_errno < 0)
- + return _errno;
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_proxy_set_mac_address);
- +
- +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
- + struct net_device *net_dev)
- +{
- + struct mac_device *mac_dev = proxy_dev->mac_dev;
- + int _errno;
- +
- + if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
- + mac_dev->promisc = !mac_dev->promisc;
- + _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
- + mac_dev->promisc);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
- + _errno);
- + }
- +
- + _errno = mac_dev->set_multi(net_dev, mac_dev);
- + if (unlikely(_errno < 0))
- + return _errno;
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
- +
- +int dpa_proxy_start(struct net_device *net_dev)
- +{
- + struct mac_device *mac_dev;
- + const struct dpa_priv_s *priv;
- + struct proxy_device *proxy_dev;
- + int _errno;
- + int i;
- +
- + priv = netdev_priv(net_dev);
- + proxy_dev = (struct proxy_device *)priv->peer;
- + mac_dev = proxy_dev->mac_dev;
- +
- + _errno = mac_dev->init_phy(net_dev, mac_dev);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev, "init_phy() = %d\n",
- + _errno);
- + return _errno;
- + }
- +
- + for_each_port_device(i, mac_dev->port_dev) {
- + _errno = fm_port_enable(mac_dev->port_dev[i]);
- + if (_errno)
- + goto port_enable_fail;
- + }
- +
- + _errno = mac_dev->start(mac_dev);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev, "mac_dev->start() = %d\n",
- + _errno);
- + goto port_enable_fail;
- + }
- +
- + return _errno;
- +
- +port_enable_fail:
- + for_each_port_device(i, mac_dev->port_dev)
- + fm_port_disable(mac_dev->port_dev[i]);
- +
- + return _errno;
- +}
- +EXPORT_SYMBOL(dpa_proxy_start);
- +
- +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
- +{
- + struct mac_device *mac_dev = proxy_dev->mac_dev;
- + const struct dpa_priv_s *priv = netdev_priv(net_dev);
- + int _errno, i, err;
- +
- + _errno = mac_dev->stop(mac_dev);
- + if (_errno < 0) {
- + if (netif_msg_drv(priv))
- + netdev_err(net_dev, "mac_dev->stop() = %d\n",
- + _errno);
- + return _errno;
- + }
- +
- + for_each_port_device(i, mac_dev->port_dev) {
- + err = fm_port_disable(mac_dev->port_dev[i]);
- + _errno = err ? err : _errno;
- + }
- +
- + if (mac_dev->phy_dev)
- + phy_disconnect(mac_dev->phy_dev);
- + mac_dev->phy_dev = NULL;
- +
- + return _errno;
- +}
- +EXPORT_SYMBOL(dpa_proxy_stop);
- +
- +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
- +{
- + struct device *dev = &of_dev->dev;
- + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
- +
- + kfree(proxy_dev);
- +
- + dev_set_drvdata(dev, NULL);
- +
- + return 0;
- +}
- +
- +static const struct of_device_id dpa_proxy_match[] = {
- + {
- + .compatible = "fsl,dpa-ethernet-init"
- + },
- + {}
- +};
- +MODULE_DEVICE_TABLE(of, dpa_proxy_match);
- +
- +static struct platform_driver dpa_proxy_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME "-proxy",
- + .of_match_table = dpa_proxy_match,
- + .owner = THIS_MODULE,
- + .pm = PROXY_PM_OPS,
- + },
- + .probe = dpaa_eth_proxy_probe,
- + .remove = dpa_eth_proxy_remove
- +};
- +
- +static int __init __cold dpa_proxy_load(void)
- +{
- + int _errno;
- +
- + pr_info(DPA_DESCRIPTION "\n");
- +
- + /* Initialize dpaa_eth mirror values */
- + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
- + dpa_max_frm = fm_get_max_frm();
- +
- + _errno = platform_driver_register(&dpa_proxy_driver);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + }
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + return _errno;
- +}
- +module_init(dpa_proxy_load);
- +
- +static void __exit __cold dpa_proxy_unload(void)
- +{
- + platform_driver_unregister(&dpa_proxy_driver);
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +}
- +module_exit(dpa_proxy_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
- @@ -0,0 +1,1128 @@
- +/* Copyright 2012 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/skbuff.h>
- +#include <linux/highmem.h>
- +#include <linux/fsl_bman.h>
- +
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#ifdef CONFIG_FSL_DPAA_1588
- +#include "dpaa_1588.h"
- +#endif
- +#ifdef CONFIG_FSL_DPAA_CEETM
- +#include "dpaa_eth_ceetm.h"
- +#endif
- +
- +/* DMA map and add a page frag back into the bpool.
- + * @vaddr fragment must have been allocated with netdev_alloc_frag(),
- + * specifically for fitting into @dpa_bp.
- + */
- +static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
- + int *count_ptr)
- +{
- + struct bm_buffer bmb;
- + dma_addr_t addr;
- +
- + memset(&bmb, 0, sizeof(struct bm_buffer));
- +
- + addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
- + DMA_BIDIRECTIONAL);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + return;
- + }
- +
- + bm_buffer_set64(&bmb, addr);
- +
- + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
- + cpu_relax();
- +
- + (*count_ptr)++;
- +}
- +
- +static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
- +{
- + struct bm_buffer bmb[8];
- + void *new_buf;
- + dma_addr_t addr;
- + uint8_t i;
- + struct device *dev = dpa_bp->dev;
- + struct sk_buff *skb, **skbh;
- +
- + memset(bmb, 0, sizeof(struct bm_buffer) * 8);
- +
- + for (i = 0; i < 8; i++) {
- + /* We'll prepend the skb back-pointer; can't use the DPA
- + * priv space, because FMan will overwrite it (from offset 0)
- + * if it ends up being the second, third, etc. fragment
- + * in a S/G frame.
- + *
- + * We only need enough space to store a pointer, but allocate
- + * an entire cacheline for performance reasons.
- + */
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + new_buf = page_address(alloc_page(GFP_ATOMIC));
- +#else
- + new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
- +#endif
- + if (unlikely(!new_buf))
- + goto netdev_alloc_failed;
- + new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
- +
- + skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
- + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- + if (unlikely(!skb)) {
- + put_page(virt_to_head_page(new_buf));
- + goto build_skb_failed;
- + }
- + DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
- +
- + addr = dma_map_single(dev, new_buf,
- + dpa_bp->size, DMA_BIDIRECTIONAL);
- + if (unlikely(dma_mapping_error(dev, addr)))
- + goto dma_map_failed;
- +
- + bm_buffer_set64(&bmb[i], addr);
- + }
- +
- +release_bufs:
- + /* Release the buffers. In case bman is busy, keep trying
- + * until successful. bman_release() is guaranteed to succeed
- + * in a reasonable amount of time
- + */
- + while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
- + cpu_relax();
- + return i;
- +
- +dma_map_failed:
- + kfree_skb(skb);
- +
- +build_skb_failed:
- +netdev_alloc_failed:
- + net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
- + WARN_ONCE(1, "Memory allocation failure on Rx\n");
- +
- + bm_buffer_set64(&bmb[i], 0);
- + /* Avoid releasing a completely null buffer; bman_release() requires
- + * at least one buffer.
- + */
- + if (likely(i))
- + goto release_bufs;
- +
- + return 0;
- +}
- +
- +/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
- +static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
- +{
- + int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
- + *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
- +}
- +
- +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
- +{
- + int i;
- +
- + /* Give each CPU an allotment of "config_count" buffers */
- + for_each_possible_cpu(i) {
- + int j;
- +
- + /* Although we access another CPU's counters here
- + * we do it at boot time so it is safe
- + */
- + for (j = 0; j < dpa_bp->config_count; j += 8)
- + dpa_bp_add_8_bufs(dpa_bp, i);
- + }
- + return 0;
- +}
- +EXPORT_SYMBOL(dpa_bp_priv_seed);
- +
- +/* Add buffers/(pages) for Rx processing whenever bpool count falls below
- + * REFILL_THRESHOLD.
- + */
- +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
- +{
- + int count = *countptr;
- + int new_bufs;
- +
- + if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
- + do {
- + new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
- + if (unlikely(!new_bufs)) {
- + /* Avoid looping forever if we've temporarily
- + * run out of memory. We'll try again at the
- + * next NAPI cycle.
- + */
- + break;
- + }
- + count += new_bufs;
- + } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
- +
- + *countptr = count;
- + if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
- + return -ENOMEM;
- + }
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(dpaa_eth_refill_bpools);
- +
- +/* Cleanup function for outgoing frame descriptors that were built on Tx path,
- + * either contiguous frames or scatter/gather ones.
- + * Skb freeing is not handled here.
- + *
- + * This function may be called on error paths in the Tx function, so guard
- + * against cases when not all fd relevant fields were filled in.
- + *
- + * Return the skb backpointer, since for S/G frames the buffer containing it
- + * gets freed here.
- + */
- +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- + const struct qm_fd *fd)
- +{
- + const struct qm_sg_entry *sgt;
- + int i;
- + struct dpa_bp *dpa_bp = priv->dpa_bp;
- + dma_addr_t addr = qm_fd_addr(fd);
- + dma_addr_t sg_addr;
- + struct sk_buff **skbh;
- + struct sk_buff *skb = NULL;
- + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- + int nr_frags;
- + int sg_len;
- +
- + /* retrieve skb back pointer */
- + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
- +
- + if (unlikely(fd->format == qm_fd_sg)) {
- + nr_frags = skb_shinfo(skb)->nr_frags;
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- +/* addressing the 4k DMA issue can yield a larger number of fragments than
- + * the skb had
- + */
- + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
- + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
- + dma_dir);
- +#else
- + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags),
- + dma_dir);
- +#endif
- + /* The sgt buffer has been allocated with netdev_alloc_frag(),
- + * it's from lowmem.
- + */
- + sgt = phys_to_virt(addr + dpa_fd_offset(fd));
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (priv->tsu && priv->tsu->valid &&
- + priv->tsu->hwts_tx_en_ioctl)
- + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
- +#endif
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (unlikely(priv->ts_tx_en &&
- + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- + struct skb_shared_hwtstamps shhwtstamps;
- +
- + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
- + skb_tstamp_tx(skb, &shhwtstamps);
- + }
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- + /* sgt[0] is from lowmem, was dma_map_single()-ed */
- + sg_addr = qm_sg_addr(&sgt[0]);
- + sg_len = qm_sg_entry_get_len(&sgt[0]);
- + dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + i = 1;
- + do {
- + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
- + sg_addr = qm_sg_addr(&sgt[i]);
- + sg_len = qm_sg_entry_get_len(&sgt[i]);
- + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
- + } while (!qm_sg_entry_get_final(&sgt[i++]));
- +#else
- + /* remaining pages were mapped with dma_map_page() */
- + for (i = 1; i <= nr_frags; i++) {
- + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
- + sg_addr = qm_sg_addr(&sgt[i]);
- + sg_len = qm_sg_entry_get_len(&sgt[i]);
- + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
- + }
- +#endif
- +
- + /* Free the page frag that we allocated on Tx */
- + put_page(virt_to_head_page(sgt));
- + } else {
- + dma_unmap_single(dpa_bp->dev, addr,
- + skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
- +#ifdef CONFIG_FSL_DPAA_TS
- + /* get the timestamp for non-SG frames */
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (priv->tsu && priv->tsu->valid &&
- + priv->tsu->hwts_tx_en_ioctl)
- + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
- +#endif
- + if (unlikely(priv->ts_tx_en &&
- + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- + struct skb_shared_hwtstamps shhwtstamps;
- +
- + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
- + skb_tstamp_tx(skb, &shhwtstamps);
- + }
- +#endif
- + }
- +
- + return skb;
- +}
- +EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
- +
- +#ifndef CONFIG_FSL_DPAA_TS
- +bool dpa_skb_is_recyclable(struct sk_buff *skb)
- +{
- + /* No recycling possible if skb buffer is kmalloc'ed */
- + if (skb->head_frag == 0)
- + return false;
- +
- + /* or if it's an userspace buffer */
- + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
- + return false;
- +
- + /* or if it's cloned or shared */
- + if (skb_shared(skb) || skb_cloned(skb) ||
- + skb->fclone != SKB_FCLONE_UNAVAILABLE)
- + return false;
- +
- + return true;
- +}
- +EXPORT_SYMBOL(dpa_skb_is_recyclable);
- +
- +bool dpa_buf_is_recyclable(struct sk_buff *skb,
- + uint32_t min_size,
- + uint16_t min_offset,
- + unsigned char **new_buf_start)
- +{
- + unsigned char *new;
- +
- + /* In order to recycle a buffer, the following conditions must be met:
- + * - buffer size no less than the buffer pool size
- + * - buffer size no higher than an upper limit (to avoid moving too much
- + * system memory to the buffer pools)
- + * - buffer address aligned to cacheline bytes
- + * - offset of data from start of buffer no lower than a minimum value
- + * - offset of data from start of buffer no higher than a maximum value
- + */
- + new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
- +
- + /* left align to the nearest cacheline */
- + new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
- +
- + if (likely(new >= skb->head &&
- + new >= (skb->data - DPA_MAX_FD_OFFSET) &&
- + skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
- + *new_buf_start = new;
- + return true;
- + }
- +
- + return false;
- +}
- +EXPORT_SYMBOL(dpa_buf_is_recyclable);
- +#endif
- +
- +/* Build a linear skb around the received buffer.
- + * We are guaranteed there is enough room at the end of the data buffer to
- + * accommodate the shared info area of the skb.
- + */
- +static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
- + const struct qm_fd *fd, int *use_gro)
- +{
- + dma_addr_t addr = qm_fd_addr(fd);
- + ssize_t fd_off = dpa_fd_offset(fd);
- + void *vaddr;
- + const fm_prs_result_t *parse_results;
- + struct sk_buff *skb = NULL, **skbh;
- +
- + vaddr = phys_to_virt(addr);
- + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
- +
- + /* Retrieve the skb and adjust data and tail pointers, to make sure
- + * forwarded skbs will have enough space on Tx if extra headers
- + * are added.
- + */
- + DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
- + /* When using jumbo Rx buffers, we risk having frames dropped due to
- + * the socket backlog reaching its maximum allowed size.
- + * Use the frame length for the skb truesize instead of the buffer
- + * size, as this is the size of the data that actually gets copied to
- + * userspace.
- + */
- + skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
- +#endif
- +
- + DPA_BUG_ON(fd_off != priv->rx_headroom);
- + skb_reserve(skb, fd_off);
- + skb_put(skb, dpa_fd_length(fd));
- +
- + /* Peek at the parse results for csum validation */
- + parse_results = (const fm_prs_result_t *)(vaddr +
- + DPA_RX_PRIV_DATA_SIZE);
- + _dpa_process_parse_results(parse_results, fd, skb, use_gro);
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
- + dpa_ptp_store_rxstamp(priv, skb, vaddr);
- +#endif
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (priv->ts_rx_en)
- + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- + return skb;
- +}
- +
- +
- +/* Build an skb with the data of the first S/G entry in the linear portion and
- + * the rest of the frame as skb fragments.
- + *
- + * The page fragment holding the S/G Table is recycled here.
- + */
- +static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
- + const struct qm_fd *fd, int *use_gro,
- + int *count_ptr)
- +{
- + const struct qm_sg_entry *sgt;
- + dma_addr_t addr = qm_fd_addr(fd);
- + ssize_t fd_off = dpa_fd_offset(fd);
- + dma_addr_t sg_addr;
- + void *vaddr, *sg_vaddr;
- + struct dpa_bp *dpa_bp;
- + struct page *page, *head_page;
- + int frag_offset, frag_len;
- + int page_offset;
- + int i;
- + const fm_prs_result_t *parse_results;
- + struct sk_buff *skb = NULL, *skb_tmp, **skbh;
- +
- + vaddr = phys_to_virt(addr);
- + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
- +
- + dpa_bp = priv->dpa_bp;
- + /* Iterate through the SGT entries and add data buffers to the skb */
- + sgt = vaddr + fd_off;
- + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- + /* Extension bit is not supported */
- + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
- +
- + /* We use a single global Rx pool */
- + DPA_BUG_ON(dpa_bp !=
- + dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
- +
- + sg_addr = qm_sg_addr(&sgt[i]);
- + sg_vaddr = phys_to_virt(sg_addr);
- + DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- + SMP_CACHE_BYTES));
- +
- + dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
- + DMA_BIDIRECTIONAL);
- + if (i == 0) {
- + DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
- + DPA_BUG_ON(skb->head != sg_vaddr);
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (priv->tsu && priv->tsu->valid &&
- + priv->tsu->hwts_rx_en_ioctl)
- + dpa_ptp_store_rxstamp(priv, skb, vaddr);
- +#endif
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (priv->ts_rx_en)
- + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- + /* In the case of a SG frame, FMan stores the Internal
- + * Context in the buffer containing the sgt.
- + * Inspect the parse results before anything else.
- + */
- + parse_results = (const fm_prs_result_t *)(vaddr +
- + DPA_RX_PRIV_DATA_SIZE);
- + _dpa_process_parse_results(parse_results, fd, skb,
- + use_gro);
- +
- + /* Make sure forwarded skbs will have enough space
- + * on Tx, if extra headers are added.
- + */
- + DPA_BUG_ON(fd_off != priv->rx_headroom);
- + skb_reserve(skb, fd_off);
- + skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
- + } else {
- + /* Not the first S/G entry; all data from buffer will
- + * be added in an skb fragment; fragment index is offset
- + * by one since first S/G entry was incorporated in the
- + * linear part of the skb.
- + *
- + * Caution: 'page' may be a tail page.
- + */
- + DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
- + page = virt_to_page(sg_vaddr);
- + head_page = virt_to_head_page(sg_vaddr);
- +
- + /* Free (only) the skbuff shell because its data buffer
- + * is already a frag in the main skb.
- + */
- + get_page(head_page);
- + dev_kfree_skb(skb_tmp);
- +
- + /* Compute offset in (possibly tail) page */
- + page_offset = ((unsigned long)sg_vaddr &
- + (PAGE_SIZE - 1)) +
- + (page_address(page) - page_address(head_page));
- + /* page_offset only refers to the beginning of sgt[i];
- + * but the buffer itself may have an internal offset.
- + */
- + frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
- + page_offset;
- + frag_len = qm_sg_entry_get_len(&sgt[i]);
- + /* skb_add_rx_frag() does no checking on the page; if
- + * we pass it a tail page, we'll end up with
- + * bad page accounting and eventually with segafults.
- + */
- + skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
- + frag_len, dpa_bp->size);
- + }
- + /* Update the pool count for the current {cpu x bpool} */
- + (*count_ptr)--;
- +
- + if (qm_sg_entry_get_final(&sgt[i]))
- + break;
- + }
- + WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
- +
- + /* recycle the SGT fragment */
- + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
- + dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
- + return skb;
- +}
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- +static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
- + struct sk_buff *skb)
- +{
- + if (unlikely(priv->loop_to < 0))
- + return 0; /* loop disabled by default */
- +
- + skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
- + dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
- +
- + return 1; /* Frame Tx on the selected interface */
- +}
- +#endif
- +
- +void __hot _dpa_rx(struct net_device *net_dev,
- + struct qman_portal *portal,
- + const struct dpa_priv_s *priv,
- + struct dpa_percpu_priv_s *percpu_priv,
- + const struct qm_fd *fd,
- + u32 fqid,
- + int *count_ptr)
- +{
- + struct dpa_bp *dpa_bp;
- + struct sk_buff *skb;
- + dma_addr_t addr = qm_fd_addr(fd);
- + u32 fd_status = fd->status;
- + unsigned int skb_len;
- + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
- + int use_gro = net_dev->features & NETIF_F_GRO;
- +
- + if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%08x\n",
- + fd_status & FM_FD_STAT_RX_ERRORS);
- +
- + percpu_stats->rx_errors++;
- + goto _release_frame;
- + }
- +
- + dpa_bp = priv->dpa_bp;
- + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
- +
- + /* prefetch the first 64 bytes of the frame or the SGT start */
- + dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
- + prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
- +
- + /* The only FD types that we may receive are contig and S/G */
- + DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
- +
- + if (likely(fd->format == qm_fd_contig)) {
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- + /* Execute the Rx processing hook, if it exists. */
- + if (dpaa_eth_hooks.rx_default &&
- + dpaa_eth_hooks.rx_default((void *)fd, net_dev,
- + fqid) == DPAA_ETH_STOLEN) {
- + /* won't count the rx bytes in */
- + return;
- + }
- +#endif
- + skb = contig_fd_to_skb(priv, fd, &use_gro);
- + } else {
- + skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
- + percpu_priv->rx_sg++;
- + }
- +
- + /* Account for either the contig buffer or the SGT buffer (depending on
- + * which case we were in) having been removed from the pool.
- + */
- + (*count_ptr)--;
- + skb->protocol = eth_type_trans(skb, net_dev);
- +
- + /* IP Reassembled frames are allowed to be larger than MTU */
- + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
- + !(fd_status & FM_FD_IPR))) {
- + percpu_stats->rx_dropped++;
- + goto drop_bad_frame;
- + }
- +
- + skb_len = skb->len;
- +
- +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
- + if (dpa_skb_loop(priv, skb)) {
- + percpu_stats->rx_packets++;
- + percpu_stats->rx_bytes += skb_len;
- + return;
- + }
- +#endif
- +
- + if (use_gro) {
- + gro_result_t gro_result;
- + const struct qman_portal_config *pc =
- + qman_p_get_portal_config(portal);
- + struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
- +
- + np->p = portal;
- + gro_result = napi_gro_receive(&np->napi, skb);
- + /* If frame is dropped by the stack, rx_dropped counter is
- + * incremented automatically, so no need for us to update it
- + */
- + if (unlikely(gro_result == GRO_DROP))
- + goto packet_dropped;
- + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- + goto packet_dropped;
- +
- + percpu_stats->rx_packets++;
- + percpu_stats->rx_bytes += skb_len;
- +
- +packet_dropped:
- + return;
- +
- +drop_bad_frame:
- + dev_kfree_skb(skb);
- + return;
- +
- +_release_frame:
- + dpa_fd_release(net_dev, fd);
- +}
- +
- +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd,
- + int *count_ptr, int *offset)
- +{
- + struct sk_buff **skbh;
- + dma_addr_t addr;
- + struct dpa_bp *dpa_bp = priv->dpa_bp;
- + struct net_device *net_dev = priv->net_dev;
- + int err;
- + enum dma_data_direction dma_dir;
- + unsigned char *buffer_start;
- +
- +#ifndef CONFIG_FSL_DPAA_TS
- + /* Check recycling conditions; only if timestamp support is not
- + * enabled, otherwise we need the fd back on tx confirmation
- + */
- +
- + /* We can recycle the buffer if:
- + * - the pool is not full
- + * - the buffer meets the skb recycling conditions
- + * - the buffer meets our own (size, offset, align) conditions
- + */
- + if (likely((*count_ptr < dpa_bp->target_count) &&
- + dpa_skb_is_recyclable(skb) &&
- + dpa_buf_is_recyclable(skb, dpa_bp->size,
- + priv->tx_headroom, &buffer_start))) {
- + /* Buffer is recyclable; use the new start address
- + * and set fd parameters and DMA mapping direction
- + */
- + fd->bpid = dpa_bp->bpid;
- + DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
- + fd->offset = (uint16_t)(skb->data - buffer_start);
- + dma_dir = DMA_BIDIRECTIONAL;
- +
- + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
- + *offset = skb_headroom(skb) - fd->offset;
- + } else
- +#endif
- + {
- + /* Not recyclable.
- + * We are guaranteed to have at least tx_headroom bytes
- + * available, so just use that for offset.
- + */
- + fd->bpid = 0xff;
- + buffer_start = skb->data - priv->tx_headroom;
- + fd->offset = priv->tx_headroom;
- + dma_dir = DMA_TO_DEVICE;
- +
- + /* The buffer will be Tx-confirmed, but the TxConf cb must
- + * necessarily look at our Tx private data to retrieve the
- + * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
- + */
- + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
- + }
- +
- + /* Enable L3/L4 hardware checksum computation.
- + *
- + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- + * need to write into the skb.
- + */
- + err = dpa_enable_tx_csum(priv, skb, fd,
- + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
- + if (unlikely(err < 0)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev, "HW csum error: %d\n", err);
- + return err;
- + }
- +
- + /* Fill in the rest of the FD fields */
- + fd->format = qm_fd_contig;
- + fd->length20 = skb->len;
- + fd->cmd |= FM_FD_CMD_FCO;
- +
- + /* Map the entire buffer size that may be seen by FMan, but no more */
- + addr = dma_map_single(dpa_bp->dev, skbh,
- + skb_tail_pointer(skb) - buffer_start, dma_dir);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev, "dma_map_single() failed\n");
- + return -EINVAL;
- + }
- + fd->addr = addr;
- +
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(skb_to_contig_fd);
- +
- +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
- + struct sk_buff *skb, struct qm_fd *fd)
- +{
- + struct dpa_bp *dpa_bp = priv->dpa_bp;
- + dma_addr_t addr;
- + dma_addr_t sg_addr;
- + struct sk_buff **skbh;
- + struct net_device *net_dev = priv->net_dev;
- + int sg_len;
- + int err;
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + unsigned long boundary;
- + int k;
- +#endif
- +
- + struct qm_sg_entry *sgt;
- + void *sgt_buf;
- + void *buffer_start;
- + skb_frag_t *frag;
- + int i, j;
- + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- + const int nr_frags = skb_shinfo(skb)->nr_frags;
- +
- + fd->format = qm_fd_sg;
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + /* get a page frag to store the SGTable */
- + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
- + if (unlikely(!sgt_buf)) {
- + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
- + return -ENOMEM;
- + }
- +
- + /* it seems that the memory allocator does not zero the allocated mem */
- + memset(sgt_buf, 0, priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
- +#else
- + /* get a page frag to store the SGTable */
- + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags));
- + if (unlikely(!sgt_buf)) {
- + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
- + return -ENOMEM;
- + }
- +
- + memset(sgt_buf, 0, priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags));
- +#endif
- +
- + /* Enable L3/L4 hardware checksum computation.
- + *
- + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- + * need to write into the skb.
- + */
- + err = dpa_enable_tx_csum(priv, skb, fd,
- + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
- + if (unlikely(err < 0)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev, "HW csum error: %d\n", err);
- + goto csum_failed;
- + }
- +
- + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
- + sg_len = skb_headlen(skb);
- + qm_sg_entry_set_bpid(&sgt[0], 0xff);
- + qm_sg_entry_set_offset(&sgt[0], 0);
- + qm_sg_entry_set_len(&sgt[0], sg_len);
- + qm_sg_entry_set_ext(&sgt[0], 0);
- + qm_sg_entry_set_final(&sgt[0], 0);
- +
- + addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sg0_map_failed;
- +
- + }
- +
- + qm_sg_entry_set64(&sgt[0], addr);
- +
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + j = 0;
- + if (unlikely(HAS_DMA_ISSUE(skb->data, sg_len))) {
- + boundary = BOUNDARY_4K(skb->data, sg_len);
- + qm_sg_entry_set_len(&sgt[j], boundary -
- + (unsigned long)skb->data);
- +
- + j++;
- + qm_sg_entry_set_bpid(&sgt[j], 0xff);
- + qm_sg_entry_set_offset(&sgt[j], 0);
- + qm_sg_entry_set_len(&sgt[j],
- + ((unsigned long)skb->data + (unsigned long)sg_len) -
- + boundary);
- + qm_sg_entry_set_ext(&sgt[j], 0);
- + qm_sg_entry_set_final(&sgt[j], 0);
- +
- + /* keep the offset in the address */
- + qm_sg_entry_set64(&sgt[j], addr +
- + (boundary -
- + (unsigned long)skb->data));
- + }
- + j++;
- +
- + /* populate the rest of SGT entries */
- + for (i = 1; i <= nr_frags; i++, j++) {
- + frag = &skb_shinfo(skb)->frags[i - 1];
- + qm_sg_entry_set_bpid(&sgt[j], 0xff);
- + qm_sg_entry_set_offset(&sgt[j], 0);
- + qm_sg_entry_set_len(&sgt[j], frag->size);
- + qm_sg_entry_set_ext(&sgt[j], 0);
- +
- + DPA_BUG_ON(!skb_frag_page(frag));
- + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
- + dma_dir);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sg_map_failed;
- + }
- +
- + /* keep the offset in the address */
- + qm_sg_entry_set64(&sgt[j], addr);
- +
- + if (unlikely(HAS_DMA_ISSUE(frag, frag->size))) {
- + boundary = BOUNDARY_4K(frag, frag->size);
- + qm_sg_entry_set_len(&sgt[j], boundary -
- + (unsigned long)frag);
- +
- + j++;
- + qm_sg_entry_set_bpid(&sgt[j], 0xff);
- + qm_sg_entry_set_offset(&sgt[j], 0);
- + qm_sg_entry_set_len(&sgt[j],
- + ((unsigned long)frag->size -
- + (boundary - (unsigned long)frag)));
- + qm_sg_entry_set_ext(&sgt[j], 0);
- +
- + /* keep the offset in the address */
- + qm_sg_entry_set64(&sgt[j], addr +
- + (boundary - (unsigned long)frag));
- + }
- +
- + if (i == nr_frags)
- + qm_sg_entry_set_final(&sgt[j], 1);
- + else
- + qm_sg_entry_set_final(&sgt[j], 0);
- +#else
- +
- + /* populate the rest of SGT entries */
- + for (i = 1; i <= nr_frags; i++) {
- + frag = &skb_shinfo(skb)->frags[i - 1];
- + qm_sg_entry_set_bpid(&sgt[i], 0xff);
- + qm_sg_entry_set_offset(&sgt[i], 0);
- + qm_sg_entry_set_len(&sgt[i], frag->size);
- + qm_sg_entry_set_ext(&sgt[i], 0);
- +
- + if (i == nr_frags)
- + qm_sg_entry_set_final(&sgt[i], 1);
- + else
- + qm_sg_entry_set_final(&sgt[i], 0);
- +
- + DPA_BUG_ON(!skb_frag_page(frag));
- + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
- + dma_dir);
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sg_map_failed;
- + }
- +
- + /* keep the offset in the address */
- + qm_sg_entry_set64(&sgt[i], addr);
- +#endif
- + }
- +
- + fd->length20 = skb->len;
- + fd->offset = priv->tx_headroom;
- +
- + /* DMA map the SGT page */
- + buffer_start = (void *)sgt - priv->tx_headroom;
- + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
- + dma_dir);
- +#else
- + addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
- + sizeof(struct qm_sg_entry) * (1 + nr_frags),
- + dma_dir);
- +#endif
- + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- + dev_err(dpa_bp->dev, "DMA mapping failed");
- + err = -EINVAL;
- + goto sgt_map_failed;
- + }
- +
- + fd->bpid = 0xff;
- + fd->cmd |= FM_FD_CMD_FCO;
- + fd->addr = addr;
- +
- + return 0;
- +
- +sgt_map_failed:
- +sg_map_failed:
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + for (k = 0; k < j; k++) {
- + sg_addr = qm_sg_addr(&sgt[k]);
- + dma_unmap_page(dpa_bp->dev, sg_addr,
- + qm_sg_entry_get_len(&sgt[k]), dma_dir);
- + }
- +#else
- + for (j = 0; j < i; j++) {
- + sg_addr = qm_sg_addr(&sgt[j]);
- + dma_unmap_page(dpa_bp->dev, sg_addr,
- + qm_sg_entry_get_len(&sgt[j]), dma_dir);
- + }
- +#endif
- +sg0_map_failed:
- +csum_failed:
- + put_page(virt_to_head_page(sgt_buf));
- +
- + return err;
- +}
- +EXPORT_SYMBOL(skb_to_sg_fd);
- +
- +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv;
- + const int queue_mapping = dpa_get_queue_mapping(skb);
- + struct qman_fq *egress_fq, *conf_fq;
- +
- +#ifdef CONFIG_FSL_DPAA_HOOKS
- + /* If there is a Tx hook, run it. */
- + if (dpaa_eth_hooks.tx &&
- + dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
- + /* won't update any Tx stats */
- + return NETDEV_TX_OK;
- +#endif
- +
- + priv = netdev_priv(net_dev);
- +
- +#ifdef CONFIG_FSL_DPAA_CEETM
- + if (priv->ceetm_en)
- + return ceetm_tx(skb, net_dev);
- +#endif
- +
- + egress_fq = priv->egress_fqs[queue_mapping];
- + conf_fq = priv->conf_fqs[queue_mapping];
- +
- + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
- +}
- +
- +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
- + struct qman_fq *egress_fq, struct qman_fq *conf_fq)
- +{
- + struct dpa_priv_s *priv;
- + struct qm_fd fd;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct rtnl_link_stats64 *percpu_stats;
- + int err = 0;
- + const bool nonlinear = skb_is_nonlinear(skb);
- + int *countptr, offset = 0;
- +
- + priv = netdev_priv(net_dev);
- + /* Non-migratable context, safe to use raw_cpu_ptr */
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- + percpu_stats = &percpu_priv->stats;
- + countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
- +
- + clear_fd(&fd);
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
- + fd.cmd |= FM_FD_CMD_UPD;
- +#endif
- +#ifdef CONFIG_FSL_DPAA_TS
- + if (unlikely(priv->ts_tx_en &&
- + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- + fd.cmd |= FM_FD_CMD_UPD;
- + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- +#endif /* CONFIG_FSL_DPAA_TS */
- +
- + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
- + * we don't feed FMan with more fragments than it supports.
- + * Btw, we're using the first sgt entry to store the linear part of
- + * the skb, so we're one extra frag short.
- + */
- + if (nonlinear &&
- + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_ENTRIES_THRESHOLD)) {
- + /* Just create a S/G fd based on the skb */
- + err = skb_to_sg_fd(priv, skb, &fd);
- + percpu_priv->tx_frag_skbuffs++;
- + } else {
- + /* Make sure we have enough headroom to accommodate private
- + * data, parse results, etc. Normally this shouldn't happen if
- + * we're here via the standard kernel stack.
- + */
- + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
- + struct sk_buff *skb_new;
- +
- + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
- + if (unlikely(!skb_new)) {
- + dev_kfree_skb(skb);
- + percpu_stats->tx_errors++;
- + return NETDEV_TX_OK;
- + }
- + dev_kfree_skb(skb);
- + skb = skb_new;
- + }
- +
- + /* We're going to store the skb backpointer at the beginning
- + * of the data buffer, so we need a privately owned skb
- + */
- +
- + /* Code borrowed from skb_unshare(). */
- + if (skb_cloned(skb)) {
- + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
- + kfree_skb(skb);
- + skb = nskb;
- + /* skb_copy() has now linearized the skbuff. */
- + } else if (unlikely(nonlinear)) {
- + /* We are here because the egress skb contains
- + * more fragments than we support. In this case,
- + * we have no choice but to linearize it ourselves.
- + */
- + err = __skb_linearize(skb);
- + }
- + if (unlikely(!skb || err < 0))
- + /* Common out-of-memory error path */
- + goto enomem;
- +
- +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
- + if (unlikely(HAS_DMA_ISSUE(skb->data, skb->len))) {
- + err = skb_to_sg_fd(priv, skb, &fd);
- + percpu_priv->tx_frag_skbuffs++;
- + } else {
- + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
- + }
- +#else
- + /* Finally, create a contig FD from this skb */
- + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
- +#endif
- + }
- + if (unlikely(err < 0))
- + goto skb_to_fd_failed;
- +
- + if (fd.bpid != 0xff) {
- + skb_recycle(skb);
- + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
- + * but we need the skb to look as if returned by build_skb().
- + * We need to manually adjust the tailptr as well.
- + */
- + skb->data = skb->head + offset;
- + skb_reset_tail_pointer(skb);
- +
- + (*countptr)++;
- + percpu_priv->tx_returned++;
- + }
- +
- + if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
- + goto xmit_failed;
- +
- + net_dev->trans_start = jiffies;
- + return NETDEV_TX_OK;
- +
- +xmit_failed:
- + if (fd.bpid != 0xff) {
- + (*countptr)--;
- + percpu_priv->tx_returned--;
- + dpa_fd_release(net_dev, &fd);
- + percpu_stats->tx_errors++;
- + return NETDEV_TX_OK;
- + }
- + _dpa_cleanup_tx_fd(priv, &fd);
- +skb_to_fd_failed:
- +enomem:
- + percpu_stats->tx_errors++;
- + dev_kfree_skb(skb);
- + return NETDEV_TX_OK;
- +}
- +EXPORT_SYMBOL(dpa_tx_extended);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
- @@ -0,0 +1,914 @@
- +/* Copyright 2008-2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_platform.h>
- +#include <linux/etherdevice.h>
- +#include <linux/kthread.h>
- +#include <linux/percpu.h>
- +#include <linux/highmem.h>
- +#include <linux/fsl_qman.h>
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_eth_base.h"
- +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
- +#include "mac.h"
- +
- +/* forward declarations */
- +static enum qman_cb_dqrr_result __hot
- +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq);
- +static enum qman_cb_dqrr_result __hot
- +shared_tx_default_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq);
- +static enum qman_cb_dqrr_result
- +shared_tx_error_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq);
- +static void shared_ern(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_mr_entry *msg);
- +
- +#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver"
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +
- +MODULE_DESCRIPTION(DPA_DESCRIPTION);
- +
- +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
- +static uint16_t shared_tx_timeout = 1000;
- +module_param(shared_tx_timeout, ushort, S_IRUGO);
- +MODULE_PARM_DESC(shared_tx_timeout, "The Tx timeout in ms");
- +
- +static const struct of_device_id dpa_shared_match[];
- +
- +static const struct net_device_ops dpa_shared_ops = {
- + .ndo_open = dpa_start,
- + .ndo_start_xmit = dpa_shared_tx,
- + .ndo_stop = dpa_stop,
- + .ndo_tx_timeout = dpa_timeout,
- + .ndo_get_stats64 = dpa_get_stats64,
- + .ndo_set_mac_address = dpa_set_mac_address,
- + .ndo_validate_addr = eth_validate_addr,
- +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- + .ndo_select_queue = dpa_select_queue,
- +#endif
- + .ndo_change_mtu = dpa_change_mtu,
- + .ndo_set_rx_mode = dpa_set_rx_mode,
- + .ndo_init = dpa_ndo_init,
- + .ndo_set_features = dpa_set_features,
- + .ndo_fix_features = dpa_fix_features,
- + .ndo_do_ioctl = dpa_ioctl,
- +};
- +
- +const struct dpa_fq_cbs_t shared_fq_cbs = {
- + .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } },
- + .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } },
- + .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } },
- + .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } },
- + .egress_ern = { .cb = { .ern = shared_ern } }
- +};
- +EXPORT_SYMBOL(shared_fq_cbs);
- +
- +static inline void * __must_check __attribute__((nonnull))
- +dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
- +{
- + return dpa_bp->vaddr + (addr - dpa_bp->paddr);
- +}
- +
- +static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
- +{
- + int i;
- +
- + for (i = 0; i < priv->bp_count; i++)
- + if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
- + return dpa_bpid2pool(priv->dpa_bp[i].bpid);
- + return ERR_PTR(-ENODEV);
- +}
- +
- +/* Copy to a memory region that requires kmapping from a linear buffer,
- + * taking into account page boundaries in the destination
- + */
- +static void
- +copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
- +{
- + struct page *page;
- + size_t size, offset;
- + void *page_vaddr;
- +
- + while (buf_size > 0) {
- + offset = offset_in_page(phys_start);
- + size = (offset + buf_size > PAGE_SIZE) ?
- + PAGE_SIZE - offset : buf_size;
- +
- + page = pfn_to_page(phys_start >> PAGE_SHIFT);
- + page_vaddr = kmap_atomic(page);
- +
- + memcpy(page_vaddr + offset, src, size);
- +
- + kunmap_atomic(page_vaddr);
- +
- + phys_start += size;
- + src += size;
- + buf_size -= size;
- + }
- +}
- +
- +/* Copy from a memory region that requires kmapping to a linear buffer,
- + * taking into account page boundaries in the source
- + */
- +static void
- +copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
- +{
- + struct page *page;
- + size_t size, offset;
- + void *page_vaddr;
- +
- + while (buf_size > 0) {
- + offset = offset_in_page(phys_start);
- + size = (offset + buf_size > PAGE_SIZE) ?
- + PAGE_SIZE - offset : buf_size;
- +
- + page = pfn_to_page(phys_start >> PAGE_SHIFT);
- + page_vaddr = kmap_atomic(page);
- +
- + memcpy(dest, page_vaddr + offset, size);
- +
- + kunmap_atomic(page_vaddr);
- +
- + phys_start += size;
- + dest += size;
- + buf_size -= size;
- + }
- +}
- +
/* Release a scatter/gather frame descriptor's buffers back to Bman:
 * first free the buffers referenced by the S/G table entries, then the
 * buffer holding the table itself. When the pool is not kernel-mapped
 * (vaddr == NULL, e.g. a USDPAA-owned pool) the table is copied out to a
 * bounce buffer before parsing.
 */
static void
dpa_fd_release_sg(const struct net_device *net_dev,
		  const struct qm_fd *fd)
{
	const struct dpa_priv_s *priv;
	struct qm_sg_entry *sgt;
	struct dpa_bp *_dpa_bp;
	struct bm_buffer _bmb;

	priv = netdev_priv(net_dev);

	_bmb.hi = fd->addr_hi;
	_bmb.lo = fd->addr_lo;

	_dpa_bp = dpa_bpid2pool(fd->bpid);
	BUG_ON(!_dpa_bp);

	if (_dpa_bp->vaddr) {
		/* Pool memory is kernel-mapped: parse the table in place */
		sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
			dpa_fd_offset(fd);
		dpa_release_sgt(sgt);
	} else {
		/* Unmapped pool: bounce the S/G table through kmalloc'd
		 * memory (atomic context - this may be a portal callback)
		 */
		sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
		if (sgt == NULL) {
			if (netif_msg_tx_err(priv) && net_ratelimit())
				netdev_err(net_dev,
					   "Memory allocation failed\n");
			return;
		}

		copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
					dpa_fd_offset(fd),
					min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
					    _dpa_bp->size));
		dpa_release_sgt(sgt);
		kfree(sgt);
	}

	/* Finally return the buffer that held the S/G table itself */
	while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
		cpu_relax();
}
- +
- +static enum qman_cb_dqrr_result __hot
- +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + const struct qm_fd *fd = &dq->fd;
- + struct dpa_bp *dpa_bp;
- + struct sk_buff *skb;
- + struct qm_sg_entry *sgt;
- + int i;
- + void *frag_addr;
- + u32 frag_length;
- + u32 offset;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- +
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + dpa_bp = dpa_bpid2pool(fd->bpid);
- + BUG_ON(!dpa_bp);
- +
- + if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS) != 0) {
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_RX_ERRORS);
- +
- + percpu_priv->stats.rx_errors++;
- +
- + goto out;
- + }
- +
- + skb = __netdev_alloc_skb(net_dev,
- + priv->tx_headroom + dpa_fd_length(fd),
- + GFP_ATOMIC);
- + if (unlikely(skb == NULL)) {
- + if (netif_msg_rx_err(priv) && net_ratelimit())
- + netdev_err(net_dev, "Could not alloc skb\n");
- +
- + percpu_priv->stats.rx_dropped++;
- +
- + goto out;
- + }
- +
- + skb_reserve(skb, priv->tx_headroom);
- +
- + if (fd->format == qm_fd_sg) {
- + if (dpa_bp->vaddr) {
- + sgt = dpa_phys2virt(dpa_bp,
- + qm_fd_addr(fd)) + dpa_fd_offset(fd);
- +
- + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- + offset = qm_sg_entry_get_offset(&sgt[i]);
- + frag_addr = dpa_phys2virt(dpa_bp,
- + qm_sg_addr(&sgt[i]) +
- + offset);
- + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
- + frag_length = qm_sg_entry_get_len(&sgt[i]);
- +
- + /* copy from sgt[i] */
- + memcpy(skb_put(skb, frag_length), frag_addr,
- + frag_length);
- + if (qm_sg_entry_get_final(&sgt[i]))
- + break;
- + }
- + } else {
- + sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
- + GFP_ATOMIC);
- + if (unlikely(sgt == NULL)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev,
- + "Memory allocation failed\n");
- + return -ENOMEM;
- + }
- +
- + copy_from_unmapped_area(sgt,
- + qm_fd_addr(fd) + dpa_fd_offset(fd),
- + min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
- + dpa_bp->size));
- +
- + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
- + frag_length = qm_sg_entry_get_len(&sgt[i]);
- + copy_from_unmapped_area(
- + skb_put(skb, frag_length),
- + qm_sg_addr(&sgt[i]) +
- + qm_sg_entry_get_offset(&sgt[i]),
- + frag_length);
- +
- + if (qm_sg_entry_get_final(&sgt[i]))
- + break;
- + }
- +
- + kfree(sgt);
- + }
- + goto skb_copied;
- + }
- +
- + /* otherwise fd->format == qm_fd_contig */
- + if (dpa_bp->vaddr) {
- + /* Fill the SKB */
- + memcpy(skb_put(skb, dpa_fd_length(fd)),
- + dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
- + dpa_fd_offset(fd), dpa_fd_length(fd));
- + } else {
- + copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
- + qm_fd_addr(fd) + dpa_fd_offset(fd),
- + dpa_fd_length(fd));
- + }
- +
- +skb_copied:
- + skb->protocol = eth_type_trans(skb, net_dev);
- +
- + /* IP Reassembled frames are allowed to be larger than MTU */
- + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
- + !(fd->status & FM_FD_IPR))) {
- + percpu_priv->stats.rx_dropped++;
- + dev_kfree_skb_any(skb);
- + goto out;
- + }
- +
- + if (unlikely(netif_rx(skb) != NET_RX_SUCCESS))
- + goto out;
- + else {
- + percpu_priv->stats.rx_packets++;
- + percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
- + }
- +
- +out:
- + if (fd->format == qm_fd_sg)
- + dpa_fd_release_sg(net_dev, fd);
- + else
- + dpa_fd_release(net_dev, fd);
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +static enum qman_cb_dqrr_result
- +shared_tx_error_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct dpa_bp *dpa_bp;
- + const struct qm_fd *fd = &dq->fd;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- +
- + dpa_bp = dpa_bpid2pool(fd->bpid);
- + BUG_ON(!dpa_bp);
- +
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_TX_ERRORS);
- +
- + if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
- + dpa_fd_release_sg(net_dev, fd);
- + else
- + dpa_fd_release(net_dev, fd);
- +
- + percpu_priv->stats.tx_errors++;
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +static enum qman_cb_dqrr_result __hot
- +shared_tx_default_dqrr(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_dqrr_entry *dq)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct dpa_bp *dpa_bp;
- + const struct qm_fd *fd = &dq->fd;
- +
- + net_dev = ((struct dpa_fq *)fq)->net_dev;
- + priv = netdev_priv(net_dev);
- +
- + dpa_bp = dpa_bpid2pool(fd->bpid);
- + BUG_ON(!dpa_bp);
- +
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
- + if (netif_msg_hw(priv) && net_ratelimit())
- + netdev_warn(net_dev, "FD status = 0x%08x\n",
- + fd->status & FM_FD_STAT_TX_ERRORS);
- +
- + percpu_priv->stats.tx_errors++;
- + }
- +
- + if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
- + dpa_fd_release_sg(net_dev, fd);
- + else
- + dpa_fd_release(net_dev, fd);
- +
- + percpu_priv->tx_confirm++;
- +
- + return qman_cb_dqrr_consume;
- +}
- +
- +static void shared_ern(struct qman_portal *portal,
- + struct qman_fq *fq,
- + const struct qm_mr_entry *msg)
- +{
- + struct net_device *net_dev;
- + const struct dpa_priv_s *priv;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
- +
- + net_dev = dpa_fq->net_dev;
- + priv = netdev_priv(net_dev);
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + dpa_fd_release(net_dev, &msg->ern.fd);
- +
- + percpu_priv->stats.tx_dropped++;
- + percpu_priv->stats.tx_fifo_errors++;
- + count_ern(percpu_priv, msg);
- +}
- +
- +int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
- +{
- + struct dpa_bp *dpa_bp;
- + struct bm_buffer bmb;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct dpa_priv_s *priv;
- + struct qm_fd fd;
- + int queue_mapping;
- + int err;
- + void *dpa_bp_vaddr;
- + fm_prs_result_t parse_results;
- + fm_prs_result_t *parse_results_ref;
- + struct qman_fq *egress_fq, *conf_fq;
- +
- + priv = netdev_priv(net_dev);
- + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- +
- + memset(&fd, 0, sizeof(fd));
- + fd.format = qm_fd_contig;
- +
- + queue_mapping = smp_processor_id();
- +
- + dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
- + if (unlikely(!dpa_bp)) {
- + percpu_priv->stats.tx_errors++;
- + err = PTR_ERR(dpa_bp);
- + goto bpools_too_small_error;
- + }
- +
- + err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
- + if (unlikely(err <= 0)) {
- + percpu_priv->stats.tx_errors++;
- + if (err == 0)
- + err = -ENOMEM;
- + goto buf_acquire_failed;
- + }
- + fd.bpid = dpa_bp->bpid;
- +
- + fd.length20 = skb_headlen(skb);
- + fd.addr_hi = (uint8_t)bmb.hi;
- + fd.addr_lo = bmb.lo;
- + fd.offset = priv->tx_headroom;
- +
- + /* The virtual address of the buffer pool is expected to be NULL
- + * in scenarios like MAC-less or Shared-MAC between Linux and
- + * USDPAA. In this case the buffers are dynamically mapped/unmapped.
- + */
- + if (dpa_bp->vaddr) {
- + dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
- +
- + /* Copy the packet payload */
- + skb_copy_from_linear_data(skb,
- + dpa_bp_vaddr + dpa_fd_offset(&fd),
- + dpa_fd_length(&fd));
- +
- + /* if no mac device or peer set it's macless */
- + if (!priv->mac_dev || priv->peer) {
- + parse_results_ref = (fm_prs_result_t *) (dpa_bp_vaddr +
- + DPA_TX_PRIV_DATA_SIZE);
- + /* Default values; FMan will not generate/validate
- + * CSUM;
- + */
- + parse_results_ref->l3r = 0;
- + parse_results_ref->l4r = 0;
- + parse_results_ref->ip_off[0] = 0xff;
- + parse_results_ref->ip_off[1] = 0xff;
- + parse_results_ref->l4_off = 0xff;
- +
- + fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
- + } else {
- + /* Enable L3/L4 hardware checksum computation,
- + * if applicable
- + */
- + err = dpa_enable_tx_csum(priv, skb, &fd,
- + dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
- +
- + if (unlikely(err < 0)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev,
- + "Tx HW csum error: %d\n", err);
- + percpu_priv->stats.tx_errors++;
- + goto l3_l4_csum_failed;
- + }
- + }
- +
- + } else {
- + if (!priv->mac_dev || priv->peer) {
- + /* Default values; FMan will not generate/validate
- + * CSUM;
- + */
- + parse_results.l3r = 0;
- + parse_results.l4r = 0;
- + parse_results.ip_off[0] = 0xff;
- + parse_results.ip_off[1] = 0xff;
- + parse_results.l4_off = 0xff;
- +
- + fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
- + } else {
- + /* Enable L3/L4 hardware checksum computation,
- + * if applicable
- + */
- + err = dpa_enable_tx_csum(priv, skb, &fd,
- + (char *)&parse_results);
- +
- + if (unlikely(err < 0)) {
- + if (netif_msg_tx_err(priv) && net_ratelimit())
- + netdev_err(net_dev,
- + "Tx HW csum error: %d\n", err);
- + percpu_priv->stats.tx_errors++;
- + goto l3_l4_csum_failed;
- + }
- +
- + }
- +
- + copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
- + &parse_results,
- + DPA_PARSE_RESULTS_SIZE);
- +
- + copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
- + skb->data,
- + dpa_fd_length(&fd));
- + }
- +
- + egress_fq = priv->egress_fqs[queue_mapping];
- + conf_fq = priv->conf_fqs[queue_mapping];
- +
- + err = dpa_xmit(priv, &percpu_priv->stats, &fd, egress_fq, conf_fq);
- +
- +l3_l4_csum_failed:
- +bpools_too_small_error:
- +buf_acquire_failed:
- + /* We're done with the skb */
- + dev_kfree_skb(skb);
- +
- + /* err remains unused, NETDEV_TX_OK must be returned here */
- + return NETDEV_TX_OK;
- +}
- +EXPORT_SYMBOL(dpa_shared_tx);
- +
- +static int dpa_shared_netdev_init(struct device_node *dpa_node,
- + struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + const uint8_t *mac_addr;
- +
- + net_dev->netdev_ops = &dpa_shared_ops;
- +
- + net_dev->mem_start = priv->mac_dev->res->start;
- + net_dev->mem_end = priv->mac_dev->res->end;
- +
- + mac_addr = priv->mac_dev->addr;
- +
- + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- + NETIF_F_LLTX);
- +
- + return dpa_netdev_init(net_dev, mac_addr, shared_tx_timeout);
- +}
- +
- +#ifdef CONFIG_PM
- +
- +static int dpa_shared_suspend(struct device *dev)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- + int err = 0;
- +
- + net_dev = dev_get_drvdata(dev);
- + if (net_dev->flags & IFF_UP) {
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + err = fm_port_suspend(mac_dev->port_dev[RX]);
- + if (err)
- + goto port_suspend_failed;
- +
- + err = fm_port_suspend(mac_dev->port_dev[TX]);
- + if (err)
- + err = fm_port_resume(mac_dev->port_dev[RX]);
- + }
- +
- +port_suspend_failed:
- + return err;
- +}
- +
- +static int dpa_shared_resume(struct device *dev)
- +{
- + struct net_device *net_dev;
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- + int err = 0;
- +
- + net_dev = dev_get_drvdata(dev);
- + if (net_dev->flags & IFF_UP) {
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + err = fm_port_resume(mac_dev->port_dev[TX]);
- + if (err)
- + goto port_resume_failed;
- +
- + err = fm_port_resume(mac_dev->port_dev[RX]);
- + if (err)
- + err = fm_port_suspend(mac_dev->port_dev[TX]);
- + }
- +
- +port_resume_failed:
- + return err;
- +}
- +
/* Device PM callbacks; exposed through SHARED_PM_OPS (NULL when
 * CONFIG_PM is disabled).
 */
static const struct dev_pm_ops shared_pm_ops = {
	.suspend = dpa_shared_suspend,
	.resume = dpa_shared_resume,
};
- +
- +#define SHARED_PM_OPS (&shared_pm_ops)
- +
- +#else /* CONFIG_PM */
- +
- +#define SHARED_PM_OPS NULL
- +
- +#endif /* CONFIG_PM */
- +
- +static int
- +dpaa_eth_shared_probe(struct platform_device *_of_dev)
- +{
- + int err = 0, i, channel;
- + struct device *dev;
- + struct device_node *dpa_node;
- + struct dpa_bp *dpa_bp;
- + struct dpa_fq *dpa_fq, *tmp;
- + size_t count;
- + struct net_device *net_dev = NULL;
- + struct dpa_priv_s *priv = NULL;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct fm_port_fqs port_fqs;
- + struct dpa_buffer_layout_s *buf_layout = NULL;
- + struct mac_device *mac_dev;
- + struct task_struct *kth;
- +
- + dev = &_of_dev->dev;
- +
- + dpa_node = dev->of_node;
- +
- + if (!of_device_is_available(dpa_node))
- + return -ENODEV;
- +
- + /* Get the buffer pools assigned to this interface */
- + dpa_bp = dpa_bp_probe(_of_dev, &count);
- + if (IS_ERR(dpa_bp))
- + return PTR_ERR(dpa_bp);
- +
- + for (i = 0; i < count; i++)
- + dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
- +
- + /* Allocate this early, so we can store relevant information in
- + * the private area (needed by 1588 code in dpa_mac_probe)
- + */
- + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
- + if (!net_dev) {
- + dev_err(dev, "alloc_etherdev_mq() failed\n");
- + return -ENOMEM;
- + }
- +
- + /* Do this here, so we can be verbose early */
- + SET_NETDEV_DEV(net_dev, dev);
- + dev_set_drvdata(dev, net_dev);
- +
- + priv = netdev_priv(net_dev);
- + priv->net_dev = net_dev;
- + strcpy(priv->if_type, "shared");
- +
- + priv->msg_enable = netif_msg_init(advanced_debug, -1);
- +
- + mac_dev = dpa_mac_probe(_of_dev);
- + if (IS_ERR(mac_dev) || !mac_dev) {
- + err = PTR_ERR(mac_dev);
- + goto mac_probe_failed;
- + }
- +
- + /* We have physical ports, so we need to establish
- + * the buffer layout.
- + */
- + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
- + GFP_KERNEL);
- + if (!buf_layout) {
- + dev_err(dev, "devm_kzalloc() failed\n");
- + goto alloc_failed;
- + }
- + dpa_set_buffers_layout(mac_dev, buf_layout);
- +
- + INIT_LIST_HEAD(&priv->dpa_fq_list);
- +
- + memset(&port_fqs, 0, sizeof(port_fqs));
- +
- + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
- + false, RX);
- + if (!err)
- + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
- + &port_fqs, false, TX);
- + if (err < 0)
- + goto fq_probe_failed;
- +
- + /* bp init */
- + priv->bp_count = count;
- + err = dpa_bp_create(net_dev, dpa_bp, count);
- + if (err < 0)
- + goto bp_create_failed;
- +
- + priv->mac_dev = mac_dev;
- +
- + channel = dpa_get_channel();
- +
- + if (channel < 0) {
- + err = channel;
- + goto get_channel_failed;
- + }
- +
- + priv->channel = (uint16_t)channel;
- +
- + /* Start a thread that will walk the cpus with affine portals
- + * and add this pool channel to each's dequeue mask.
- + */
- + kth = kthread_run(dpaa_eth_add_channel,
- + (void *)(unsigned long)priv->channel,
- + "dpaa_%p:%d", net_dev, priv->channel);
- + if (!kth) {
- + err = -ENOMEM;
- + goto add_channel_failed;
- + }
- +
- + dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]);
- +
- + /* Create a congestion group for this netdev, with
- + * dynamically-allocated CGR ID.
- + * Must be executed after probing the MAC, but before
- + * assigning the egress FQs to the CGRs.
- + */
- + err = dpaa_eth_cgr_init(priv);
- + if (err < 0) {
- + dev_err(dev, "Error initializing CGR\n");
- + goto cgr_init_failed;
- + }
- +
- + /* Add the FQs to the interface, and make them active */
- + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
- + err = dpa_fq_init(dpa_fq, false);
- + if (err < 0)
- + goto fq_alloc_failed;
- + }
- +
- + priv->buf_layout = buf_layout;
- + priv->tx_headroom =
- + dpa_get_headroom(&priv->buf_layout[TX]);
- +
- + /* All real interfaces need their ports initialized */
- + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
- + buf_layout, dev);
- +
- + /* Now we need to initialize either a private or shared interface */
- + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
- +
- + if (priv->percpu_priv == NULL) {
- + dev_err(dev, "devm_alloc_percpu() failed\n");
- + err = -ENOMEM;
- + goto alloc_percpu_failed;
- + }
- + for_each_possible_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- + memset(percpu_priv, 0, sizeof(*percpu_priv));
- + }
- +
- + err = dpa_shared_netdev_init(dpa_node, net_dev);
- +
- + if (err < 0)
- + goto netdev_init_failed;
- +
- + dpaa_eth_sysfs_init(&net_dev->dev);
- +
- + pr_info("fsl_dpa_shared: Probed shared interface %s\n",
- + net_dev->name);
- +
- + return 0;
- +
- +netdev_init_failed:
- +alloc_percpu_failed:
- +fq_alloc_failed:
- + if (net_dev) {
- + dpa_fq_free(dev, &priv->dpa_fq_list);
- + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- + qman_delete_cgr(&priv->cgr_data.cgr);
- + }
- +cgr_init_failed:
- +add_channel_failed:
- +get_channel_failed:
- + if (net_dev)
- + dpa_bp_free(priv);
- +bp_create_failed:
- +fq_probe_failed:
- + devm_kfree(dev, buf_layout);
- +alloc_failed:
- +mac_probe_failed:
- + dev_set_drvdata(dev, NULL);
- + if (net_dev)
- + free_netdev(net_dev);
- +
- + return err;
- +}
- +
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id dpa_shared_match[] = {
	{
		.compatible = "fsl,dpa-ethernet-shared"
	},
	{}
};
MODULE_DEVICE_TABLE(of, dpa_shared_match);
- +
/* Platform driver glue for shared-MAC DPAA Ethernet devices */
static struct platform_driver dpa_shared_driver = {
	.driver = {
		.name = KBUILD_MODNAME "-shared",
		.of_match_table = dpa_shared_match,
		.owner = THIS_MODULE,
		.pm = SHARED_PM_OPS,
	},
	.probe = dpaa_eth_shared_probe,
	.remove = dpa_remove
};
- +
- +static int __init __cold dpa_shared_load(void)
- +{
- + int _errno;
- +
- + pr_info(DPA_DESCRIPTION "\n");
- +
- + /* Initialize dpaa_eth mirror values */
- + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
- + dpa_max_frm = fm_get_max_frm();
- +
- + _errno = platform_driver_register(&dpa_shared_driver);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + }
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + return _errno;
- +}
- +module_init(dpa_shared_load);
- +
/* Module exit point: unregister the platform driver (per-device teardown
 * happens in dpa_remove()).
 */
static void __exit __cold dpa_shared_unload(void)
{
	pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
		 KBUILD_BASENAME".c", __func__);

	platform_driver_unregister(&dpa_shared_driver);
}
module_exit(dpa_shared_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
- @@ -0,0 +1,278 @@
- +/* Copyright 2008-2012 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/kthread.h>
- +#include <linux/io.h>
- +#include <linux/of_net.h>
- +#include "dpaa_eth.h"
- +#include "mac.h" /* struct mac_device */
- +#ifdef CONFIG_FSL_DPAA_1588
- +#include "dpaa_1588.h"
- +#endif
- +
- +static ssize_t dpaa_eth_show_addr(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct mac_device *mac_dev = priv->mac_dev;
- +
- + if (mac_dev)
- + return sprintf(buf, "%llx",
- + (unsigned long long)mac_dev->res->start);
- + else
- + return sprintf(buf, "none");
- +}
- +
- +static ssize_t dpaa_eth_show_type(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + ssize_t res = 0;
- +
- + if (priv)
- + res = sprintf(buf, "%s", priv->if_type);
- +
- + return res;
- +}
- +
/* sysfs "fqids": list the frame-queue ids of this interface, coalescing
 * runs of adjacent ids of the same FQ type into "first - last" ranges.
 * A pending range is flushed whenever the id sequence breaks or the FQ
 * type changes, and once more after the loop for the final run.
 */
static ssize_t dpaa_eth_show_fqids(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
	ssize_t bytes = 0;
	int i = 0;
	char *str;
	struct dpa_fq *fq;
	struct dpa_fq *tmp;
	struct dpa_fq *prev = NULL;
	u32 first_fqid = 0;
	u32 last_fqid = 0;
	char *prevstr = NULL;

	list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
		switch (fq->fq_type) {
		case FQ_TYPE_RX_DEFAULT:
			str = "Rx default";
			break;
		case FQ_TYPE_RX_ERROR:
			str = "Rx error";
			break;
		case FQ_TYPE_RX_PCD:
			str = "Rx PCD";
			break;
		case FQ_TYPE_TX_CONFIRM:
			str = "Tx default confirmation";
			break;
		case FQ_TYPE_TX_CONF_MQ:
			str = "Tx confirmation (mq)";
			break;
		case FQ_TYPE_TX_ERROR:
			str = "Tx error";
			break;
		case FQ_TYPE_TX:
			str = "Tx";
			break;
		case FQ_TYPE_RX_PCD_HI_PRIO:
			str ="Rx PCD High Priority";
			break;
		default:
			str = "Unknown";
		}

		/* Run ended (non-adjacent id or different type): print the
		 * pending range before starting a new one below.
		 */
		if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
				str != prevstr)) {
			if (last_fqid == first_fqid)
				bytes += sprintf(buf + bytes,
					"%s: %d\n", prevstr, prev->fqid);
			else
				bytes += sprintf(buf + bytes,
					"%s: %d - %d\n", prevstr,
					first_fqid, last_fqid);
		}

		/* Extend the current run, or start a new single-id run */
		if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
			last_fqid = fq->fqid;
		else
			first_fqid = last_fqid = fq->fqid;

		prev = fq;
		prevstr = str;
		i++;
	}

	/* Flush the final pending run, if the list was non-empty */
	if (prev) {
		if (last_fqid == first_fqid)
			bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
					prev->fqid);
		else
			bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
					first_fqid, last_fqid);
	}

	return bytes;
}
- +
- +static ssize_t dpaa_eth_show_bpids(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + ssize_t bytes = 0;
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct dpa_bp *dpa_bp = priv->dpa_bp;
- + int i = 0;
- +
- + for (i = 0; i < priv->bp_count; i++)
- + bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n",
- + dpa_bp[i].bpid);
- +
- + return bytes;
- +}
- +
- +static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct mac_device *mac_dev = priv->mac_dev;
- + int n = 0;
- +
- + if (mac_dev)
- + n = fm_mac_dump_regs(mac_dev, buf, n);
- + else
- + return sprintf(buf, "no mac registers\n");
- +
- + return n;
- +}
- +
- +static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct mac_device *mac_dev = priv->mac_dev;
- + int n = 0;
- +
- + if (mac_dev)
- + n = fm_mac_dump_rx_stats(mac_dev, buf, n);
- + else
- + return sprintf(buf, "no mac rx stats\n");
- +
- + return n;
- +}
- +
- +static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + struct mac_device *mac_dev = priv->mac_dev;
- + int n = 0;
- +
- + if (mac_dev)
- + n = fm_mac_dump_tx_stats(mac_dev, buf, n);
- + else
- + return sprintf(buf, "no mac tx stats\n");
- +
- + return n;
- +}
- +
- +#ifdef CONFIG_FSL_DPAA_1588
- +static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
- + struct device_attribute *attr, char *buf)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- +
- + if (priv->tsu && priv->tsu->valid)
- + return sprintf(buf, "1\n");
- + else
- + return sprintf(buf, "0\n");
- +}
- +
- +static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
- + struct device_attribute *attr,
- + const char *buf, size_t count)
- +{
- + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
- + unsigned int num;
- + unsigned long flags;
- +
- + if (kstrtouint(buf, 0, &num) < 0)
- + return -EINVAL;
- +
- + local_irq_save(flags);
- +
- + if (num) {
- + if (priv->tsu)
- + priv->tsu->valid = TRUE;
- + } else {
- + if (priv->tsu)
- + priv->tsu->valid = FALSE;
- + }
- +
- + local_irq_restore(flags);
- +
- + return count;
- +}
- +#endif
- +
/* sysfs attributes registered per netdev: read-only views of the device
 * layout and MAC state, plus a read-write 1588 enable switch when PTP
 * support is compiled in.
 */
static struct device_attribute dpaa_eth_attrs[] = {
	__ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
	__ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
	__ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
	__ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
	__ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
	__ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
	__ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
#ifdef CONFIG_FSL_DPAA_1588
	__ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
	       dpaa_eth_set_ptp_1588),
#endif
};
- +
- +void dpaa_eth_sysfs_init(struct device *dev)
- +{
- + int i;
- +
- + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
- + if (device_create_file(dev, &dpaa_eth_attrs[i])) {
- + dev_err(dev, "Error creating sysfs file\n");
- + while (i > 0)
- + device_remove_file(dev, &dpaa_eth_attrs[--i]);
- + return;
- + }
- +}
- +EXPORT_SYMBOL(dpaa_eth_sysfs_init);
- +
/* Remove all the attribute files created by dpaa_eth_sysfs_init(). */
void dpaa_eth_sysfs_remove(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
		device_remove_file(dev, &dpaa_eth_attrs[i]);
}
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
- @@ -0,0 +1,144 @@
- +/* Copyright 2013 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#undef TRACE_SYSTEM
- +#define TRACE_SYSTEM dpaa_eth
- +
- +#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
- +#define _DPAA_ETH_TRACE_H
- +
- +#include <linux/skbuff.h>
- +#include <linux/netdevice.h>
- +#include "dpaa_eth.h"
- +#include <linux/tracepoint.h>
- +
/* Map qm_fd format enum values to printable names for __print_symbolic() */
#define fd_format_name(format) { qm_fd_##format, #format }
#define fd_format_list \
	fd_format_name(contig), \
	fd_format_name(sg)
/* Common printk format shared by all fd trace events below */
#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
	" status=0x%08x"
- +
- +/* This is used to declare a class of events.
- + * individual events of this type will be defined below.
- + */
- +
- +/* Store details about a frame descriptor and the FQ on which it was
- + * transmitted/received.
- + */
DECLARE_EVENT_CLASS(dpaa_eth_fd,
	/* Trace function prototype */
	TP_PROTO(struct net_device *netdev,
		 struct qman_fq *fq,
		 const struct qm_fd *fd),

	/* Repeat argument list here */
	TP_ARGS(netdev, fq, fd),

	/* A structure containing the relevant information we want to record.
	 * Declare name and type for each normal element, name, type and size
	 * for arrays. Use __string for variable length strings.
	 */
	TP_STRUCT__entry(
		__field(u32, fqid)
		__field(u64, fd_addr)
		__field(u8, fd_format)
		__field(u16, fd_offset)
		__field(u32, fd_length)
		__field(u32, fd_status)
		/* netdev name is copied into the ring so the event remains
		 * readable after the device goes away
		 */
		__string(name, netdev->name)
	),

	/* The function that assigns values to the above declared fields */
	TP_fast_assign(
		__entry->fqid = fq->fqid;
		__entry->fd_addr = qm_fd_addr_get64(fd);
		__entry->fd_format = fd->format;
		__entry->fd_offset = dpa_fd_offset(fd);
		__entry->fd_length = dpa_fd_length(fd);
		__entry->fd_status = fd->status;
		__assign_str(name, netdev->name);
	),

	/* This is what gets printed when the trace event is triggered */
	/* TODO: print the status using __print_flags() */
	TP_printk(TR_FMT,
		  __get_str(name), __entry->fqid, __entry->fd_addr,
		  __print_symbolic(__entry->fd_format, fd_format_list),
		  __entry->fd_offset, __entry->fd_length, __entry->fd_status)
);
- +
- +/* Now declare events of the above type. Format is:
- + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
- + */
- +
- +/* Tx (egress) fd */
- +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
- +
- + TP_PROTO(struct net_device *netdev,
- + struct qman_fq *fq,
- + const struct qm_fd *fd),
- +
- + TP_ARGS(netdev, fq, fd)
- +);
- +
- +/* Rx fd */
- +DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
- +
- + TP_PROTO(struct net_device *netdev,
- + struct qman_fq *fq,
- + const struct qm_fd *fd),
- +
- + TP_ARGS(netdev, fq, fd)
- +);
- +
- +/* Tx confirmation fd */
- +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
- +
- + TP_PROTO(struct net_device *netdev,
- + struct qman_fq *fq,
- + const struct qm_fd *fd),
- +
- + TP_ARGS(netdev, fq, fd)
- +);
- +
- +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
- + * The syntax is the same as for DECLARE_EVENT_CLASS().
- + */
- +
- +#endif /* _DPAA_ETH_TRACE_H */
- +
- +/* This must be outside ifdef _DPAA_ETH_TRACE_H */
- +#undef TRACE_INCLUDE_PATH
- +#define TRACE_INCLUDE_PATH .
- +#undef TRACE_INCLUDE_FILE
- +#define TRACE_INCLUDE_FILE dpaa_eth_trace
- +#include <trace/define_trace.h>
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
- @@ -0,0 +1,544 @@
- +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/string.h>
- +
- +#include "dpaa_eth.h"
- +#include "mac.h" /* struct mac_device */
- +#include "dpaa_eth_common.h"
- +
- +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
- + "interrupts",
- + "rx packets",
- + "tx packets",
- + "tx recycled",
- + "tx confirm",
- + "tx S/G",
- + "rx S/G",
- + "tx error",
- + "rx error",
- + "bp count"
- +};
- +
- +static char dpa_stats_global[][ETH_GSTRING_LEN] = {
- + /* dpa rx errors */
- + "rx dma error",
- + "rx frame physical error",
- + "rx frame size error",
- + "rx header error",
- + "rx csum error",
- +
- + /* demultiplexing errors */
- + "qman cg_tdrop",
- + "qman wred",
- + "qman error cond",
- + "qman early window",
- + "qman late window",
- + "qman fq tdrop",
- + "qman fq retired",
- + "qman orp disabled",
- +
- + /* congestion related stats */
- + "congestion time (ms)",
- + "entered congestion",
- + "congested (0/1)"
- +};
- +
- +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
- +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
- +
- +static int __cold dpa_get_settings(struct net_device *net_dev,
- + struct ethtool_cmd *et_cmd)
- +{
- + int _errno;
- + struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- +
- + if (priv->mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
- + netdev_dbg(net_dev, "phy device not initialized\n");
- + return 0;
- + }
- +
- + _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
- +
- + return _errno;
- +}
- +
- +static int __cold dpa_set_settings(struct net_device *net_dev,
- + struct ethtool_cmd *et_cmd)
- +{
- + int _errno;
- + struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- +
- + if (priv->mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
- + netdev_err(net_dev, "phy device not initialized\n");
- + return -ENODEV;
- + }
- +
- + _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
- +
- + return _errno;
- +}
- +
- +static void __cold dpa_get_drvinfo(struct net_device *net_dev,
- + struct ethtool_drvinfo *drvinfo)
- +{
- + int _errno;
- +
- + strncpy(drvinfo->driver, KBUILD_MODNAME,
- + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
- + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- + "%X", 0);
- +
- + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
- + /* Truncated output */
- + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
- + } else if (unlikely(_errno < 0)) {
- + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
- + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
- + }
- + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
- + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
- +}
- +
- +static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
- +{
- + return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
- +}
- +
- +static void __cold dpa_set_msglevel(struct net_device *net_dev,
- + uint32_t msg_enable)
- +{
- + ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
- +}
- +
- +static int __cold dpa_nway_reset(struct net_device *net_dev)
- +{
- + int _errno;
- + struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- +
- + if (priv->mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
- + netdev_err(net_dev, "phy device not initialized\n");
- + return -ENODEV;
- + }
- +
- + _errno = 0;
- + if (priv->mac_dev->phy_dev->autoneg) {
- + _errno = phy_start_aneg(priv->mac_dev->phy_dev);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "phy_start_aneg() = %d\n",
- + _errno);
- + }
- +
- + return _errno;
- +}
- +
- +static void __cold dpa_get_pauseparam(struct net_device *net_dev,
- + struct ethtool_pauseparam *epause)
- +{
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- + struct phy_device *phy_dev;
- +
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + if (mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return;
- + }
- +
- + phy_dev = mac_dev->phy_dev;
- + if (unlikely(phy_dev == NULL)) {
- + netdev_err(net_dev, "phy device not initialized\n");
- + return;
- + }
- +
- + epause->autoneg = mac_dev->autoneg_pause;
- + epause->rx_pause = mac_dev->rx_pause_active;
- + epause->tx_pause = mac_dev->tx_pause_active;
- +}
- +
- +static int __cold dpa_set_pauseparam(struct net_device *net_dev,
- + struct ethtool_pauseparam *epause)
- +{
- + struct dpa_priv_s *priv;
- + struct mac_device *mac_dev;
- + struct phy_device *phy_dev;
- + int _errno;
- + u32 newadv, oldadv;
- + bool rx_pause, tx_pause;
- +
- + priv = netdev_priv(net_dev);
- + mac_dev = priv->mac_dev;
- +
- + if (mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- +
- + phy_dev = mac_dev->phy_dev;
- + if (unlikely(phy_dev == NULL)) {
- + netdev_err(net_dev, "phy device not initialized\n");
- + return -ENODEV;
- + }
- +
- + if (!(phy_dev->supported & SUPPORTED_Pause) ||
- + (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
- + (epause->rx_pause != epause->tx_pause)))
- + return -EINVAL;
- +
- + /* The MAC should know how to handle PAUSE frame autonegotiation before
- + * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
- + * settings.
- + */
- + mac_dev->autoneg_pause = !!epause->autoneg;
- + mac_dev->rx_pause_req = !!epause->rx_pause;
- + mac_dev->tx_pause_req = !!epause->tx_pause;
- +
- + /* Determine the sym/asym advertised PAUSE capabilities from the desired
- + * rx/tx pause settings.
- + */
- + newadv = 0;
- + if (epause->rx_pause)
- + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- + if (epause->tx_pause)
- + newadv |= ADVERTISED_Asym_Pause;
- +
- + oldadv = phy_dev->advertising &
- + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- +
- + /* If there are differences between the old and the new advertised
- + * values, restart PHY autonegotiation and advertise the new values.
- + */
- + if (oldadv != newadv) {
- + phy_dev->advertising &= ~(ADVERTISED_Pause
- + | ADVERTISED_Asym_Pause);
- + phy_dev->advertising |= newadv;
- + if (phy_dev->autoneg) {
- + _errno = phy_start_aneg(phy_dev);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "phy_start_aneg() = %d\n",
- + _errno);
- + }
- + }
- +
- + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
- +
- + return _errno;
- +}
- +
- +#ifdef CONFIG_PM
- +static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- +
- + wol->supported = 0;
- + wol->wolopts = 0;
- +
- + if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
- + return;
- +
- + if (priv->wol & DPAA_WOL_MAGIC) {
- + wol->supported = WAKE_MAGIC;
- + wol->wolopts = WAKE_MAGIC;
- + }
- +}
- +
- +static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- +
- + if (priv->mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- +
- + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
- + netdev_dbg(net_dev, "phy device not initialized\n");
- + return -ENODEV;
- + }
- +
- + if (!device_can_wakeup(net_dev->dev.parent) ||
- + (wol->wolopts & ~WAKE_MAGIC))
- + return -EOPNOTSUPP;
- +
- + priv->wol = 0;
- +
- + if (wol->wolopts & WAKE_MAGIC) {
- + priv->wol = DPAA_WOL_MAGIC;
- + device_set_wakeup_enable(net_dev->dev.parent, 1);
- + } else {
- + device_set_wakeup_enable(net_dev->dev.parent, 0);
- + }
- +
- + return 0;
- +}
- +#endif
- +
- +static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
- +{
- + struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- + if (priv->mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- +
- + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
- + netdev_err(net_dev, "phy device not initialized\n");
- + return -ENODEV;
- + }
- +
- + return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
- +}
- +
- +static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
- +{
- + struct dpa_priv_s *priv;
- +
- + priv = netdev_priv(net_dev);
- + if (priv->mac_dev == NULL) {
- + netdev_info(net_dev, "This is a MAC-less interface\n");
- + return -ENODEV;
- + }
- +
- + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
- + netdev_err(net_dev, "phy device not initialized\n");
- + return -ENODEV;
- + }
- +
- + return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
- +}
- +
- +static int dpa_get_sset_count(struct net_device *net_dev, int type)
- +{
- + unsigned int total_stats, num_stats;
- +
- + num_stats = num_online_cpus() + 1;
- + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
- +
- + switch (type) {
- + case ETH_SS_STATS:
- + return total_stats;
- + default:
- + return -EOPNOTSUPP;
- + }
- +}
- +
- +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
- + int crr_cpu, u64 bp_count, u64 *data)
- +{
- + int num_stat_values = num_cpus + 1;
- + int crr_stat = 0;
- +
- + /* update current CPU's stats and also add them to the total values */
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
- + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
- +
- + data[crr_stat * num_stat_values + crr_cpu] = bp_count;
- + data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
- +}
- +
- +static void dpa_get_ethtool_stats(struct net_device *net_dev,
- + struct ethtool_stats *stats, u64 *data)
- +{
- + u64 bp_count, cg_time, cg_num, cg_status;
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct qm_mcr_querycgr query_cgr;
- + struct dpa_rx_errors rx_errors;
- + struct dpa_ern_cnt ern_cnt;
- + struct dpa_priv_s *priv;
- + unsigned int num_cpus, offset;
- + struct dpa_bp *dpa_bp;
- + int total_stats, i;
- +
- + total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
- + priv = netdev_priv(net_dev);
- + dpa_bp = priv->dpa_bp;
- + num_cpus = num_online_cpus();
- + bp_count = 0;
- +
- + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
- + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
- + memset(data, 0, total_stats * sizeof(u64));
- +
- + for_each_online_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + if (dpa_bp->percpu_count)
- + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
- +
- + rx_errors.dme += percpu_priv->rx_errors.dme;
- + rx_errors.fpe += percpu_priv->rx_errors.fpe;
- + rx_errors.fse += percpu_priv->rx_errors.fse;
- + rx_errors.phe += percpu_priv->rx_errors.phe;
- + rx_errors.cse += percpu_priv->rx_errors.cse;
- +
- + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
- + ern_cnt.wred += percpu_priv->ern_cnt.wred;
- + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
- + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
- + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
- + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
- + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
- + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
- +
- + copy_stats(percpu_priv, num_cpus, i, bp_count, data);
- + }
- +
- + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
- + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
- +
- + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
- + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
- +
- + /* gather congestion related counters */
- + cg_num = 0;
- + cg_status = 0;
- + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
- + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
- + cg_num = priv->cgr_data.cgr_congested_count;
- + cg_status = query_cgr.cgr.cs;
- +
- + /* reset congestion stats (like the QMan API does) */
- + priv->cgr_data.congested_jiffies = 0;
- + priv->cgr_data.cgr_congested_count = 0;
- + }
- +
- + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
- + data[offset++] = cg_time;
- + data[offset++] = cg_num;
- + data[offset++] = cg_status;
- +}
- +
- +static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
- +{
- + unsigned int i, j, num_cpus, size;
- + char stat_string_cpu[ETH_GSTRING_LEN];
- + u8 *strings;
- +
- + strings = data;
- + num_cpus = num_online_cpus();
- + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
- +
- + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
- + for (j = 0; j < num_cpus; j++) {
- + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j);
- + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
- + strings += ETH_GSTRING_LEN;
- + }
- + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
- + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
- + strings += ETH_GSTRING_LEN;
- + }
- + memcpy(strings, dpa_stats_global, size);
- +}
- +
- +const struct ethtool_ops dpa_ethtool_ops = {
- + .get_settings = dpa_get_settings,
- + .set_settings = dpa_set_settings,
- + .get_drvinfo = dpa_get_drvinfo,
- + .get_msglevel = dpa_get_msglevel,
- + .set_msglevel = dpa_set_msglevel,
- + .nway_reset = dpa_nway_reset,
- + .get_pauseparam = dpa_get_pauseparam,
- + .set_pauseparam = dpa_set_pauseparam,
- + .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
- + .get_link = ethtool_op_get_link,
- + .get_eee = dpa_get_eee,
- + .set_eee = dpa_set_eee,
- + .get_sset_count = dpa_get_sset_count,
- + .get_ethtool_stats = dpa_get_ethtool_stats,
- + .get_strings = dpa_get_strings,
- +#ifdef CONFIG_PM
- + .get_wol = dpa_get_wol,
- + .set_wol = dpa_set_wol,
- +#endif
- +};
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
- @@ -0,0 +1,286 @@
- +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/string.h>
- +
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +#include "dpaa_eth_generic.h"
- +
- +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
- + "interrupts",
- + "rx packets",
- + "tx packets",
- + "tx recycled",
- + "tx confirm",
- + "tx S/G",
- + "rx S/G (N/A)",
- + "tx error",
- + "rx error",
- + "bp count",
- + "bp draining count"
- +};
- +
- +static char dpa_stats_global[][ETH_GSTRING_LEN] = {
- + /* dpa rx errors */
- + "rx dma error",
- + "rx frame physical error",
- + "rx frame size error",
- + "rx header error",
- + "rx csum error",
- +
- + /* demultiplexing errors */
- + "qman cg_tdrop",
- + "qman wred",
- + "qman error cond",
- + "qman early window",
- + "qman late window",
- + "qman fq tdrop",
- + "qman fq retired",
- + "qman orp disabled",
- +};
- +
- +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
- +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
- +
- +static int __cold dpa_generic_get_settings(struct net_device *net_dev,
- + struct ethtool_cmd *et_cmd)
- +{
- + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
- + return -ENODEV;
- +}
- +
- +static int __cold dpa_generic_set_settings(struct net_device *net_dev,
- + struct ethtool_cmd *et_cmd)
- +{
- + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
- + return -ENODEV;
- +}
- +
- +static void __cold dpa_generic_get_drvinfo(struct net_device *net_dev,
- + struct ethtool_drvinfo *drvinfo)
- +{
- + int _errno;
- +
- + strncpy(drvinfo->driver, KBUILD_MODNAME,
- + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
- + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- + "%X", 0);
- +
- + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
- + /* Truncated output */
- + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
- + } else if (unlikely(_errno < 0)) {
- + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
- + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
- + }
- + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
- + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
- +}
- +
- +static uint32_t __cold dpa_generic_get_msglevel(struct net_device *net_dev)
- +{
- + return ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable;
- +}
- +
- +static void __cold dpa_generic_set_msglevel(struct net_device *net_dev,
- + uint32_t msg_enable)
- +{
- + ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable =
- + msg_enable;
- +}
- +
- +static int __cold dpa_generic_nway_reset(struct net_device *net_dev)
- +{
- + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
- + return -ENODEV;
- +}
- +
- +static int dpa_generic_get_sset_count(struct net_device *net_dev, int type)
- +{
- + unsigned int total_stats, num_stats;
- +
- + num_stats = num_online_cpus() + 1;
- + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
- +
- + switch (type) {
- + case ETH_SS_STATS:
- + return total_stats;
- + default:
- + return -EOPNOTSUPP;
- + }
- +}
- +
- +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv,
- + int num_cpus, int crr_cpu, u64 bp_count,
- + u64 bp_drain_count, u64 *data)
- +{
- + int num_values = num_cpus + 1;
- + int crr = 0;
- +
- + /* update current CPU's stats and also add them to the total values */
- + data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
- + data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
- + data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
- + data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
- + data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
- +
- + data[crr * num_values + crr_cpu] = bp_count;
- + data[crr++ * num_values + num_cpus] += bp_count;
- +
- + data[crr * num_values + crr_cpu] = bp_drain_count;
- + data[crr++ * num_values + num_cpus] += bp_drain_count;
- +}
- +
- +static void dpa_generic_get_ethtool_stats(struct net_device *net_dev,
- + struct ethtool_stats *stats,
- + u64 *data)
- +{
- + struct dpa_percpu_priv_s *percpu_priv;
- + struct dpa_bp *dpa_bp, *drain_bp;
- + struct dpa_generic_priv_s *priv;
- + struct dpa_rx_errors rx_errors;
- + struct dpa_ern_cnt ern_cnt;
- + unsigned int num_cpus, offset;
- + u64 bp_cnt, drain_cnt;
- + int total_stats, i;
- +
- + total_stats = dpa_generic_get_sset_count(net_dev, ETH_SS_STATS);
- + priv = netdev_priv(net_dev);
- + drain_bp = priv->draining_tx_bp;
- + dpa_bp = priv->rx_bp;
- + num_cpus = num_online_cpus();
- + drain_cnt = 0;
- + bp_cnt = 0;
- +
- + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
- + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
- + memset(data, 0, total_stats * sizeof(u64));
- +
- + for_each_online_cpu(i) {
- + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- +
- + if (dpa_bp->percpu_count)
- + bp_cnt = *(per_cpu_ptr(dpa_bp->percpu_count, i));
- +
- + if (drain_bp->percpu_count)
- + drain_cnt = *(per_cpu_ptr(drain_bp->percpu_count, i));
- +
- + rx_errors.dme += percpu_priv->rx_errors.dme;
- + rx_errors.fpe += percpu_priv->rx_errors.fpe;
- + rx_errors.fse += percpu_priv->rx_errors.fse;
- + rx_errors.phe += percpu_priv->rx_errors.phe;
- + rx_errors.cse += percpu_priv->rx_errors.cse;
- +
- + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
- + ern_cnt.wred += percpu_priv->ern_cnt.wred;
- + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
- + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
- + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
- + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
- + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
- + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
- +
- + copy_stats(percpu_priv, num_cpus, i, bp_cnt, drain_cnt, data);
- + }
- +
- + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
- + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
- +
- + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
- + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
- +}
- +
- +static void dpa_generic_get_strings(struct net_device *net_dev,
- + u32 stringset, u8 *data)
- +{
- + unsigned int i, j, num_cpus, size;
- + char string_cpu[ETH_GSTRING_LEN];
- + u8 *strings;
- +
- + strings = data;
- + num_cpus = num_online_cpus();
- + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
- +
- + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
- + for (j = 0; j < num_cpus; j++) {
- + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
- + dpa_stats_percpu[i], j);
- + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- + strings += ETH_GSTRING_LEN;
- + }
- + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
- + dpa_stats_percpu[i]);
- + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- + strings += ETH_GSTRING_LEN;
- + }
- + memcpy(strings, dpa_stats_global, size);
- +}
- +
- +const struct ethtool_ops dpa_generic_ethtool_ops = {
- + .get_settings = dpa_generic_get_settings,
- + .set_settings = dpa_generic_set_settings,
- + .get_drvinfo = dpa_generic_get_drvinfo,
- + .get_msglevel = dpa_generic_get_msglevel,
- + .set_msglevel = dpa_generic_set_msglevel,
- + .nway_reset = dpa_generic_nway_reset,
- + .get_link = ethtool_op_get_link,
- + .get_sset_count = dpa_generic_get_sset_count,
- + .get_ethtool_stats = dpa_generic_get_ethtool_stats,
- + .get_strings = dpa_generic_get_strings,
- +};
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
- @@ -0,0 +1,250 @@
- +/* Copyright 2015 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#include <linux/string.h>
- +
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_macsec.h"
- +
/* Labels for the per-CPU ethtool statistics. Each entry expands into one
 * "[CPU n]" column per online CPU plus a "[TOTAL]" column (see
 * dpa_macsec_get_strings()); the order must match the fill order used by
 * copy_stats().
 */
static const char dpa_macsec_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx recycled",
	"tx confirm",
	"tx S/G",
	"rx S/G",
	"tx error",
	"rx error",
	"bp count",
	"tx macsec",
	"rx macsec"
};
- +
- +static char dpa_macsec_stats_global[][ETH_GSTRING_LEN] = {
- + /* dpa rx errors */
- + "rx dma error",
- + "rx frame physical error",
- + "rx frame size error",
- + "rx header error",
- + "rx csum error",
- +
- + /* demultiplexing errors */
- + "qman cg_tdrop",
- + "qman wred",
- + "qman error cond",
- + "qman early window",
- + "qman late window",
- + "qman fq tdrop",
- + "qman fq retired",
- + "qman orp disabled",
- +
- + /* congestion related stats */
- + "congestion time (ms)",
- + "entered congestion",
- + "congested (0/1)"
- +};
- +
- +#define DPA_MACSEC_STATS_PERCPU_LEN ARRAY_SIZE(dpa_macsec_stats_percpu)
- +#define DPA_MACSEC_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_macsec_stats_global)
- +
- +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
- + int crr_cpu, u64 bp_count, u64 tx_macsec,
- + u64 rx_macsec, u64 *data)
- +{
- + int num_values = num_cpus + 1;
- + int crr = 0;
- +
- + /* update current CPU's stats and also add them to the total values */
- + data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
- + data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
- + data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
- + data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
- + data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->rx_sg;
- + data[crr++ * num_values + num_cpus] += percpu_priv->rx_sg;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
- +
- + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
- + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
- +
- + data[crr * num_values + crr_cpu] = bp_count;
- + data[crr++ * num_values + num_cpus] += bp_count;
- +
- + data[crr * num_values + crr_cpu] = tx_macsec;
- + data[crr++ * num_values + num_cpus] += tx_macsec;
- +
- + data[crr * num_values + crr_cpu] = rx_macsec;
- + data[crr++ * num_values + num_cpus] += rx_macsec;
- +}
- +
- +int dpa_macsec_get_sset_count(struct net_device *net_dev, int type)
- +{
- + unsigned int total_stats, num_stats;
- +
- + num_stats = num_online_cpus() + 1;
- + total_stats = num_stats * DPA_MACSEC_STATS_PERCPU_LEN +
- + DPA_MACSEC_STATS_GLOBAL_LEN;
- +
- + switch (type) {
- + case ETH_SS_STATS:
- + return total_stats;
- + default:
- + return -EOPNOTSUPP;
- + }
- +}
- +
/* ethtool .get_ethtool_stats callback. Fills @data with the per-CPU
 * counter block (one column per online CPU plus a TOTAL column, laid out
 * by copy_stats()), followed by the accumulated RX-error, ERN and
 * congestion counters. The layout must stay in sync with
 * dpa_macsec_get_strings() and dpa_macsec_get_sset_count().
 */
void dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats, u64 *data)
{
	u64 bp_count, bp_total, cg_time, cg_num, cg_status;
	struct macsec_percpu_priv_s *percpu_priv_macsec;
	struct dpa_percpu_priv_s *percpu_priv;
	struct macsec_priv_s *macsec_priv;
	struct qm_mcr_querycgr query_cgr;
	struct dpa_rx_errors rx_errors;
	struct dpa_ern_cnt ern_cnt;
	struct dpa_priv_s *priv;
	unsigned int num_cpus, offset;
	struct dpa_bp *dpa_bp;
	int total_stats, i;

	macsec_priv = dpa_macsec_get_priv(net_dev);
	if (unlikely(!macsec_priv)) {
		pr_err("selected macsec_priv is NULL\n");
		return;
	}

	total_stats = dpa_macsec_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	dpa_bp = priv->dpa_bp;
	num_cpus = num_online_cpus();
	bp_count = 0;
	/* NOTE(review): bp_total is set but never accumulated or read below;
	 * per-CPU bp counts are summed inside copy_stats() instead.
	 */
	bp_total = 0;

	memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		percpu_priv_macsec = per_cpu_ptr(macsec_priv->percpu_priv, i);

		/* this CPU's buffer pool occupancy */
		if (dpa_bp->percpu_count)
			bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));

		/* accumulate RX error counters across CPUs */
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;
		rx_errors.cse += percpu_priv->rx_errors.cse;

		/* accumulate enqueue-rejection counters across CPUs */
		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count,
			   percpu_priv_macsec->tx_macsec,
			   percpu_priv_macsec->rx_macsec,
			   data);
	}

	/* Global stats follow the per-CPU block. NOTE(review): the structs
	 * are copied raw into u64 slots — assumes both consist solely of
	 * u64-sized members, matching dpa_macsec_stats_global[]; confirm.
	 */
	offset = (num_cpus + 1) * DPA_MACSEC_STATS_PERCPU_LEN;
	memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));

	offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = 0;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;
		cg_status = query_cgr.cgr.cs;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
- +
- +void dpa_macsec_get_strings(struct net_device *net_dev,
- + u32 stringset, u8 *data)
- +{
- + unsigned int i, j, num_cpus, size;
- + char string_cpu[ETH_GSTRING_LEN];
- + u8 *strings;
- +
- + strings = data;
- + num_cpus = num_online_cpus();
- + size = DPA_MACSEC_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
- +
- + for (i = 0; i < DPA_MACSEC_STATS_PERCPU_LEN; i++) {
- + for (j = 0; j < num_cpus; j++) {
- + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
- + dpa_macsec_stats_percpu[i], j);
- + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- + strings += ETH_GSTRING_LEN;
- + }
- + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
- + dpa_macsec_stats_percpu[i]);
- + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- + strings += ETH_GSTRING_LEN;
- + }
- + memcpy(strings, dpa_macsec_stats_global, size);
- +}
- +
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
- @@ -0,0 +1,287 @@
- +/*
- + * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
- + *
- + * Author: Yangbo Lu <yangbo.lu@freescale.com>
- + *
- + * Copyright 2014 Freescale Semiconductor, Inc.
- + *
- + * This program is free software; you can redistribute it and/or modify it
- + * under the terms of the GNU General Public License as published by the
- + * Free Software Foundation; either version 2 of the License, or (at your
- + * option) any later version.
- +*/
- +
- +#include <linux/device.h>
- +#include <linux/hrtimer.h>
- +#include <linux/init.h>
- +#include <linux/interrupt.h>
- +#include <linux/kernel.h>
- +#include <linux/module.h>
- +#include <linux/of.h>
- +#include <linux/of_platform.h>
- +#include <linux/timex.h>
- +#include <linux/io.h>
- +
- +#include <linux/ptp_clock_kernel.h>
- +
- +#include "dpaa_eth.h"
- +#include "mac.h"
- +
/* PTP clock registered by this module; set in dpa_ptp_load(). */
struct ptp_clock *clock;

/* MAC device whose 1588 timer backs the clock; cached at module init. */
static struct mac_device *mac_dev;
/* Nominal TMR_ADD value read from hardware; baseline for adjfreq. */
static u32 freqCompensation;
- +
/* Bit definitions for the TMR_CTRL register.
 * Shifts use an unsigned operand: `1 << 31` overflows a signed int, which
 * is undefined behavior in C (the register is a 32-bit unsigned value).
 */
#define ALM1P (1u<<31) /* Alarm1 output polarity */
#define ALM2P (1u<<30) /* Alarm2 output polarity */
#define FS (1u<<28) /* FIPER start indication */
#define PP1L (1u<<27) /* Fiper1 pulse loopback mode enabled. */
#define PP2L (1u<<26) /* Fiper2 pulse loopback mode enabled. */
#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
#define TCLK_PERIOD_MASK (0x3ff)
#define RTPE (1u<<15) /* Record Tx Timestamp to PAL Enable. */
#define FRD (1u<<14) /* FIPER Realignment Disable */
#define ESFDP (1u<<11) /* External Tx/Rx SFD Polarity. */
#define ESFDE (1u<<10) /* External Tx/Rx SFD Enable. */
#define ETEP2 (1u<<9) /* External trigger 2 edge polarity */
#define ETEP1 (1u<<8) /* External trigger 1 edge polarity */
#define COPH (1u<<7) /* Generated clock output phase. */
#define CIPH (1u<<6) /* External oscillator input clock phase */
#define TMSR (1u<<5) /* Timer soft reset. */
#define BYP (1u<<3) /* Bypass drift compensated clock */
#define TE (1u<<2) /* 1588 timer enable. */
#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
#define CKSEL_MASK (0x3)
- +
/* Bit definitions for the TMR_TEVENT register */
#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
#define ALM2 (1<<17) /* Current time = alarm time register 2 */
#define ALM1 (1<<16) /* Current time = alarm time register 1 */
#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */

/* Bit definitions for the TMR_TEMASK register */
#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
#define ALM2EN (1<<17) /* Timer ALM2 event enable */
#define ALM1EN (1<<16) /* Timer ALM1 event enable */
#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
#define PP2EN (1<<6) /* Periodic pulse event 2 enable */

/* Bit definitions for the TMR_PEVENT register */
#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
#define RXP (1<<0) /* PTP frame has been received */

/* Bit definitions for the TMR_PEMASK register */
#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
#define RXPEN (1<<0) /* Receive PTP packet event enable */

/* Bit definitions for the TMR_STAT register */
#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
#define STAT_VEC_MASK (0x3f)

/* Bit definitions for the TMR_PRSC register */
#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
#define PRSC_OCK_MASK (0xffff)


/* Number of external timestamp channels advertised to the PTP core */
#define N_EXT_TS 2
- +
- +static void set_alarm(void)
- +{
- + u64 ns;
- +
- + if (mac_dev->fm_rtc_get_cnt)
- + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
- + ns += 1500000000ULL;
- + ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
- + ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
- + if (mac_dev->fm_rtc_set_alarm)
- + mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
- +}
- +
- +static void set_fipers(void)
- +{
- + u64 fiper;
- +
- + if (mac_dev->fm_rtc_disable)
- + mac_dev->fm_rtc_disable(mac_dev->fm_dev);
- +
- + set_alarm();
- + fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
- + if (mac_dev->fm_rtc_set_fiper)
- + mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
- +
- + if (mac_dev->fm_rtc_enable)
- + mac_dev->fm_rtc_enable(mac_dev->fm_dev);
- +}
- +
- +/* PTP clock operations */
- +
- +static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
- +{
- + u64 adj;
- + u32 diff, tmr_add;
- + int neg_adj = 0;
- +
- + if (ppb < 0) {
- + neg_adj = 1;
- + ppb = -ppb;
- + }
- +
- + tmr_add = freqCompensation;
- + adj = tmr_add;
- + adj *= ppb;
- + diff = div_u64(adj, 1000000000ULL);
- +
- + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
- +
- + if (mac_dev->fm_rtc_set_drift)
- + mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
- +
- + return 0;
- +}
- +
- +static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
- +{
- + s64 now;
- +
- + if (mac_dev->fm_rtc_get_cnt)
- + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
- +
- + now += delta;
- +
- + if (mac_dev->fm_rtc_set_cnt)
- + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
- + set_fipers();
- +
- + return 0;
- +}
- +
- +static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
- +{
- + u64 ns;
- + u32 remainder;
- +
- + if (mac_dev->fm_rtc_get_cnt)
- + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
- +
- + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- + ts->tv_nsec = remainder;
- + return 0;
- +}
- +
- +static int ptp_dpa_settime(struct ptp_clock_info *ptp,
- + const struct timespec *ts)
- +{
- + u64 ns;
- +
- + ns = ts->tv_sec * 1000000000ULL;
- + ns += ts->tv_nsec;
- +
- + if (mac_dev->fm_rtc_set_cnt)
- + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
- + set_fipers();
- + return 0;
- +}
- +
- +static int ptp_dpa_enable(struct ptp_clock_info *ptp,
- + struct ptp_clock_request *rq, int on)
- +{
- + u32 bit;
- +
- + switch (rq->type) {
- + case PTP_CLK_REQ_EXTTS:
- + switch (rq->extts.index) {
- + case 0:
- + bit = ETS1EN;
- + break;
- + case 1:
- + bit = ETS2EN;
- + break;
- + default:
- + return -EINVAL;
- + }
- + if (on) {
- + if (mac_dev->fm_rtc_enable_interrupt)
- + mac_dev->fm_rtc_enable_interrupt(
- + mac_dev->fm_dev, bit);
- + } else {
- + if (mac_dev->fm_rtc_disable_interrupt)
- + mac_dev->fm_rtc_disable_interrupt(
- + mac_dev->fm_dev, bit);
- + }
- + return 0;
- +
- + case PTP_CLK_REQ_PPS:
- + if (on) {
- + if (mac_dev->fm_rtc_enable_interrupt)
- + mac_dev->fm_rtc_enable_interrupt(
- + mac_dev->fm_dev, PP1EN);
- + } else {
- + if (mac_dev->fm_rtc_disable_interrupt)
- + mac_dev->fm_rtc_disable_interrupt(
- + mac_dev->fm_dev, PP1EN);
- + }
- + return 0;
- +
- + default:
- + break;
- + }
- +
- + return -EOPNOTSUPP;
- +}
- +
/* Capabilities and operations advertised to the PTP core: one PPS event
 * channel and N_EXT_TS external timestamp channels, no alarms and no
 * periodic outputs.
 */
static struct ptp_clock_info ptp_dpa_caps = {
	.owner = THIS_MODULE,
	.name = "dpaa clock",
	.max_adj = 512000,
	.n_alarm = 0,
	.n_ext_ts = N_EXT_TS,
	.n_per_out = 0,
	.pps = 1,
	.adjfreq = ptp_dpa_adjfreq,
	.adjtime = ptp_dpa_adjtime,
	.gettime = ptp_dpa_gettime,
	.settime = ptp_dpa_settime,
	.enable = ptp_dpa_enable,
};
- +
- +static int __init __cold dpa_ptp_load(void)
- +{
- + struct device *ptp_dev;
- + struct timespec now;
- + int dpa_phc_index;
- + int err;
- +
- + ptp_dev = &ptp_priv.of_dev->dev;
- + mac_dev = ptp_priv.mac_dev;
- +
- + if (mac_dev->fm_rtc_get_drift)
- + mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
- +
- + getnstimeofday(&now);
- + ptp_dpa_settime(&ptp_dpa_caps, &now);
- +
- + clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
- + if (IS_ERR(clock)) {
- + err = PTR_ERR(clock);
- + return err;
- + }
- + dpa_phc_index = ptp_clock_index(clock);
- + return 0;
- +}
- +module_init(dpa_ptp_load);
- +
/* Module teardown: mask all 1588 timer interrupts, then unregister the
 * PTP clock. mac_dev is valid here because module_exit only runs after a
 * successful dpa_ptp_load().
 */
static void __exit __cold dpa_ptp_unload(void)
{
	if (mac_dev->fm_rtc_disable_interrupt)
		mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
	ptp_clock_unregister(clock);
}
module_exit(dpa_ptp_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
- @@ -0,0 +1,915 @@
- +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/io.h>
- +#include <linux/of_platform.h>
- +#include <linux/of_mdio.h>
- +#include <linux/phy.h>
- +#include <linux/netdevice.h>
- +
- +#include "dpaa_eth.h"
- +#include "mac.h"
- +#include "lnxwrp_fsl_fman.h"
- +
- +#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
- +
- +#include "fsl_fman_dtsec.h"
- +#include "fsl_fman_tgec.h"
- +#include "fsl_fman_memac.h"
- +#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
- +
- +#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +
- +MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
- +
- +MODULE_DESCRIPTION(MAC_DESCRIPTION);
- +
/* Per-MAC private data: holds only the FMD MAC handle. */
struct mac_priv_s {
	struct fm_mac_dev *fm_mac;
};

const char *mac_driver_description __initconst = MAC_DESCRIPTION;
/* Private-area size per MAC type; all three currently share mac_priv_s. */
const size_t mac_sizeof_priv[] = {
	[DTSEC] = sizeof(struct mac_priv_s),
	[XGMAC] = sizeof(struct mac_priv_s),
	[MEMAC] = sizeof(struct mac_priv_s)
};
- +
/* PHY interface -> FMD enet mode lookup for 100 Mb/s links.
 * NOTE(review): indexed directly by phy_if; interface values not listed
 * here yield the zero-initialized entry — confirm callers only pass the
 * listed modes.
 */
static const enet_mode_t _100[] = {
	[PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
	[PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
};

/* PHY interface -> FMD enet mode lookup for 1 Gb/s links. */
static const enet_mode_t _1000[] = {
	[PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
	[PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
	[PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
	[PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
	[PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
	[PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
	[PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
	[PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
	[PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
};
- +
- +static enet_mode_t __cold __attribute__((nonnull))
- +macdev2enetinterface(const struct mac_device *mac_dev)
- +{
- + switch (mac_dev->max_speed) {
- + case SPEED_100:
- + return _100[mac_dev->phy_if];
- + case SPEED_1000:
- + return _1000[mac_dev->phy_if];
- + case SPEED_2500:
- + return e_ENET_MODE_SGMII_2500;
- + case SPEED_10000:
- + return e_ENET_MODE_XGMII_10000;
- + default:
- + return e_ENET_MODE_MII_100;
- + }
- +}
- +
- +static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
- +{
- + struct mac_device *mac_dev;
- +
- + mac_dev = (struct mac_device *)_mac_dev;
- +
- + if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
- + /* don't flag RX FIFO after the first */
- + fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
- + e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- + dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
- + exception);
- + }
- +
- + dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
- + exception);
- +}
- +
/* Configure and initialize a dTSEC or XGEC (10G) MAC through the FMD API.
 * Returns 0 on success or a negative errno; the FMD MAC handle is freed
 * on any failure after fm_mac_config().
 */
static int __cold init(struct mac_device *mac_dev)
{
	int _errno;
	struct mac_priv_s *priv;
	t_FmMacParams param;
	uint32_t version;

	priv = macdev_priv(mac_dev);

	/* NOTE(review): param is only partially filled in; confirm that
	 * fm_mac_config() does not read the remaining fields.
	 */
	param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
		mac_dev->dev, mac_dev->res->start, 0x2000);
	param.enetMode = macdev2enetinterface(mac_dev);
	memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
		sizeof(mac_dev->addr)));
	param.macId = mac_dev->cell_index;
	param.h_Fm = (handle_t)mac_dev->fm;
	param.mdioIrq = NO_IRQ;
	/* exceptions and events share one handler */
	param.f_Exception = mac_exception;
	param.f_Event = mac_exception;
	param.h_App = mac_dev;

	priv->fm_mac = fm_mac_config(&param);
	if (unlikely(priv->fm_mac == NULL)) {
		_errno = -EINVAL;
		goto _return;
	}

	/* 10G MAC ids are registered after all possible 1G MAC ids. */
	fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
		(macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
		param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);

	_errno = fm_mac_config_max_frame_length(priv->fm_mac,
			fm_get_max_frm());
	if (unlikely(_errno < 0))
		goto _return_fm_mac_free;

	if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
		/* 10G always works with pad and CRC */
		_errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
		if (unlikely(_errno < 0))
			goto _return_fm_mac_free;

		_errno = fm_mac_config_half_duplex(priv->fm_mac,
				mac_dev->half_duplex);
		if (unlikely(_errno < 0))
			goto _return_fm_mac_free;
	} else {
		_errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
		if (unlikely(_errno < 0))
			goto _return_fm_mac_free;
	}

	_errno = fm_mac_init(priv->fm_mac);
	if (unlikely(_errno < 0))
		goto _return_fm_mac_free;

#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
	/* For 1G MAC, disable by default the MIB counters overflow interrupt */
	if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
		_errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
				e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
		if (unlikely(_errno < 0))
			goto _return_fm_mac_free;
	}
#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */

	/* For 10G MAC, disable Tx ECC exception */
	if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
		_errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
				e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
		if (unlikely(_errno < 0))
			goto _return_fm_mac_free;
	}

	_errno = fm_mac_get_version(priv->fm_mac, &version);
	if (unlikely(_errno < 0))
		goto _return_fm_mac_free;

	dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
		((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
			"dTSEC" : "XGEC"), version);

	goto _return;


_return_fm_mac_free:
	fm_mac_free(mac_dev->get_mac_handle(mac_dev));

_return:
	return _errno;
}
- +
/* Configure and initialize a mEMAC through the FMD API. Returns 0 on
 * success or a negative errno; the FMD MAC handle is freed on failure
 * after fm_mac_config().
 */
static int __cold memac_init(struct mac_device *mac_dev)
{
	int _errno;
	struct mac_priv_s *priv;
	t_FmMacParams param;

	priv = macdev_priv(mac_dev);

	/* NOTE(review): param is only partially filled in; confirm that
	 * fm_mac_config() does not read the remaining fields.
	 */
	param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
		mac_dev->dev, mac_dev->res->start, 0x2000);
	param.enetMode = macdev2enetinterface(mac_dev);
	memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
	param.macId = mac_dev->cell_index;
	param.h_Fm = (handle_t)mac_dev->fm;
	param.mdioIrq = NO_IRQ;
	/* exceptions and events share one handler */
	param.f_Exception = mac_exception;
	param.f_Event = mac_exception;
	param.h_App = mac_dev;

	priv->fm_mac = fm_mac_config(&param);
	if (unlikely(priv->fm_mac == NULL)) {
		_errno = -EINVAL;
		goto _return;
	}

	/* 10G MAC ids are registered after all possible 1G MAC ids. */
	fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
		(macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
		param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);

	_errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
	if (unlikely(_errno < 0))
		goto _return_fm_mac_free;

	_errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
	if (unlikely(_errno < 0))
		goto _return_fm_mac_free;

	_errno = fm_mac_init(priv->fm_mac);
	if (unlikely(_errno < 0))
		goto _return_fm_mac_free;

	dev_info(mac_dev->dev, "FMan MEMAC\n");

	goto _return;

_return_fm_mac_free:
	fm_mac_free(priv->fm_mac);

_return:
	return _errno;
}
- +
- +static int __cold start(struct mac_device *mac_dev)
- +{
- + int _errno;
- + struct phy_device *phy_dev = mac_dev->phy_dev;
- +
- + _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
- +
- + if (!_errno && phy_dev) {
- + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
- + phy_start(phy_dev);
- + else if (phy_dev->drv->read_status)
- + phy_dev->drv->read_status(phy_dev);
- + }
- +
- + return _errno;
- +}
- +
- +static int __cold stop(struct mac_device *mac_dev)
- +{
- + if (mac_dev->phy_dev &&
- + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
- + phy_stop(mac_dev->phy_dev);
- +
- + return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
- +}
- +
- +static int __cold set_multi(struct net_device *net_dev,
- + struct mac_device *mac_dev)
- +{
- + struct mac_priv_s *mac_priv;
- + struct mac_address *old_addr, *tmp;
- + struct netdev_hw_addr *ha;
- + int _errno;
- +
- + mac_priv = macdev_priv(mac_dev);
- +
- + /* Clear previous address list */
- + list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
- + _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
- + (t_EnetAddr *)old_addr->addr);
- + if (_errno < 0)
- + return _errno;
- +
- + list_del(&old_addr->list);
- + kfree(old_addr);
- + }
- +
- + /* Add all the addresses from the new list */
- + netdev_for_each_mc_addr(ha, net_dev) {
- + _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
- + (t_EnetAddr *)ha->addr);
- + if (_errno < 0)
- + return _errno;
- +
- + tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
- + if (!tmp) {
- + dev_err(mac_dev->dev, "Out of memory\n");
- + return -ENOMEM;
- + }
- + memcpy(tmp->addr, ha->addr, ETH_ALEN);
- + list_add(&tmp->list, &mac_dev->mc_addr_list);
- + }
- + return 0;
- +}
- +
- +/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
- + * active PAUSE settings. Otherwise, the new active settings should be reflected
- + * in FMan.
- + */
- +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
- +{
- + struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
- + int _errno = 0;
- +
- + if (unlikely(rx != mac_dev->rx_pause_active)) {
- + _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
- + if (likely(_errno == 0))
- + mac_dev->rx_pause_active = rx;
- + }
- +
- + if (unlikely(tx != mac_dev->tx_pause_active)) {
- + _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
- + if (likely(_errno == 0))
- + mac_dev->tx_pause_active = tx;
- + }
- +
- + return _errno;
- +}
- +EXPORT_SYMBOL(set_mac_active_pause);
- +
- +/* Determine the MAC RX/TX PAUSE frames settings based on PHY
- + * autonegotiation or values set by eththool.
- + */
- +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
- +{
- + struct phy_device *phy_dev = mac_dev->phy_dev;
- + u16 lcl_adv, rmt_adv;
- + u8 flowctrl;
- +
- + *rx_pause = *tx_pause = false;
- +
- + if (!phy_dev->duplex)
- + return;
- +
- + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
- + * are those set by ethtool.
- + */
- + if (!mac_dev->autoneg_pause) {
- + *rx_pause = mac_dev->rx_pause_req;
- + *tx_pause = mac_dev->tx_pause_req;
- + return;
- + }
- +
- + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
- + * settings depend on the result of the link negotiation.
- + */
- +
- + /* get local capabilities */
- + lcl_adv = 0;
- + if (phy_dev->advertising & ADVERTISED_Pause)
- + lcl_adv |= ADVERTISE_PAUSE_CAP;
- + if (phy_dev->advertising & ADVERTISED_Asym_Pause)
- + lcl_adv |= ADVERTISE_PAUSE_ASYM;
- +
- + /* get link partner capabilities */
- + rmt_adv = 0;
- + if (phy_dev->pause)
- + rmt_adv |= LPA_PAUSE_CAP;
- + if (phy_dev->asym_pause)
- + rmt_adv |= LPA_PAUSE_ASYM;
- +
- + /* Calculate TX/RX settings based on local and peer advertised
- + * symmetric/asymmetric PAUSE capabilities.
- + */
- + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
- + if (flowctrl & FLOW_CTRL_RX)
- + *rx_pause = true;
- + if (flowctrl & FLOW_CTRL_TX)
- + *tx_pause = true;
- +}
- +EXPORT_SYMBOL(get_pause_cfg);
- +
- +static void adjust_link(struct net_device *net_dev)
- +{
- + struct dpa_priv_s *priv = netdev_priv(net_dev);
- + struct mac_device *mac_dev = priv->mac_dev;
- + struct phy_device *phy_dev = mac_dev->phy_dev;
- + struct fm_mac_dev *fm_mac_dev;
- + bool rx_pause, tx_pause;
- + int _errno;
- +
- + fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
- + fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
- + phy_dev->duplex);
- +
- + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- + if (unlikely(_errno < 0))
- + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
- +}
- +
- +/* Initializes driver's PHY state, and attaches to the PHY.
- + * Returns 0 on success.
- + */
- +static int dtsec_init_phy(struct net_device *net_dev,
- + struct mac_device *mac_dev)
- +{
- + struct phy_device *phy_dev;
- +
- + if (!mac_dev->phy_node)
- + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
- + &adjust_link, mac_dev->phy_if);
- + else
- + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
- + &adjust_link, 0, mac_dev->phy_if);
- + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
- + netdev_err(net_dev, "Could not connect to PHY %s\n",
- + mac_dev->phy_node ?
- + mac_dev->phy_node->full_name :
- + mac_dev->fixed_bus_id);
- + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
- + }
- +
- + /* Remove any features not supported by the controller */
- + phy_dev->supported &= mac_dev->if_support;
- + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
- + * as most of the PHY drivers do not enable them by default.
- + */
- + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- + phy_dev->advertising = phy_dev->supported;
- +
- + mac_dev->phy_dev = phy_dev;
- +
- + return 0;
- +}
- +
- +static int xgmac_init_phy(struct net_device *net_dev,
- + struct mac_device *mac_dev)
- +{
- + struct phy_device *phy_dev;
- +
- + if (!mac_dev->phy_node)
- + phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id,
- + mac_dev->phy_if);
- + else
- + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
- + mac_dev->phy_if);
- + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
- + netdev_err(net_dev, "Could not attach to PHY %s\n",
- + mac_dev->phy_node ?
- + mac_dev->phy_node->full_name :
- + mac_dev->fixed_bus_id);
- + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
- + }
- +
- + phy_dev->supported &= mac_dev->if_support;
- + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
- + * as most of the PHY drivers do not enable them by default.
- + */
- + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- + phy_dev->advertising = phy_dev->supported;
- +
- + mac_dev->phy_dev = phy_dev;
- +
- + return 0;
- +}
- +
- +static int memac_init_phy(struct net_device *net_dev,
- + struct mac_device *mac_dev)
- +{
- + struct phy_device *phy_dev;
- +
- + if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
- + (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)){
- + if (!mac_dev->phy_node) {
- + mac_dev->phy_dev = NULL;
- + return 0;
- + } else
- + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
- + mac_dev->phy_if);
- + } else {
- + if (!mac_dev->phy_node)
- + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
- + &adjust_link, mac_dev->phy_if);
- + else
- + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
- + &adjust_link, 0,
- + mac_dev->phy_if);
- + }
- +
- + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
- + netdev_err(net_dev, "Could not connect to PHY %s\n",
- + mac_dev->phy_node ?
- + mac_dev->phy_node->full_name :
- + mac_dev->fixed_bus_id);
- + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
- + }
- +
- + /* Remove any features not supported by the controller */
- + phy_dev->supported &= mac_dev->if_support;
- + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
- + * as most of the PHY drivers do not enable them by default.
- + */
- + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- + phy_dev->advertising = phy_dev->supported;
- +
- + mac_dev->phy_dev = phy_dev;
- +
- + return 0;
- +}
- +
- +static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
- +{
- + int _errno, __errno;
- +
- + _errno = fm_mac_disable(fm_mac_dev);
- + __errno = fm_mac_free(fm_mac_dev);
- +
- + if (unlikely(__errno < 0))
- + _errno = __errno;
- +
- + return _errno;
- +}
- +
- +static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
- +{
- + const struct mac_priv_s *priv;
- + priv = macdev_priv(mac_dev);
- + return priv->fm_mac;
- +}
- +
- +static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
- +{
- + struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
- + int i = 0, n = nn;
- +
- + FM_DMP_SUBTITLE(buf, n, "\n");
- +
- + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
- +
- + FM_DMP_V32(buf, n, p_mm, tsec_id);
- + FM_DMP_V32(buf, n, p_mm, tsec_id2);
- + FM_DMP_V32(buf, n, p_mm, ievent);
- + FM_DMP_V32(buf, n, p_mm, imask);
- + FM_DMP_V32(buf, n, p_mm, ecntrl);
- + FM_DMP_V32(buf, n, p_mm, ptv);
- + FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
- + FM_DMP_V32(buf, n, p_mm, tmr_pevent);
- + FM_DMP_V32(buf, n, p_mm, tmr_pemask);
- + FM_DMP_V32(buf, n, p_mm, tctrl);
- + FM_DMP_V32(buf, n, p_mm, rctrl);
- + FM_DMP_V32(buf, n, p_mm, maccfg1);
- + FM_DMP_V32(buf, n, p_mm, maccfg2);
- + FM_DMP_V32(buf, n, p_mm, ipgifg);
- + FM_DMP_V32(buf, n, p_mm, hafdup);
- + FM_DMP_V32(buf, n, p_mm, maxfrm);
- +
- + FM_DMP_V32(buf, n, p_mm, macstnaddr1);
- + FM_DMP_V32(buf, n, p_mm, macstnaddr2);
- +
- + for (i = 0; i < 7; ++i) {
- + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
- + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
- + }
- +
- + FM_DMP_V32(buf, n, p_mm, car1);
- + FM_DMP_V32(buf, n, p_mm, car2);
- +
- + return n;
- +}
- +
- +static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
- +{
- + struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
- + int n = nn;
- +
- + FM_DMP_SUBTITLE(buf, n, "\n");
- + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index);
- +
- + FM_DMP_V32(buf, n, p_mm, tgec_id);
- + FM_DMP_V32(buf, n, p_mm, command_config);
- + FM_DMP_V32(buf, n, p_mm, mac_addr_0);
- + FM_DMP_V32(buf, n, p_mm, mac_addr_1);
- + FM_DMP_V32(buf, n, p_mm, maxfrm);
- + FM_DMP_V32(buf, n, p_mm, pause_quant);
- + FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
- + FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
- + FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
- + FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
- + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
- + FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
- + FM_DMP_V32(buf, n, p_mm, mdio_command);
- + FM_DMP_V32(buf, n, p_mm, mdio_data);
- + FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
- + FM_DMP_V32(buf, n, p_mm, status);
- + FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
- + FM_DMP_V32(buf, n, p_mm, mac_addr_2);
- + FM_DMP_V32(buf, n, p_mm, mac_addr_3);
- + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
- + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
- + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
- + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
- + FM_DMP_V32(buf, n, p_mm, imask);
- + FM_DMP_V32(buf, n, p_mm, ievent);
- +
- + return n;
- +}
- +
- +static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
- +{
- + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
- + int i = 0, n = nn;
- +
- + FM_DMP_SUBTITLE(buf, n, "\n");
- + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index);
- +
- + FM_DMP_V32(buf, n, p_mm, command_config);
- + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
- + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
- + FM_DMP_V32(buf, n, p_mm, maxfrm);
- + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
- + FM_DMP_V32(buf, n, p_mm, ievent);
- + FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
- + FM_DMP_V32(buf, n, p_mm, imask);
- +
- + for (i = 0; i < 4; ++i)
- + FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
- +
- + for (i = 0; i < 4; ++i)
- + FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
- +
- + FM_DMP_V32(buf, n, p_mm, rx_pause_status);
- +
- + for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
- + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
- + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
- + }
- +
- + FM_DMP_V32(buf, n, p_mm, lpwake_timer);
- + FM_DMP_V32(buf, n, p_mm, sleep_timer);
- + FM_DMP_V32(buf, n, p_mm, statn_config);
- + FM_DMP_V32(buf, n, p_mm, if_mode);
- + FM_DMP_V32(buf, n, p_mm, if_status);
- + FM_DMP_V32(buf, n, p_mm, hg_config);
- + FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
- + FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
- + FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
- + FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
- + FM_DMP_V32(buf, n, p_mm, rhm);
- + FM_DMP_V32(buf, n, p_mm, thm);
- +
- + return n;
- +}
- +
- +static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
- +{
- + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
- + int n = nn;
- +
- + FM_DMP_SUBTITLE(buf, n, "\n");
- + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index);
- +
- + /* Rx Statistics Counter */
- + FM_DMP_V32(buf, n, p_mm, reoct_l);
- + FM_DMP_V32(buf, n, p_mm, reoct_u);
- + FM_DMP_V32(buf, n, p_mm, roct_l);
- + FM_DMP_V32(buf, n, p_mm, roct_u);
- + FM_DMP_V32(buf, n, p_mm, raln_l);
- + FM_DMP_V32(buf, n, p_mm, raln_u);
- + FM_DMP_V32(buf, n, p_mm, rxpf_l);
- + FM_DMP_V32(buf, n, p_mm, rxpf_u);
- + FM_DMP_V32(buf, n, p_mm, rfrm_l);
- + FM_DMP_V32(buf, n, p_mm, rfrm_u);
- + FM_DMP_V32(buf, n, p_mm, rfcs_l);
- + FM_DMP_V32(buf, n, p_mm, rfcs_u);
- + FM_DMP_V32(buf, n, p_mm, rvlan_l);
- + FM_DMP_V32(buf, n, p_mm, rvlan_u);
- + FM_DMP_V32(buf, n, p_mm, rerr_l);
- + FM_DMP_V32(buf, n, p_mm, rerr_u);
- + FM_DMP_V32(buf, n, p_mm, ruca_l);
- + FM_DMP_V32(buf, n, p_mm, ruca_u);
- + FM_DMP_V32(buf, n, p_mm, rmca_l);
- + FM_DMP_V32(buf, n, p_mm, rmca_u);
- + FM_DMP_V32(buf, n, p_mm, rbca_l);
- + FM_DMP_V32(buf, n, p_mm, rbca_u);
- + FM_DMP_V32(buf, n, p_mm, rdrp_l);
- + FM_DMP_V32(buf, n, p_mm, rdrp_u);
- + FM_DMP_V32(buf, n, p_mm, rpkt_l);
- + FM_DMP_V32(buf, n, p_mm, rpkt_u);
- + FM_DMP_V32(buf, n, p_mm, rund_l);
- + FM_DMP_V32(buf, n, p_mm, rund_u);
- + FM_DMP_V32(buf, n, p_mm, r64_l);
- + FM_DMP_V32(buf, n, p_mm, r64_u);
- + FM_DMP_V32(buf, n, p_mm, r127_l);
- + FM_DMP_V32(buf, n, p_mm, r127_u);
- + FM_DMP_V32(buf, n, p_mm, r255_l);
- + FM_DMP_V32(buf, n, p_mm, r255_u);
- + FM_DMP_V32(buf, n, p_mm, r511_l);
- + FM_DMP_V32(buf, n, p_mm, r511_u);
- + FM_DMP_V32(buf, n, p_mm, r1023_l);
- + FM_DMP_V32(buf, n, p_mm, r1023_u);
- + FM_DMP_V32(buf, n, p_mm, r1518_l);
- + FM_DMP_V32(buf, n, p_mm, r1518_u);
- + FM_DMP_V32(buf, n, p_mm, r1519x_l);
- + FM_DMP_V32(buf, n, p_mm, r1519x_u);
- + FM_DMP_V32(buf, n, p_mm, rovr_l);
- + FM_DMP_V32(buf, n, p_mm, rovr_u);
- + FM_DMP_V32(buf, n, p_mm, rjbr_l);
- + FM_DMP_V32(buf, n, p_mm, rjbr_u);
- + FM_DMP_V32(buf, n, p_mm, rfrg_l);
- + FM_DMP_V32(buf, n, p_mm, rfrg_u);
- + FM_DMP_V32(buf, n, p_mm, rcnp_l);
- + FM_DMP_V32(buf, n, p_mm, rcnp_u);
- + FM_DMP_V32(buf, n, p_mm, rdrntp_l);
- + FM_DMP_V32(buf, n, p_mm, rdrntp_u);
- +
- + return n;
- +}
- +
- +static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
- +{
- + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
- + int n = nn;
- +
- + FM_DMP_SUBTITLE(buf, n, "\n");
- + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index);
- +
- +
- + /* Tx Statistics Counter */
- + FM_DMP_V32(buf, n, p_mm, teoct_l);
- + FM_DMP_V32(buf, n, p_mm, teoct_u);
- + FM_DMP_V32(buf, n, p_mm, toct_l);
- + FM_DMP_V32(buf, n, p_mm, toct_u);
- + FM_DMP_V32(buf, n, p_mm, txpf_l);
- + FM_DMP_V32(buf, n, p_mm, txpf_u);
- + FM_DMP_V32(buf, n, p_mm, tfrm_l);
- + FM_DMP_V32(buf, n, p_mm, tfrm_u);
- + FM_DMP_V32(buf, n, p_mm, tfcs_l);
- + FM_DMP_V32(buf, n, p_mm, tfcs_u);
- + FM_DMP_V32(buf, n, p_mm, tvlan_l);
- + FM_DMP_V32(buf, n, p_mm, tvlan_u);
- + FM_DMP_V32(buf, n, p_mm, terr_l);
- + FM_DMP_V32(buf, n, p_mm, terr_u);
- + FM_DMP_V32(buf, n, p_mm, tuca_l);
- + FM_DMP_V32(buf, n, p_mm, tuca_u);
- + FM_DMP_V32(buf, n, p_mm, tmca_l);
- + FM_DMP_V32(buf, n, p_mm, tmca_u);
- + FM_DMP_V32(buf, n, p_mm, tbca_l);
- + FM_DMP_V32(buf, n, p_mm, tbca_u);
- + FM_DMP_V32(buf, n, p_mm, tpkt_l);
- + FM_DMP_V32(buf, n, p_mm, tpkt_u);
- + FM_DMP_V32(buf, n, p_mm, tund_l);
- + FM_DMP_V32(buf, n, p_mm, tund_u);
- + FM_DMP_V32(buf, n, p_mm, t64_l);
- + FM_DMP_V32(buf, n, p_mm, t64_u);
- + FM_DMP_V32(buf, n, p_mm, t127_l);
- + FM_DMP_V32(buf, n, p_mm, t127_u);
- + FM_DMP_V32(buf, n, p_mm, t255_l);
- + FM_DMP_V32(buf, n, p_mm, t255_u);
- + FM_DMP_V32(buf, n, p_mm, t511_l);
- + FM_DMP_V32(buf, n, p_mm, t511_u);
- + FM_DMP_V32(buf, n, p_mm, t1023_l);
- + FM_DMP_V32(buf, n, p_mm, t1023_u);
- + FM_DMP_V32(buf, n, p_mm, t1518_l);
- + FM_DMP_V32(buf, n, p_mm, t1518_u);
- + FM_DMP_V32(buf, n, p_mm, t1519x_l);
- + FM_DMP_V32(buf, n, p_mm, t1519x_u);
- + FM_DMP_V32(buf, n, p_mm, tcnp_l);
- + FM_DMP_V32(buf, n, p_mm, tcnp_u);
- +
- + return n;
- +}
- +
- +int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
- +{
- + int n = nn;
- +
- + n = h_mac->dump_mac_regs(h_mac, buf, n);
- +
- + return n;
- +}
- +EXPORT_SYMBOL(fm_mac_dump_regs);
- +
- +int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
- +{
- + int n = nn;
- +
- + if(h_mac->dump_mac_rx_stats)
- + n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
- +
- + return n;
- +}
- +EXPORT_SYMBOL(fm_mac_dump_rx_stats);
- +
- +int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
- +{
- + int n = nn;
- +
- + if(h_mac->dump_mac_tx_stats)
- + n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
- +
- + return n;
- +}
- +EXPORT_SYMBOL(fm_mac_dump_tx_stats);
- +
- +static void __cold setup_dtsec(struct mac_device *mac_dev)
- +{
- + mac_dev->init_phy = dtsec_init_phy;
- + mac_dev->init = init;
- + mac_dev->start = start;
- + mac_dev->stop = stop;
- + mac_dev->set_promisc = fm_mac_set_promiscuous;
- + mac_dev->change_addr = fm_mac_modify_mac_addr;
- + mac_dev->set_multi = set_multi;
- + mac_dev->uninit = uninit;
- + mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
- + mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
- + mac_dev->get_mac_handle = get_mac_handle;
- + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
- + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
- + mac_dev->fm_rtc_enable = fm_rtc_enable;
- + mac_dev->fm_rtc_disable = fm_rtc_disable;
- + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
- + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
- + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
- + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
- + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
- + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
- + mac_dev->set_wol = fm_mac_set_wol;
- + mac_dev->dump_mac_regs = dtsec_dump_regs;
- +}
- +
- +static void __cold setup_xgmac(struct mac_device *mac_dev)
- +{
- + mac_dev->init_phy = xgmac_init_phy;
- + mac_dev->init = init;
- + mac_dev->start = start;
- + mac_dev->stop = stop;
- + mac_dev->set_promisc = fm_mac_set_promiscuous;
- + mac_dev->change_addr = fm_mac_modify_mac_addr;
- + mac_dev->set_multi = set_multi;
- + mac_dev->uninit = uninit;
- + mac_dev->get_mac_handle = get_mac_handle;
- + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
- + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
- + mac_dev->set_wol = fm_mac_set_wol;
- + mac_dev->dump_mac_regs = xgmac_dump_regs;
- +}
- +
- +static void __cold setup_memac(struct mac_device *mac_dev)
- +{
- + mac_dev->init_phy = memac_init_phy;
- + mac_dev->init = memac_init;
- + mac_dev->start = start;
- + mac_dev->stop = stop;
- + mac_dev->set_promisc = fm_mac_set_promiscuous;
- + mac_dev->change_addr = fm_mac_modify_mac_addr;
- + mac_dev->set_multi = set_multi;
- + mac_dev->uninit = uninit;
- + mac_dev->get_mac_handle = get_mac_handle;
- + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
- + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
- + mac_dev->fm_rtc_enable = fm_rtc_enable;
- + mac_dev->fm_rtc_disable = fm_rtc_disable;
- + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
- + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
- + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
- + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
- + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
- + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
- + mac_dev->set_wol = fm_mac_set_wol;
- + mac_dev->dump_mac_regs = memac_dump_regs;
- + mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
- + mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
- +}
- +
- +void (*const mac_setup[])(struct mac_device *mac_dev) = {
- + [DTSEC] = setup_dtsec,
- + [XGMAC] = setup_xgmac,
- + [MEMAC] = setup_memac
- +};
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
- @@ -0,0 +1,470 @@
- +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_address.h>
- +#include <linux/of_platform.h>
- +#include <linux/of_net.h>
- +#include <linux/device.h>
- +#include <linux/phy.h>
- +#include <linux/io.h>
- +
- +#include "lnxwrp_fm_ext.h"
- +
- +#include "mac.h"
- +
- +#define DTSEC_SUPPORTED \
- + (SUPPORTED_10baseT_Half \
- + | SUPPORTED_10baseT_Full \
- + | SUPPORTED_100baseT_Half \
- + | SUPPORTED_100baseT_Full \
- + | SUPPORTED_Autoneg \
- + | SUPPORTED_Pause \
- + | SUPPORTED_Asym_Pause \
- + | SUPPORTED_MII)
- +
- +static const char phy_str[][11] = {
- + [PHY_INTERFACE_MODE_MII] = "mii",
- + [PHY_INTERFACE_MODE_GMII] = "gmii",
- + [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- + [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
- + [PHY_INTERFACE_MODE_TBI] = "tbi",
- + [PHY_INTERFACE_MODE_RMII] = "rmii",
- + [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- + [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- + [PHY_INTERFACE_MODE_XGMII] = "xgmii",
- +	[PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500"
- +};
- +
- +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
- +{
- + int i;
- +
- + for (i = 0; i < ARRAY_SIZE(phy_str); i++)
- + if (strcmp(str, phy_str[i]) == 0)
- + return (phy_interface_t)i;
- +
- + return PHY_INTERFACE_MODE_MII;
- +}
- +
- +static const uint16_t phy2speed[] = {
- + [PHY_INTERFACE_MODE_MII] = SPEED_100,
- + [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
- + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
- + [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
- + [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
- + [PHY_INTERFACE_MODE_RMII] = SPEED_100,
- + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
- + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
- + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
- + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
- + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
- + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
- +	[PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500
- +};
- +
- +static struct mac_device * __cold
- +alloc_macdev(struct device *dev, size_t sizeof_priv,
- + void (*setup)(struct mac_device *mac_dev))
- +{
- + struct mac_device *mac_dev;
- +
- + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
- + if (unlikely(mac_dev == NULL))
- + mac_dev = ERR_PTR(-ENOMEM);
- + else {
- + mac_dev->dev = dev;
- + dev_set_drvdata(dev, mac_dev);
- + setup(mac_dev);
- + }
- +
- + return mac_dev;
- +}
- +
- +static int __cold free_macdev(struct mac_device *mac_dev)
- +{
- + dev_set_drvdata(mac_dev->dev, NULL);
- +
- + return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
- +}
- +
- +static const struct of_device_id mac_match[] = {
- + [DTSEC] = {
- + .compatible = "fsl,fman-1g-mac"
- + },
- + [XGMAC] = {
- + .compatible = "fsl,fman-10g-mac"
- + },
- + [MEMAC] = {
- + .compatible = "fsl,fman-memac"
- + },
- + {}
- +};
- +MODULE_DEVICE_TABLE(of, mac_match);
- +
- +static int __cold mac_probe(struct platform_device *_of_dev)
- +{
- + int _errno, i;
- + struct device *dev;
- + struct device_node *mac_node, *dev_node;
- + struct mac_device *mac_dev;
- + struct platform_device *of_dev;
- + struct resource res;
- + const uint8_t *mac_addr;
- + const char *char_prop;
- + int nph;
- + u32 cell_index;
- + const struct of_device_id *match;
- +
- + dev = &_of_dev->dev;
- + mac_node = dev->of_node;
- +
- + match = of_match_device(mac_match, dev);
- + if (!match)
- + return -EINVAL;
- +
- + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
- + i++)
- + ;
- + BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
- +
- + mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
- + if (IS_ERR(mac_dev)) {
- + _errno = PTR_ERR(mac_dev);
- + dev_err(dev, "alloc_macdev() = %d\n", _errno);
- + goto _return;
- + }
- +
- + INIT_LIST_HEAD(&mac_dev->mc_addr_list);
- +
- + /* Get the FM node */
- + dev_node = of_get_parent(mac_node);
- + if (unlikely(dev_node == NULL)) {
- + dev_err(dev, "of_get_parent(%s) failed\n",
- + mac_node->full_name);
- + _errno = -EINVAL;
- + goto _return_dev_set_drvdata;
- + }
- +
- + of_dev = of_find_device_by_node(dev_node);
- + if (unlikely(of_dev == NULL)) {
- + dev_err(dev, "of_find_device_by_node(%s) failed\n",
- + dev_node->full_name);
- + _errno = -EINVAL;
- + goto _return_of_node_put;
- + }
- +
- + mac_dev->fm_dev = fm_bind(&of_dev->dev);
- + if (unlikely(mac_dev->fm_dev == NULL)) {
- + dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
- + _errno = -ENODEV;
- + goto _return_of_node_put;
- + }
- +
- + mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
- + of_node_put(dev_node);
- +
- + /* Get the address of the memory mapped registers */
- + _errno = of_address_to_resource(mac_node, 0, &res);
- + if (unlikely(_errno < 0)) {
- + dev_err(dev, "of_address_to_resource(%s) = %d\n",
- + mac_node->full_name, _errno);
- + goto _return_dev_set_drvdata;
- + }
- +
- + mac_dev->res = __devm_request_region(
- + dev,
- + fm_get_mem_region(mac_dev->fm_dev),
- + res.start, res.end + 1 - res.start, "mac");
- + if (unlikely(mac_dev->res == NULL)) {
- + dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- + _errno = -EBUSY;
- + goto _return_dev_set_drvdata;
- + }
- +
- + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
- + mac_dev->res->end + 1
- + - mac_dev->res->start);
- + if (unlikely(mac_dev->vaddr == NULL)) {
- + dev_err(dev, "devm_ioremap() failed\n");
- + _errno = -EIO;
- + goto _return_dev_set_drvdata;
- + }
- +
- +#define TBIPA_OFFSET 0x1c
- +#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
- + mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
- + if (mac_dev->tbi_node) {
- + u32 tbiaddr = TBIPA_DEFAULT_ADDR;
- + const __be32 *tbi_reg;
- + void __iomem *addr;
- +
- + tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
- + if (tbi_reg)
- + tbiaddr = be32_to_cpup(tbi_reg);
- + addr = mac_dev->vaddr + TBIPA_OFFSET;
- + /* TODO: out_be32 does not exist on ARM */
- + out_be32(addr, tbiaddr);
- + }
- +
- + if (!of_device_is_available(mac_node)) {
- + devm_iounmap(dev, mac_dev->vaddr);
- + __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
- + res.start, res.end + 1 - res.start);
- + fm_unbind(mac_dev->fm_dev);
- + devm_kfree(dev, mac_dev);
- + dev_set_drvdata(dev, NULL);
- + return -ENODEV;
- + }
- +
- + /* Get the cell-index */
- + _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
- + if (unlikely(_errno)) {
- + dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
- + mac_node->full_name);
- + goto _return_dev_set_drvdata;
- + }
- + mac_dev->cell_index = (uint8_t)cell_index;
- +
- + /* Get the MAC address */
- + mac_addr = of_get_mac_address(mac_node);
- + if (unlikely(mac_addr == NULL)) {
- + dev_err(dev, "of_get_mac_address(%s) failed\n",
- + mac_node->full_name);
- + _errno = -EINVAL;
- + goto _return_dev_set_drvdata;
- + }
- + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
- +
- + /* Verify the number of port handles */
- + nph = of_count_phandle_with_args(mac_node, "fsl,port-handles", NULL);
- + if (unlikely(nph < 0)) {
- + dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
- + mac_node->full_name);
- + _errno = nph;
- + goto _return_dev_set_drvdata;
- + }
- +
- + if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
- + dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n",
- + mac_node->full_name);
- + _errno = -EINVAL;
- + goto _return_dev_set_drvdata;
- + }
- +
- + for_each_port_device(i, mac_dev->port_dev) {
- + dev_node = of_parse_phandle(mac_node, "fsl,port-handles", i);
- + if (unlikely(dev_node == NULL)) {
- + dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
- + mac_node->full_name);
- + _errno = -EINVAL;
- + goto _return_of_node_put;
- + }
- +
- + of_dev = of_find_device_by_node(dev_node);
- + if (unlikely(of_dev == NULL)) {
- + dev_err(dev, "of_find_device_by_node(%s) failed\n",
- + dev_node->full_name);
- + _errno = -EINVAL;
- + goto _return_of_node_put;
- + }
- +
- + mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
- + if (unlikely(mac_dev->port_dev[i] == NULL)) {
- + dev_err(dev, "dev_get_drvdata(%s) failed\n",
- + dev_node->full_name);
- + _errno = -EINVAL;
- + goto _return_of_node_put;
- + }
- + of_node_put(dev_node);
- + }
- +
- + /* Get the PHY connection type */
- + _errno = of_property_read_string(mac_node, "phy-connection-type",
- + &char_prop);
- + if (unlikely(_errno)) {
- + dev_warn(dev,
- + "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
- + mac_node->full_name);
- +		mac_dev->phy_if = PHY_INTERFACE_MODE_MII; char_prop = "mii"; /* keep strstr() below defined */
- + } else
- + mac_dev->phy_if = str2phy(char_prop);
- +
- + mac_dev->link = false;
- + mac_dev->half_duplex = false;
- + mac_dev->speed = phy2speed[mac_dev->phy_if];
- + mac_dev->max_speed = mac_dev->speed;
- + mac_dev->if_support = DTSEC_SUPPORTED;
- + /* We don't support half-duplex in SGMII mode */
- + if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii"))
- + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
- + SUPPORTED_100baseT_Half);
- +
- + if (strstr(char_prop, "sgmii-2500"))
- + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
- + SUPPORTED_100baseT_Half);
- +
- + /* Gigabit support (no half-duplex) */
- + if (mac_dev->max_speed == 1000)
- + mac_dev->if_support |= SUPPORTED_1000baseT_Full;
- +
- + /* The 10G interface only supports one mode */
- + if (strstr(char_prop, "xgmii"))
- + mac_dev->if_support = SUPPORTED_10000baseT_Full;
- +
- + /* Get the rest of the PHY information */
- + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- + if (mac_dev->phy_node == NULL) {
- + u32 phy_id;
- +
- + _errno = of_property_read_u32(mac_node, "fixed-link", &phy_id);
- + if (_errno) {
- + dev_err(dev, "No PHY (or fixed link) found\n");
- + _errno = -EINVAL;
- + goto _return_dev_set_drvdata;
- + }
- +
- + sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "fixed-0",
- + phy_id);
- + }
- +
- + _errno = mac_dev->init(mac_dev);
- + if (unlikely(_errno < 0)) {
- + dev_err(dev, "mac_dev->init() = %d\n", _errno);
- + goto _return_dev_set_drvdata;
- + }
- +
- + /* pause frame autonegotiation enabled*/
- + mac_dev->autoneg_pause = true;
- +
- + /* by intializing the values to false, force FMD to enable PAUSE frames
- + * on RX and TX
- + */
- + mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
- + mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
- + _errno = set_mac_active_pause(mac_dev, true, true);
- + if (unlikely(_errno < 0))
- + dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
- +
- + dev_info(dev,
- + "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
- + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
- + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
- +
- + goto _return;
- +
- +_return_of_node_put:
- + of_node_put(dev_node);
- +_return_dev_set_drvdata:
- + dev_set_drvdata(dev, NULL);
- +_return:
- + return _errno;
- +}
- +
- +static int __cold mac_remove(struct platform_device *of_dev)
- +{
- + int i, _errno;
- + struct device *dev;
- + struct mac_device *mac_dev;
- +
- + dev = &of_dev->dev;
- + mac_dev = (struct mac_device *)dev_get_drvdata(dev);
- +
- + for_each_port_device(i, mac_dev->port_dev)
- + fm_port_unbind(mac_dev->port_dev[i]);
- +
- + fm_unbind(mac_dev->fm_dev);
- +
- + _errno = free_macdev(mac_dev);
- +
- + return _errno;
- +}
- +
- +static struct platform_driver mac_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME,
- + .of_match_table = mac_match,
- + .owner = THIS_MODULE,
- + },
- + .probe = mac_probe,
- + .remove = mac_remove
- +};
- +
- +static int __init __cold mac_load(void)
- +{
- + int _errno;
- +
- + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
- +
- + _errno = platform_driver_register(&mac_driver);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + goto _return;
- + }
- +
- + goto _return;
- +
- +_return:
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + return _errno;
- +}
- +module_init(mac_load);
- +
- +static void __exit __cold mac_unload(void)
- +{
- + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + platform_driver_unregister(&mac_driver);
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +}
- +module_exit(mac_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
- @@ -0,0 +1,134 @@
- +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __MAC_H
- +#define __MAC_H
- +
- +#include <linux/device.h> /* struct device, BUS_ID_SIZE */
- +#include <linux/if_ether.h> /* ETH_ALEN */
- +#include <linux/phy.h> /* phy_interface_t, struct phy_device */
- +#include <linux/list.h>
- +
- +#include "lnxwrp_fsl_fman.h" /* struct port_device */
- +
- +enum {DTSEC, XGMAC, MEMAC};
- +
- +struct mac_device {
- + struct device *dev;
- + void *priv;
- + uint8_t cell_index;
- + struct resource *res;
- + void __iomem *vaddr;
- + uint8_t addr[ETH_ALEN];
- + bool promisc;
- +
- + struct fm *fm_dev;
- + struct fm_port *port_dev[2];
- +
- + phy_interface_t phy_if;
- + u32 if_support;
- + bool link;
- + bool half_duplex;
- + uint16_t speed;
- + uint16_t max_speed;
- + struct device_node *phy_node;
- + char fixed_bus_id[MII_BUS_ID_SIZE + 3];
- + struct device_node *tbi_node;
- + struct phy_device *phy_dev;
- + void *fm;
- + /* List of multicast addresses */
- + struct list_head mc_addr_list;
- +
- + bool autoneg_pause;
- + bool rx_pause_req;
- + bool tx_pause_req;
- + bool rx_pause_active;
- + bool tx_pause_active;
- +
- + struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
- + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
- + int (*init)(struct mac_device *mac_dev);
- + int (*start)(struct mac_device *mac_dev);
- + int (*stop)(struct mac_device *mac_dev);
- + int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
- + int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
- + int (*set_multi)(struct net_device *net_dev,
- + struct mac_device *mac_dev);
- + int (*uninit)(struct fm_mac_dev *fm_mac_dev);
- + int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
- + int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
- + int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
- + int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
- + int (*fm_rtc_enable)(struct fm *fm_dev);
- + int (*fm_rtc_disable)(struct fm *fm_dev);
- + int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
- + int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
- + int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
- + int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
- + int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
- + int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
- + uint64_t fiper);
- +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
- + int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
- + int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
- +#endif
- + int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
- + bool en);
- + int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
- + int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
- + int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
- +};
- +
- +struct mac_address {
- + uint8_t addr[ETH_ALEN];
- + struct list_head list;
- +};
- +
- +#define get_fm_handle(net_dev) \
- + (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
- +
- +#define for_each_port_device(i, port_dev) \
- + for (i = 0; i < ARRAY_SIZE(port_dev); i++)
- +
- +static inline __attribute((nonnull)) void *macdev_priv(
- + const struct mac_device *mac_dev)
- +{
- + return (void *)mac_dev + sizeof(*mac_dev);
- +}
- +
- +extern const char *mac_driver_description;
- +extern const size_t mac_sizeof_priv[];
- +extern void (*const mac_setup[])(struct mac_device *mac_dev);
- +
- +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
- +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
- +
- +#endif /* __MAC_H */
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
- @@ -0,0 +1,848 @@
- +/* Copyright 2011-2012 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
- + * Validates device-tree configuration and sets up the offline ports.
- + */
- +
- +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
- + KBUILD_BASENAME".c", __LINE__, __func__
- +#else
- +#define pr_fmt(fmt) \
- + KBUILD_MODNAME ": " fmt
- +#endif
- +
- +
- +#include <linux/init.h>
- +#include <linux/module.h>
- +#include <linux/of_platform.h>
- +#include <linux/fsl_qman.h>
- +
- +#include "offline_port.h"
- +#include "dpaa_eth.h"
- +#include "dpaa_eth_common.h"
- +
- +#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
- +/* Manip extra space and data alignment for fragmentation */
- +#define FRAG_MANIP_SPACE 128
- +#define FRAG_DATA_ALIGN 64
- +
- +
- +MODULE_LICENSE("Dual BSD/GPL");
- +MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
- +MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
- +
- +
- +static const struct of_device_id oh_port_match_table[] = {
- + {
- + .compatible = "fsl,dpa-oh"
- + },
- + {
- + .compatible = "fsl,dpa-oh-shared"
- + },
- + {}
- +};
- +MODULE_DEVICE_TABLE(of, oh_port_match_table);
- +
- +#ifdef CONFIG_PM
- +
- +static int oh_suspend(struct device *dev)
- +{
- + struct dpa_oh_config_s *oh_config;
- +
- + oh_config = dev_get_drvdata(dev);
- + return fm_port_suspend(oh_config->oh_port);
- +}
- +
- +static int oh_resume(struct device *dev)
- +{
- + struct dpa_oh_config_s *oh_config;
- +
- + oh_config = dev_get_drvdata(dev);
- + return fm_port_resume(oh_config->oh_port);
- +}
- +
- +static const struct dev_pm_ops oh_pm_ops = {
- + .suspend = oh_suspend,
- + .resume = oh_resume,
- +};
- +
- +#define OH_PM_OPS (&oh_pm_ops)
- +
- +#else /* CONFIG_PM */
- +
- +#define OH_PM_OPS NULL
- +
- +#endif /* CONFIG_PM */
- +
- +/* Creates Frame Queues */
- +static uint32_t oh_fq_create(struct qman_fq *fq,
- + uint32_t fq_id, uint16_t channel,
- + uint16_t wq_id)
- +{
- + struct qm_mcc_initfq fq_opts;
- + uint32_t create_flags, init_flags;
- + uint32_t ret = 0;
- +
- + if (fq == NULL)
- + return 1;
- +
- + /* Set flags for FQ create */
- + create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
- +
- + /* Create frame queue */
- + ret = qman_create_fq(fq_id, create_flags, fq);
- + if (ret != 0)
- + return 1;
- +
- + /* Set flags for FQ init */
- + init_flags = QMAN_INITFQ_FLAG_SCHED;
- +
- + /* Set FQ init options. Specify destination WQ ID and channel */
- + fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
- + fq_opts.fqd.dest.wq = wq_id;
- + fq_opts.fqd.dest.channel = channel;
- +
- + /* Initialize frame queue */
- + ret = qman_init_fq(fq, init_flags, &fq_opts);
- + if (ret != 0) {
- + qman_destroy_fq(fq, 0);
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +static void dump_fq(struct device *dev, int fqid, uint16_t channel)
- +{
- + if (channel) {
- + /* display fqs with a valid (!= 0) destination channel */
- + dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
- + }
- +}
- +
- +static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
- + int fqs_count, uint16_t channel_id)
- +{
- + int i;
- + for (i = 0; i < fqs_count; i++)
- + dump_fq(dev, (fqs + i)->fqid, channel_id);
- +}
- +
- +static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
- +{
- + struct list_head *fq_list;
- + struct fq_duple *fqd;
- + int i;
- +
- + dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
- + dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
- +
- + /* TX queues (old initialization) */
- + dev_info(dev, "Initialized queues:");
- + for (i = 0; i < conf->egress_cnt; i++)
- + dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
- + conf->channel);
- +
- + /* initialized ingress queues */
- + list_for_each(fq_list, &conf->fqs_ingress_list) {
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
- + }
- +
- + /* initialized egress queues */
- + list_for_each(fq_list, &conf->fqs_egress_list) {
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
- + }
- +}
- +
- +/* Destroys Frame Queues */
- +static void oh_fq_destroy(struct qman_fq *fq)
- +{
- + int _errno = 0;
- +
- + _errno = qman_retire_fq(fq, NULL);
- + if (unlikely(_errno < 0))
- + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__,
- + qman_fq_fqid(fq), _errno);
- +
- + _errno = qman_oos_fq(fq);
- + if (unlikely(_errno < 0)) {
- + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__,
- + qman_fq_fqid(fq), _errno);
- + }
- +
- + qman_destroy_fq(fq, 0);
- +}
- +
- +/* Allocation code for the OH port's PCD frame queues */
- +static int __cold oh_alloc_pcd_fqids(struct device *dev,
- + uint32_t num,
- + uint8_t alignment,
- + uint32_t *base_fqid)
- +{
- + dev_crit(dev, "callback not implemented!\n");
- + BUG();
- +
- + return 0;
- +}
- +
- +static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
- +{
- + dev_crit(dev, "callback not implemented!\n");
- + BUG();
- +
- + return 0;
- +}
- +
- +static void oh_set_buffer_layout(struct fm_port *port,
- + struct dpa_buffer_layout_s *layout)
- +{
- + struct fm_port_params params;
- +
- + layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
- + layout->parse_results = true;
- + layout->hash_results = true;
- + layout->time_stamp = false;
- +
- + fm_port_get_buff_layout_ext_params(port, ¶ms);
- + layout->manip_extra_space = params.manip_extra_space;
- + layout->data_align = params.data_align;
- +}
- +
- +static int
- +oh_port_probe(struct platform_device *_of_dev)
- +{
- + struct device *dpa_oh_dev;
- + struct device_node *dpa_oh_node;
- + int lenp, _errno = 0, fq_idx, duple_idx;
- + int n_size, i, j, ret, duples_count;
- + struct platform_device *oh_of_dev;
- + struct device_node *oh_node, *bpool_node = NULL, *root_node;
- + struct device *oh_dev;
- + struct dpa_oh_config_s *oh_config = NULL;
- + const __be32 *oh_all_queues;
- + const __be32 *channel_ids;
- + const __be32 *oh_tx_queues;
- + uint32_t queues_count;
- + uint32_t crt_fqid_base;
- + uint32_t crt_fq_count;
- + bool frag_enabled = false;
- + struct fm_port_params oh_port_tx_params;
- + struct fm_port_pcd_param oh_port_pcd_params;
- + struct dpa_buffer_layout_s buf_layout;
- +
- + /* True if the current partition owns the OH port. */
- + bool init_oh_port;
- +
- + const struct of_device_id *match;
- + int crt_ext_pools_count;
- + u32 ext_pool_size;
- + u32 port_id;
- + u32 channel_id;
- +
- + int channel_ids_count;
- + int channel_idx;
- + struct fq_duple *fqd;
- + struct list_head *fq_list, *fq_list_tmp;
- +
- + const __be32 *bpool_cfg;
- + uint32_t bpid;
- +
- + memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
- + dpa_oh_dev = &_of_dev->dev;
- + dpa_oh_node = dpa_oh_dev->of_node;
- + BUG_ON(dpa_oh_node == NULL);
- +
- + match = of_match_device(oh_port_match_table, dpa_oh_dev);
- + if (!match)
- + return -EINVAL;
- +
- + dev_dbg(dpa_oh_dev, "Probing OH port...\n");
- +
- + /* Find the referenced OH node */
- + oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
- + if (oh_node == NULL) {
- + dev_err(dpa_oh_dev,
- + "Can't find OH node referenced from node %s\n",
- + dpa_oh_node->full_name);
- + return -EINVAL;
- + }
- + dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
- + match->compatible);
- +
- + _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
- + if (_errno) {
- + dev_err(dpa_oh_dev, "No port id found in node %s\n",
- + dpa_oh_node->full_name);
- + goto return_kfree;
- + }
- +
- + _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
- + &channel_id);
- + if (_errno) {
- + dev_err(dpa_oh_dev, "No channel id found in node %s\n",
- + dpa_oh_node->full_name);
- + goto return_kfree;
- + }
- +
- + oh_of_dev = of_find_device_by_node(oh_node);
- + BUG_ON(oh_of_dev == NULL);
- + oh_dev = &oh_of_dev->dev;
- +
- + /* The OH port must be initialized exactly once.
- + * The following scenarios are of interest:
- + * - the node is Linux-private (will always initialize it);
- + * - the node is shared between two Linux partitions
- + * (only one of them will initialize it);
- + * - the node is shared between a Linux and a LWE partition
- + * (Linux will initialize it) - "fsl,dpa-oh-shared"
- + */
- +
- + /* Check if the current partition owns the OH port
- + * and ought to initialize it. It may be the case that we leave this
- + * to another (also Linux) partition.
- + */
- + init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
- +
- + /* If we aren't the "owner" of the OH node, we're done here. */
- + if (!init_oh_port) {
- + dev_dbg(dpa_oh_dev,
- + "Not owning the shared OH port %s, will not initialize it.\n",
- + oh_node->full_name);
- + of_node_put(oh_node);
- + return 0;
- + }
- +
- + /* Allocate OH dev private data */
- + oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
- + if (oh_config == NULL) {
- + dev_err(dpa_oh_dev,
- + "Can't allocate private data for OH node %s referenced from node %s!\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + _errno = -ENOMEM;
- + goto return_kfree;
- + }
- +
- + INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
- + INIT_LIST_HEAD(&oh_config->fqs_egress_list);
- +
- + /* FQs that enter OH port */
- + lenp = 0;
- + oh_all_queues = of_get_property(dpa_oh_node,
- + "fsl,qman-frame-queues-ingress", &lenp);
- + if (lenp % (2 * sizeof(*oh_all_queues))) {
- + dev_warn(dpa_oh_dev,
- + "Wrong ingress queues format for OH node %s referenced from node %s!\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + /* just ignore the last unpaired value */
- + }
- +
- + duples_count = lenp / (2 * sizeof(*oh_all_queues));
- + dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
- + duples_count);
- + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
- + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
- + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
- +
- + fqd = devm_kzalloc(dpa_oh_dev,
- + sizeof(struct fq_duple), GFP_KERNEL);
- + if (!fqd) {
- + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
- + oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -ENOMEM;
- + goto return_kfree;
- + }
- +
- + fqd->fqs = devm_kzalloc(dpa_oh_dev,
- + crt_fq_count * sizeof(struct qman_fq),
- + GFP_KERNEL);
- + if (!fqd->fqs) {
- + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
- + oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -ENOMEM;
- + goto return_kfree;
- + }
- +
- + for (j = 0; j < crt_fq_count; j++)
- + (fqd->fqs + j)->fqid = crt_fqid_base + j;
- + fqd->fqs_count = crt_fq_count;
- + fqd->channel_id = (uint16_t)channel_id;
- + list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
- + }
- +
- + /* create the ingress queues */
- + list_for_each(fq_list, &oh_config->fqs_ingress_list) {
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- +
- + for (j = 0; j < fqd->fqs_count; j++) {
- + ret = oh_fq_create(fqd->fqs + j,
- + (fqd->fqs + j)->fqid,
- + fqd->channel_id, 3);
- + if (ret != 0) {
- + dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
- + (fqd->fqs + j)->fqid,
- + oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- + }
- + }
- +
- + /* FQs that exit OH port */
- + lenp = 0;
- + oh_all_queues = of_get_property(dpa_oh_node,
- + "fsl,qman-frame-queues-egress", &lenp);
- + if (lenp % (2 * sizeof(*oh_all_queues))) {
- + dev_warn(dpa_oh_dev,
- + "Wrong egress queues format for OH node %s referenced from node %s!\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + /* just ignore the last unpaired value */
- + }
- +
- + duples_count = lenp / (2 * sizeof(*oh_all_queues));
- + dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
- + duples_count);
- + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
- + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
- + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
- +
- + fqd = devm_kzalloc(dpa_oh_dev,
- + sizeof(struct fq_duple), GFP_KERNEL);
- + if (!fqd) {
- + dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
- + oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -ENOMEM;
- + goto return_kfree;
- + }
- +
- + fqd->fqs = devm_kzalloc(dpa_oh_dev,
- + crt_fq_count * sizeof(struct qman_fq),
- + GFP_KERNEL);
- + if (!fqd->fqs) {
- + dev_err(dpa_oh_dev,
- + "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
- + oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -ENOMEM;
- + goto return_kfree;
- + }
- +
- + for (j = 0; j < crt_fq_count; j++)
- + (fqd->fqs + j)->fqid = crt_fqid_base + j;
- + fqd->fqs_count = crt_fq_count;
- + /* channel ID is specified in another attribute */
- + fqd->channel_id = 0;
- + list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
- +
- + /* allocate the queue */
- +
- + }
- +
- + /* channel_ids for FQs that exit OH port */
- + lenp = 0;
- + channel_ids = of_get_property(dpa_oh_node,
- + "fsl,qman-channel-ids-egress", &lenp);
- +
- + channel_ids_count = lenp / (sizeof(*channel_ids));
- + if (channel_ids_count != duples_count) {
- + dev_warn(dpa_oh_dev,
- + "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + /* just ignore the queues that do not have a Channel ID */
- + }
- +
- + channel_idx = 0;
- + list_for_each(fq_list, &oh_config->fqs_egress_list) {
- + if (channel_idx + 1 > channel_ids_count)
- + break;
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- + fqd->channel_id =
- + (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
- + }
- +
- + /* create egress queues */
- + list_for_each(fq_list, &oh_config->fqs_egress_list) {
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- +
- + if (fqd->channel_id == 0) {
- + /* missing channel id in dts */
- + continue;
- + }
- +
- + for (j = 0; j < fqd->fqs_count; j++) {
- + ret = oh_fq_create(fqd->fqs + j,
- + (fqd->fqs + j)->fqid,
- + fqd->channel_id, 3);
- + if (ret != 0) {
- + dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
- + (fqd->fqs + j)->fqid,
- + oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- + }
- + }
- +
- + /* Read FQ ids/nums for the DPA OH node */
- + oh_all_queues = of_get_property(dpa_oh_node,
- + "fsl,qman-frame-queues-oh", &lenp);
- + if (oh_all_queues == NULL) {
- + dev_err(dpa_oh_dev,
- + "No frame queues have been defined for OH node %s referenced from node %s\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + /* Check that the OH error and default FQs are there */
- + BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
- + queues_count = lenp / (2 * sizeof(*oh_all_queues));
- + if (queues_count != 2) {
- + dev_err(dpa_oh_dev,
- + "Error and Default queues must be defined for OH node %s referenced from node %s\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + /* Read the FQIDs defined for this OH port */
- + dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
- + fq_idx = 0;
- +
- + /* Error FQID - must be present */
- + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
- + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
- + if (crt_fq_count != 1) {
- + dev_err(dpa_oh_dev,
- + "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
- + oh_node->full_name, dpa_oh_node->full_name,
- + crt_fq_count);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- + oh_config->error_fqid = crt_fqid_base;
- + dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
- + oh_config->error_fqid, oh_node->full_name);
- +
- + /* Default FQID - must be present */
- + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
- + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
- + if (crt_fq_count != 1) {
- + dev_err(dpa_oh_dev,
- + "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
- + oh_node->full_name, dpa_oh_node->full_name,
- + crt_fq_count);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- + oh_config->default_fqid = crt_fqid_base;
- + dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
- + oh_config->default_fqid, oh_node->full_name);
- +
- + /* TX FQID - presence is optional */
- + oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
- + &lenp);
- + if (oh_tx_queues == NULL) {
- + dev_dbg(dpa_oh_dev,
- + "No tx queues have been defined for OH node %s referenced from node %s\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + goto config_port;
- + }
- +
- + /* Check that queues-tx has only a base and a count defined */
- + BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
- + queues_count = lenp / (2 * sizeof(*oh_tx_queues));
- + if (queues_count != 1) {
- + dev_err(dpa_oh_dev,
- + "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + fq_idx = 0;
- + crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
- + crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
- + oh_config->egress_cnt = crt_fq_count;
- +
- + /* Allocate TX queues */
- + dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
- + oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
- + crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
- + if (oh_config->egress_fqs == NULL) {
- + dev_err(dpa_oh_dev,
- + "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
- + oh_node->full_name, dpa_oh_node->full_name);
- + _errno = -ENOMEM;
- + goto return_kfree;
- + }
- +
- + /* Create TX queues */
- + for (i = 0; i < crt_fq_count; i++) {
- + ret = oh_fq_create(oh_config->egress_fqs + i,
- + crt_fqid_base + i, (uint16_t)channel_id, 3);
- + if (ret != 0) {
- + dev_err(dpa_oh_dev,
- + "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
- + crt_fqid_base + i, oh_node->full_name,
- + dpa_oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- + }
- +
- +config_port:
- + /* Get a handle to the fm_port so we can set
- + * its configuration params
- + */
- + oh_config->oh_port = fm_port_bind(oh_dev);
- + if (oh_config->oh_port == NULL) {
- + dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
- + oh_node->full_name);
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
- +
- + /* read the pool handlers */
- + crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
- + "fsl,bman-buffer-pools", NULL);
- + if (crt_ext_pools_count <= 0) {
- + dev_info(dpa_oh_dev,
- + "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
- + oh_node->full_name);
- + goto init_port;
- + }
- +
- + /* used for reading ext_pool_size*/
- + root_node = of_find_node_by_path("/");
- + if (root_node == NULL) {
- + dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + n_size = of_n_size_cells(root_node);
- + of_node_put(root_node);
- +
- + dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
- + crt_ext_pools_count);
- +
- + oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
- +
- + for (i = 0; i < crt_ext_pools_count; i++) {
- + bpool_node = of_parse_phandle(dpa_oh_node,
- + "fsl,bman-buffer-pools", i);
- + if (bpool_node == NULL) {
- + dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
- + if (_errno) {
- + dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
- + dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
- +
- + bpool_cfg = of_get_property(bpool_node,
- + "fsl,bpool-ethernet-cfg", &lenp);
- + if (bpool_cfg == NULL) {
- + dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
- + _errno = -EINVAL;
- + goto return_kfree;
- + }
- +
- + ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
- + oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
- + dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
- + ext_pool_size);
- + of_node_put(bpool_node);
- +
- + }
- +
- + if (buf_layout.data_align != FRAG_DATA_ALIGN ||
- + buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
- + goto init_port;
- +
- + frag_enabled = true;
- + dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
- + port_id);
- +
- +init_port:
- + of_node_put(oh_node);
- + /* Set Tx params */
- + dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
- + oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
- + frag_enabled);
- + /* Set PCD params */
- + oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
- + oh_port_pcd_params.cbf = oh_free_pcd_fqids;
- + oh_port_pcd_params.dev = dpa_oh_dev;
- + fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
- +
- + dev_set_drvdata(dpa_oh_dev, oh_config);
- +
- + /* Enable the OH port */
- + _errno = fm_port_enable(oh_config->oh_port);
- + if (_errno)
- + goto return_kfree;
- +
- + dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
- +
- + /* print of all referenced & created queues */
- + dump_oh_config(dpa_oh_dev, oh_config);
- +
- + return 0;
- +
- +return_kfree:
- + if (bpool_node)
- + of_node_put(bpool_node);
- + if (oh_node)
- + of_node_put(oh_node);
- + if (oh_config && oh_config->egress_fqs)
- + devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
- +
- + list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- + list_del(fq_list);
- + devm_kfree(dpa_oh_dev, fqd->fqs);
- + devm_kfree(dpa_oh_dev, fqd);
- + }
- +
- + list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
- + fqd = list_entry(fq_list, struct fq_duple, fq_list);
- + list_del(fq_list);
- + devm_kfree(dpa_oh_dev, fqd->fqs);
- + devm_kfree(dpa_oh_dev, fqd);
- + }
- +
- + devm_kfree(dpa_oh_dev, oh_config);
- + return _errno;
- +}
- +
- +static int __cold oh_port_remove(struct platform_device *_of_dev)
- +{
- + int _errno = 0, i;
- + struct dpa_oh_config_s *oh_config;
- +
- + pr_info("Removing OH port...\n");
- +
- + oh_config = dev_get_drvdata(&_of_dev->dev);
- + if (oh_config == NULL) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): No OH config in device private data!\n",
- + KBUILD_BASENAME".c", __LINE__, __func__);
- + _errno = -ENODEV;
- + goto return_error;
- + }
- +
- + if (oh_config->egress_fqs)
- + for (i = 0; i < oh_config->egress_cnt; i++)
- + oh_fq_destroy(oh_config->egress_fqs + i);
- +
- + if (oh_config->oh_port == NULL) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): No fm port in device private data!\n",
- + KBUILD_BASENAME".c", __LINE__, __func__);
- + _errno = -EINVAL;
- + goto free_egress_fqs;
- + }
- +
- + _errno = fm_port_disable(oh_config->oh_port);
- +
- +free_egress_fqs:
- + if (oh_config->egress_fqs)
- + devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
- + devm_kfree(&_of_dev->dev, oh_config);
- + dev_set_drvdata(&_of_dev->dev, NULL);
- +
- +return_error:
- + return _errno;
- +}
- +
- +static struct platform_driver oh_port_driver = {
- + .driver = {
- + .name = KBUILD_MODNAME,
- + .of_match_table = oh_port_match_table,
- + .owner = THIS_MODULE,
- + .pm = OH_PM_OPS,
- + },
- + .probe = oh_port_probe,
- + .remove = oh_port_remove
- +};
- +
- +static int __init __cold oh_port_load(void)
- +{
- + int _errno;
- +
- + pr_info(OH_MOD_DESCRIPTION "\n");
- +
- + _errno = platform_driver_register(&oh_port_driver);
- + if (_errno < 0) {
- + pr_err(KBUILD_MODNAME
- + ": %s:%hu:%s(): platform_driver_register() = %d\n",
- + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
- + }
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- + return _errno;
- +}
- +module_init(oh_port_load);
- +
- +static void __exit __cold oh_port_unload(void)
- +{
- + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
- + KBUILD_BASENAME".c", __func__);
- +
- + platform_driver_unregister(&oh_port_driver);
- +
- + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
- + KBUILD_BASENAME".c", __func__);
- +}
- +module_exit(oh_port_unload);
- --- /dev/null
- +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
- @@ -0,0 +1,59 @@
- +/* Copyright 2011 Freescale Semiconductor Inc.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions are met:
- + * * Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * * Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + * * Neither the name of Freescale Semiconductor nor the
- + * names of its contributors may be used to endorse or promote products
- + * derived from this software without specific prior written permission.
- + *
- + *
- + * ALTERNATIVELY, this software may be distributed under the terms of the
- + * GNU General Public License ("GPL") as published by the Free Software
- + * Foundation, either version 2 of that License or (at your option) any
- + * later version.
- + *
- + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- + */
- +
- +#ifndef __OFFLINE_PORT_H
- +#define __OFFLINE_PORT_H
- +
- +struct fm_port;
- +struct qman_fq;
- +
- +/* fqs are defined in duples (base_fq, fq_count) */
- +struct fq_duple {
- + struct qman_fq *fqs;
- + int fqs_count;
- + uint16_t channel_id;
- + struct list_head fq_list;
- +};
- +
- +/* OH port configuration */
- +struct dpa_oh_config_s {
- + uint32_t error_fqid;
- + uint32_t default_fqid;
- + struct fm_port *oh_port;
- + uint32_t egress_cnt;
- + struct qman_fq *egress_fqs;
- + uint16_t channel;
- +
- + struct list_head fqs_ingress_list;
- + struct list_head fqs_egress_list;
- +};
- +
- +#endif /* __OFFLINE_PORT_H */
|