7017-fsl_qbman-add-qbman-driver.patch 771 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
426324264242652426624267242682426924270242712427224273242742427524276242772427824279242802428124282242832428424285242862428724288242892429024291242922429324294242952429624297242982429924300243012430224303243042430524306243072430824309243102431124312243132431424315243162431724318243192432024321243222432324324243252432624327243282432924330243312433224333243342433524336243372433824339243402434124342243432434424345243462434724348243492435024351243522435324354243552435624357243582435924360243612436224363243642436524366243672436824369243702437124372243732437424375243762437724378243792438024381243822438324384243852438624387243882438924390243912439224393243942439524396243972439824399244002440124402244032440424405244062440724408244092441024411244122441324414244152441624417244182441924420244212442224423244242442524426244272442824429244302443124432244332443424435244362443724438244392444024441244422444324444244452444624447244482444924450244512445224453244542445524456244572445824459244602446124462244632446424465244662446724468244692447024471244722447324474244752447624477244782447924480244812448224483244842448524486244872448824489244902449124492244932449424495244962449724498244992450024501245022450324504245052450624507245082450924510245112451224513245142451524516245172451824519245202452124522245232452424525245262452724528245292453024531245322453324534245352453624537245382453924540245412454224543245442454524546245472454824549245502455124552245532455424555245562455724558245592456024561245622456324564245652456624567245682456924570245712457224573245742457524576245772457824579245802458124582245832458424585245862458724588245892459024591245922459324594245952459624597245982459924600246012460224603246042460524606246072460824609246102461124612246132461424615246162461724618246192462024621246222462324624246252462624627246282462924630246312463224633246342463524636246372463824639246402464124642246432464424645246462464724648246492465024651246522465324654246552465624657246582465924660246612466224663246642466524666246672466824669246702467124672246732467424675246762467724678246792468024681246822468324684246852468624687246882468924690246912469224693246942469524696246972469824699247002470124702247032470424705247062470724708247092471024711247122471324714247152471624717247182471924720247212472224723247242472524726247272472824729247302473124732247332473424735247362473724738247392474024741247422474324744247452474624747247482474924750247512475224753247542475524756247572475824759247602476124762247632476424765247662476724768247692477024771247722477324774247752477624777247782477924780247812478224783247842478524786247872478824789247902479124792247932479424795247962479724798247992480024801248022480324804248052480624807248082480924810248112481224813248142481524816248172481824819248202482124822248232482424825248262482724828
From f6f8ed4784936724154832ff9e4c5afe8caa63e4 Mon Sep 17 00:00:00 2001
From: Zhao Qiang <qiang.zhao@nxp.com>
Date: Mon, 11 Jul 2016 14:39:18 +0800
Subject: [PATCH 17/70] fsl_qbman: add qbman driver

The QMan and BMan are infrastructure components of dpaa, which are used
by both software and hardware for queuing and memory allocation/deallocation.
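For orientation, the buffer-manager half of this driver is consumed through the pool API declared in include/linux/fsl_bman.h (added by this patch). The snippet below is a minimal sketch only, assuming the SDK-style calls bman_new_pool(), bman_release(), bman_acquire() and bman_free_pool() plus the BMAN_POOL_FLAG_DYNAMIC_BPID flag; the exact structure fields, flags and return conventions should be taken from the header in this patch rather than from this example.

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/fsl_bman.h>

    /*
     * Illustrative sketch only: seed a dynamically allocated buffer pool
     * with one buffer and acquire it back. All identifiers are assumed
     * from the header added by this patch, not stated by the commit.
     */
    static int example_bman_roundtrip(dma_addr_t buf_addr)
    {
            struct bman_pool_params params = {
                    .flags = BMAN_POOL_FLAG_DYNAMIC_BPID, /* let BMan pick the BPID */
            };
            struct bman_pool *pool;
            struct bm_buffer buf;
            int ret;

            pool = bman_new_pool(&params);
            if (!pool)
                    return -ENOMEM;

            /* Hand one buffer to the hardware-managed free pool... */
            bm_buffer_set64(&buf, buf_addr);
            ret = bman_release(pool, &buf, 1, 0);
            if (ret)
                    goto out;

            /* ...then acquire a buffer back from the same pool. */
            ret = bman_acquire(pool, &buf, 1, 0);
            if (ret == 1)
                    ret = 0; /* assumed to return the number of buffers acquired */
    out:
            bman_free_pool(pool);
            return ret;
    }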
Signed-off-by: Roy Pledge <Roy.Pledge@freescale.com>
Signed-off-by: Camelia Groza <camelia.groza@freescale.com>
Signed-off-by: Geoff Thorpe <Geoff.Thorpe@freescale.com>
Signed-off-by: Ahmed Mansour <Ahmed.Mansour@freescale.com>
Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
Signed-off-by: Xie Jianhua-B29408 <Jianhua.Xie@freescale.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
---
  17. arch/arm/Kconfig | 5 +
  18. arch/powerpc/Kconfig | 9 +-
  19. drivers/misc/Kconfig | 17 +
  20. drivers/staging/Kconfig | 2 +
  21. drivers/staging/Makefile | 1 +
  22. drivers/staging/fsl_qbman/Kconfig | 211 +
  23. drivers/staging/fsl_qbman/Makefile | 28 +
  24. drivers/staging/fsl_qbman/bman_config.c | 705 +++
  25. drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
  26. drivers/staging/fsl_qbman/bman_driver.c | 574 +++
  27. drivers/staging/fsl_qbman/bman_high.c | 1141 +++++
  28. drivers/staging/fsl_qbman/bman_low.h | 559 +++
  29. drivers/staging/fsl_qbman/bman_private.h | 166 +
  30. drivers/staging/fsl_qbman/bman_test.c | 56 +
  31. drivers/staging/fsl_qbman/bman_test.h | 44 +
  32. drivers/staging/fsl_qbman/bman_test_high.c | 183 +
  33. drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
  34. drivers/staging/fsl_qbman/dpa_alloc.c | 706 +++
  35. drivers/staging/fsl_qbman/dpa_sys.h | 259 ++
  36. drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
  37. drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
  38. drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
  39. drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
  40. drivers/staging/fsl_qbman/fsl_usdpaa.c | 1982 ++++++++
  41. drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 ++
  42. drivers/staging/fsl_qbman/qbman_driver.c | 88 +
  43. drivers/staging/fsl_qbman/qman_config.c | 1199 +++++
  44. drivers/staging/fsl_qbman/qman_debugfs.c | 1594 +++++++
  45. drivers/staging/fsl_qbman/qman_driver.c | 980 ++++
  46. drivers/staging/fsl_qbman/qman_high.c | 5568 +++++++++++++++++++++++
  47. drivers/staging/fsl_qbman/qman_low.h | 1407 ++++++
  48. drivers/staging/fsl_qbman/qman_private.h | 398 ++
  49. drivers/staging/fsl_qbman/qman_test.c | 57 +
  50. drivers/staging/fsl_qbman/qman_test.h | 45 +
  51. drivers/staging/fsl_qbman/qman_test_high.c | 216 +
  52. drivers/staging/fsl_qbman/qman_test_hotpotato.c | 499 ++
  53. drivers/staging/fsl_qbman/qman_utility.c | 129 +
  54. include/linux/fsl_bman.h | 532 +++
  55. include/linux/fsl_qman.h | 3889 ++++++++++++++++
  56. include/linux/fsl_usdpaa.h | 372 ++
  57. 40 files changed, 24569 insertions(+), 2 deletions(-)
  58. create mode 100644 drivers/staging/fsl_qbman/Kconfig
  59. create mode 100644 drivers/staging/fsl_qbman/Makefile
  60. create mode 100644 drivers/staging/fsl_qbman/bman_config.c
  61. create mode 100644 drivers/staging/fsl_qbman/bman_debugfs.c
  62. create mode 100644 drivers/staging/fsl_qbman/bman_driver.c
  63. create mode 100644 drivers/staging/fsl_qbman/bman_high.c
  64. create mode 100644 drivers/staging/fsl_qbman/bman_low.h
  65. create mode 100644 drivers/staging/fsl_qbman/bman_private.h
  66. create mode 100644 drivers/staging/fsl_qbman/bman_test.c
  67. create mode 100644 drivers/staging/fsl_qbman/bman_test.h
  68. create mode 100644 drivers/staging/fsl_qbman/bman_test_high.c
  69. create mode 100644 drivers/staging/fsl_qbman/bman_test_thresh.c
  70. create mode 100644 drivers/staging/fsl_qbman/dpa_alloc.c
  71. create mode 100644 drivers/staging/fsl_qbman/dpa_sys.h
  72. create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm.h
  73. create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm64.h
  74. create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc32.h
  75. create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc64.h
  76. create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa.c
  77. create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
  78. create mode 100644 drivers/staging/fsl_qbman/qbman_driver.c
  79. create mode 100644 drivers/staging/fsl_qbman/qman_config.c
  80. create mode 100644 drivers/staging/fsl_qbman/qman_debugfs.c
  81. create mode 100644 drivers/staging/fsl_qbman/qman_driver.c
  82. create mode 100644 drivers/staging/fsl_qbman/qman_high.c
  83. create mode 100644 drivers/staging/fsl_qbman/qman_low.h
  84. create mode 100644 drivers/staging/fsl_qbman/qman_private.h
  85. create mode 100644 drivers/staging/fsl_qbman/qman_test.c
  86. create mode 100644 drivers/staging/fsl_qbman/qman_test.h
  87. create mode 100644 drivers/staging/fsl_qbman/qman_test_high.c
  88. create mode 100644 drivers/staging/fsl_qbman/qman_test_hotpotato.c
  89. create mode 100644 drivers/staging/fsl_qbman/qman_utility.c
  90. create mode 100644 include/linux/fsl_bman.h
  91. create mode 100644 include/linux/fsl_qman.h
  92. create mode 100644 include/linux/fsl_usdpaa.h
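The two new public headers, include/linux/fsl_bman.h and include/linux/fsl_qman.h, are what in-kernel consumers program against. Below is a minimal, hypothetical sketch of a BMan consumer, assuming the usual DPAA pool calls (bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool()), the bm_buffer_set64() helper and the BMAN_POOL_FLAG_DYNAMIC_BPID flag declared by fsl_bman.h; the exact return conventions are assumptions, not taken verbatim from this patch.

/* Hypothetical BMan consumer sketch; assumes the pool API declared in
 * include/linux/fsl_bman.h added by this patch. */
#include <linux/fsl_bman.h>

static int example_pool_roundtrip(dma_addr_t buf_addr)
{
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,	/* let BMan pick a free BPID */
	};
	struct bman_pool *pool;
	struct bm_buffer buf;
	int ret;

	pool = bman_new_pool(&params);
	if (!pool)
		return -ENODEV;

	/* Hand one buffer (identified by its DMA address) to the pool
	 * (0 on success is assumed)... */
	bm_buffer_set64(&buf, buf_addr);
	ret = bman_release(pool, &buf, 1, 0);
	if (!ret) {
		/* ...then take a buffer back; a return of 1 means one buffer
		 * was acquired, negative values are errors (assumed). */
		ret = bman_acquire(pool, &buf, 1, 0);
		ret = (ret == 1) ? 0 : ret;
	}

	bman_free_pool(pool);
	return ret;
}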
  93. --- a/arch/arm/Kconfig
  94. +++ b/arch/arm/Kconfig
  95. @@ -1250,6 +1250,11 @@ source "arch/arm/common/Kconfig"
  96. menu "Bus support"
  97. +config HAS_FSL_QBMAN
  98. + bool "Datapath Acceleration Queue and Buffer management"
  99. + help
  100. + Datapath Acceleration Queue and Buffer management
  101. +
  102. config ISA
  103. bool
  104. help
  105. --- a/arch/powerpc/Kconfig
  106. +++ b/arch/powerpc/Kconfig
  107. @@ -792,6 +792,11 @@ config FSL_GTM
  108. help
  109. Freescale General-purpose Timers support
  110. +config HAS_FSL_QBMAN
  111. + bool "Datapath Acceleration Queue and Buffer management"
  112. + help
  113. + Datapath Acceleration Queue and Buffer management
  114. +
  115. # Yes MCA RS/6000s exist but Linux-PPC does not currently support any
  116. config MCA
  117. bool
  118. @@ -924,14 +929,14 @@ config DYNAMIC_MEMSTART
  119. select NONSTATIC_KERNEL
  120. help
  121. This option enables the kernel to be loaded at any page aligned
  122. - physical address. The kernel creates a mapping from KERNELBASE to
  123. + physical address. The kernel creates a mapping from KERNELBASE to
  124. the address where the kernel is loaded. The page size here implies
  125. the TLB page size of the mapping for kernel on the particular platform.
  126. Please refer to the init code for finding the TLB page size.
  127. DYNAMIC_MEMSTART is an easy way of implementing pseudo-RELOCATABLE
  128. kernel image, where the only restriction is the page aligned kernel
  129. - load address. When this option is enabled, the compile time physical
  130. + load address. When this option is enabled, the compile time physical
  131. address CONFIG_PHYSICAL_START is ignored.
  132. This option is overridden by CONFIG_RELOCATABLE
  133. --- a/drivers/misc/Kconfig
  134. +++ b/drivers/misc/Kconfig
  135. @@ -236,6 +236,23 @@ config SGI_XP
  136. this feature will allow for direct communication between SSIs
  137. based on a network adapter and DMA messaging.
  138. +config FSL_USDPAA
  139. + bool "Freescale USDPAA process driver"
  140. + depends on FSL_DPA
  141. + default y
  142. + help
  143. + This driver provides user-space access to kernel-managed
  144. + resource interfaces for USDPAA applications, on the assumption
  145. + that each process will open this device once. Specifically, this
  146. + device exposes functionality that would be awkward if exposed
  147. + via the portal devices - ie. this device exposes functionality
  148. + that is inherently process-wide rather than portal-specific.
  149. + This device is necessary for obtaining access to DMA memory and
  150. + for allocation of Qman and Bman resources. In short, if you wish
  151. + to use USDPAA applications, you need this.
  152. +
  153. + If unsure, say Y.
  154. +
  155. config CS5535_MFGPT
  156. tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
  157. depends on MFD_CS5535
  158. --- a/drivers/staging/Kconfig
  159. +++ b/drivers/staging/Kconfig
  160. @@ -106,6 +106,8 @@ source "drivers/staging/fbtft/Kconfig"
  161. source "drivers/staging/fsl-mc/Kconfig"
  162. +source "drivers/staging/fsl_qbman/Kconfig"
  163. +
  164. source "drivers/staging/wilc1000/Kconfig"
  165. source "drivers/staging/most/Kconfig"
  166. --- a/drivers/staging/Makefile
  167. +++ b/drivers/staging/Makefile
  168. @@ -45,5 +45,6 @@ obj-$(CONFIG_UNISYSSPAR) += unisys/
  169. obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
  170. obj-$(CONFIG_FB_TFT) += fbtft/
  171. obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
  172. +obj-$(CONFIG_FSL_DPA) += fsl_qbman/
  173. obj-$(CONFIG_WILC1000) += wilc1000/
  174. obj-$(CONFIG_MOST) += most/
  175. --- /dev/null
  176. +++ b/drivers/staging/fsl_qbman/Kconfig
  177. @@ -0,0 +1,211 @@
  178. +config FSL_DPA
  179. + bool "Freescale Datapath Queue and Buffer management"
  180. + depends on HAS_FSL_QBMAN
  181. + default y
  182. + select FSL_QMAN_FQ_LOOKUP if PPC64
  183. + select FSL_QMAN_FQ_LOOKUP if ARM64
  184. +
  185. +
  186. +menu "Freescale Datapath QMan/BMan options"
  187. + depends on FSL_DPA
  188. +
  189. +config FSL_DPA_CHECKING
  190. + bool "additional driver checking"
  191. + default n
  192. + ---help---
  193. + Compiles in additional checks to sanity-check the driver and any
  194. + use of it by other code. Not recommended for performance.
  195. +
  196. +config FSL_DPA_CAN_WAIT
  197. + bool
  198. + default y
  199. +
  200. +config FSL_DPA_CAN_WAIT_SYNC
  201. + bool
  202. + default y
  203. +
  204. +config FSL_DPA_PIRQ_FAST
  205. + bool
  206. + default y
  207. +
  208. +config FSL_DPA_PIRQ_SLOW
  209. + bool
  210. + default y
  211. +
  212. +config FSL_DPA_PORTAL_SHARE
  213. + bool
  214. + default y
  215. +
  216. +config FSL_BMAN
  217. + bool "Freescale Buffer Manager (BMan) support"
  218. + default y
  219. +
  220. +if FSL_BMAN
  221. +
  222. +config FSL_BMAN_CONFIG
  223. + bool "BMan device management"
  224. + default y
  225. + ---help---
  226. + If this linux image is running natively, you need this option. If this
  227. + linux image is running as a guest OS under the hypervisor, only one
  228. + guest OS ("the control plane") needs this option.
  229. +
  230. +config FSL_BMAN_TEST
  231. + tristate "BMan self-tests"
  232. + default n
  233. + ---help---
  234. + This option compiles self-test code for BMan.
  235. +
  236. +config FSL_BMAN_TEST_HIGH
  237. + bool "BMan high-level self-test"
  238. + depends on FSL_BMAN_TEST
  239. + default y
  240. + ---help---
  241. + This requires the presence of cpu-affine portals, and performs
  242. + high-level API testing with them (whichever portal(s) are affine to
  243. + the cpu(s) the test executes on).
  244. +
  245. +config FSL_BMAN_TEST_THRESH
  246. + bool "BMan threshold test"
  247. + depends on FSL_BMAN_TEST
  248. + default y
  249. + ---help---
  250. + Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded
  251. + before multiple threads (one per cpu) create pool objects to track
  252. + depletion state changes. The pool is then drained to empty by a
  253. + "drainer" thread, and the other threads that they observe exactly
  254. + the depletion state changes that are expected.
  255. +
  256. +config FSL_BMAN_DEBUGFS
  257. + tristate "BMan debugfs interface"
  258. + depends on DEBUG_FS
  259. + default y
  260. + ---help---
  261. + This option compiles debugfs code for BMan.
  262. +
  263. +endif # FSL_BMAN
  264. +
  265. +config FSL_QMAN
  266. + bool "Freescale Queue Manager (QMan) support"
  267. + default y
  268. +
  269. +if FSL_QMAN
  270. +
  271. +config FSL_QMAN_POLL_LIMIT
  272. + int
  273. + default 32
  274. +
  275. +config FSL_QMAN_CONFIG
  276. + bool "QMan device management"
  277. + default y
  278. + ---help---
  279. + If this linux image is running natively, you need this option. If this
  280. + linux image is running as a guest OS under the hypervisor, only one
  281. + guest OS ("the control plane") needs this option.
  282. +
  283. +config FSL_QMAN_TEST
  284. + tristate "QMan self-tests"
  285. + default n
  286. + ---help---
  287. + This option compiles self-test code for QMan.
  288. +
  289. +config FSL_QMAN_TEST_STASH_POTATO
  290. + bool "QMan 'hot potato' data-stashing self-test"
  291. + depends on FSL_QMAN_TEST
  292. + default y
  293. + ---help---
  294. + This performs a "hot potato" style test enqueuing/dequeuing a frame
  295. + across a series of FQs scheduled to different portals (and cpus), with
  296. + DQRR, data and context stashing always on.
  297. +
  298. +config FSL_QMAN_TEST_HIGH
  299. + bool "QMan high-level self-test"
  300. + depends on FSL_QMAN_TEST
  301. + default y
  302. + ---help---
  303. + This requires the presence of cpu-affine portals, and performs
  304. + high-level API testing with them (whichever portal(s) are affine to
  305. + the cpu(s) the test executes on).
  306. +
  307. +config FSL_QMAN_DEBUGFS
  308. + tristate "QMan debugfs interface"
  309. + depends on DEBUG_FS
  310. + default y
  311. + ---help---
  312. + This option compiles debugfs code for QMan.
  313. +
  314. +# H/w settings that can be hard-coded for now.
  315. +config FSL_QMAN_FQD_SZ
  316. + int "size of Frame Queue Descriptor region"
  317. + default 10
  318. + ---help---
  319. + This is the size of the FQD region defined as: PAGE_SIZE * (2^value)
  320. + ex: 10 => PAGE_SIZE * (2^10)
  321. + Note: Default device-trees now require minimum Kconfig setting of 10.
  322. +
  323. +config FSL_QMAN_PFDR_SZ
  324. + int "size of the PFDR pool"
  325. + default 13
  326. + ---help---
  327. + This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value)
  328. + ex: 13 => PAGE_SIZE * (2^13)
  329. +
  330. +# Corenet initiator settings. Stash request queues are 4-deep to match cores'
  331. +# ability to snarf. Stash priority is 3, other priorities are 2.
  332. +config FSL_QMAN_CI_SCHED_CFG_SRCCIV
  333. + int
  334. + depends on FSL_QMAN_CONFIG
  335. + default 4
  336. +config FSL_QMAN_CI_SCHED_CFG_SRQ_W
  337. + int
  338. + depends on FSL_QMAN_CONFIG
  339. + default 3
  340. +config FSL_QMAN_CI_SCHED_CFG_RW_W
  341. + int
  342. + depends on FSL_QMAN_CONFIG
  343. + default 2
  344. +config FSL_QMAN_CI_SCHED_CFG_BMAN_W
  345. + int
  346. + depends on FSL_QMAN_CONFIG
  347. + default 2
  348. +
  349. +# portal interrupt settings
  350. +config FSL_QMAN_PIRQ_DQRR_ITHRESH
  351. + int
  352. + default 12
  353. +config FSL_QMAN_PIRQ_MR_ITHRESH
  354. + int
  355. + default 4
  356. +config FSL_QMAN_PIRQ_IPERIOD
  357. + int
  358. + default 100
  359. +
  360. +# 64 bit kernel support
  361. +config FSL_QMAN_FQ_LOOKUP
  362. + bool
  363. + default n
  364. +
  365. +config QMAN_CEETM_UPDATE_PERIOD
  366. + int "Token update period for shaping, in nanoseconds"
  367. + default 1000
  368. + ---help---
  369. + Traffic shaping works by performing token calculations (using
  370. + credits) on shaper instances periodically. This update period
  371. + sets the granularity for how often those token rate credit
  372. + updates are performed, and thus determines the accuracy and
  373. + range of traffic rates that can be configured by users. The
  374. + reference manual recommends a 1 microsecond period as providing
  375. + a good balance between granularity and range.
  376. +
  377. + Unless you know what you are doing, leave this value at its default.
  378. +
  379. +config FSL_QMAN_INIT_TIMEOUT
  380. + int "timeout for qman init stage, in seconds"
  381. + default 10
  382. + ---help---
  383. + The timeout setting to quit the initialization loop for non-control
  384. + partition in case the control partition fails to boot-up.
  385. +
  386. +endif # FSL_QMAN
  387. +
  388. +endmenu
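To put the two region-size options above in concrete terms (both are defined as PAGE_SIZE * 2^value), with the common 4 KiB page size the defaults come out to:

    FSL_QMAN_FQD_SZ  = 10  ->  4 KiB * 2^10 = 4 MiB for the Frame Queue Descriptor region
    FSL_QMAN_PFDR_SZ = 13  ->  4 KiB * 2^13 = 32 MiB for the PFDR pool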
  389. --- /dev/null
  390. +++ b/drivers/staging/fsl_qbman/Makefile
  391. @@ -0,0 +1,28 @@
  392. +subdir-ccflags-y := -Werror
  393. +
  394. +# Common
  395. +obj-$(CONFIG_FSL_DPA) += dpa_alloc.o
  396. +obj-$(CONFIG_HAS_FSL_QBMAN) += qbman_driver.o
  397. +
  398. +# Bman
  399. +obj-$(CONFIG_FSL_BMAN) += bman_high.o
  400. +obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o
  401. +obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o
  402. +obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o
  403. +bman_tester-y = bman_test.o
  404. +bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o
  405. +bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o
  406. +bman_debugfs_interface-y = bman_debugfs.o
  407. +
  408. +# Qman
  409. +obj-$(CONFIG_FSL_QMAN) += qman_high.o qman_utility.o
  410. +obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o
  411. +obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o
  412. +qman_tester-y = qman_test.o
  413. +qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o
  414. +qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o
  415. +obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o
  416. +qman_debugfs_interface-y = qman_debugfs.o
  417. +
  418. +# USDPAA
  419. +obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o
  420. --- /dev/null
  421. +++ b/drivers/staging/fsl_qbman/bman_config.c
  422. @@ -0,0 +1,705 @@
  423. +/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc.
  424. + *
  425. + * Redistribution and use in source and binary forms, with or without
  426. + * modification, are permitted provided that the following conditions are met:
  427. + * * Redistributions of source code must retain the above copyright
  428. + * notice, this list of conditions and the following disclaimer.
  429. + * * Redistributions in binary form must reproduce the above copyright
  430. + * notice, this list of conditions and the following disclaimer in the
  431. + * documentation and/or other materials provided with the distribution.
  432. + * * Neither the name of Freescale Semiconductor nor the
  433. + * names of its contributors may be used to endorse or promote products
  434. + * derived from this software without specific prior written permission.
  435. + *
  436. + *
  437. + * ALTERNATIVELY, this software may be distributed under the terms of the
  438. + * GNU General Public License ("GPL") as published by the Free Software
  439. + * Foundation, either version 2 of that License or (at your option) any
  440. + * later version.
  441. + *
  442. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  443. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  444. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  445. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  446. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  447. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  448. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  449. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  450. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  451. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  452. + */
  453. +
  454. +#include <asm/cacheflush.h>
  455. +#include "bman_private.h"
  456. +#include <linux/of_reserved_mem.h>
  457. +
  458. +/* Last updated for v00.79 of the BG */
  459. +
  460. +struct bman;
  461. +
  462. +/* Register offsets */
  463. +#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
  464. +#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
  465. +#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
  466. +#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
  467. +#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
  468. +#define REG_FBPR_FPC 0x0800
  469. +#define REG_STATE_IDLE 0x960
  470. +#define REG_STATE_STOP 0x964
  471. +#define REG_ECSR 0x0a00
  472. +#define REG_ECIR 0x0a04
  473. +#define REG_EADR 0x0a08
  474. +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
  475. +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
  476. +#define REG_IP_REV_1 0x0bf8
  477. +#define REG_IP_REV_2 0x0bfc
  478. +#define REG_FBPR_BARE 0x0c00
  479. +#define REG_FBPR_BAR 0x0c04
  480. +#define REG_FBPR_AR 0x0c10
  481. +#define REG_SRCIDR 0x0d04
  482. +#define REG_LIODNR 0x0d08
  483. +#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
  484. +
  485. +/* Used by all error interrupt registers except 'inhibit' */
  486. +#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
  487. +#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
  488. +#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
  489. +#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
  490. +#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
  491. +
  492. +/* BMAN_ECIR valid error bit */
  493. +#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
  494. +
  495. +union bman_ecir {
  496. + u32 ecir_raw;
  497. + struct {
  498. + u32 __reserved1:4;
  499. + u32 portal_num:4;
  500. + u32 __reserved2:12;
  501. + u32 numb:4;
  502. + u32 __reserved3:2;
  503. + u32 pid:6;
  504. + } __packed info;
  505. +};
  506. +
  507. +union bman_eadr {
  508. + u32 eadr_raw;
  509. + struct {
  510. + u32 __reserved1:5;
  511. + u32 memid:3;
  512. + u32 __reserved2:14;
  513. + u32 eadr:10;
  514. + } __packed info;
  515. +};
  516. +
  517. +struct bman_hwerr_txt {
  518. + u32 mask;
  519. + const char *txt;
  520. +};
  521. +
  522. +#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
  523. +
  524. +static const struct bman_hwerr_txt bman_hwerr_txts[] = {
  525. + BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
  526. + BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
  527. + BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
  528. + BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
  529. + BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
  530. +};
  531. +#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
  532. +
  533. +struct bman_error_info_mdata {
  534. + u16 addr_mask;
  535. + u16 bits;
  536. + const char *txt;
  537. +};
  538. +
  539. +#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
  540. +static const struct bman_error_info_mdata error_mdata[] = {
  541. + BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
  542. + BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
  543. + BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
  544. +};
  545. +#define BMAN_ERR_MDATA_COUNT \
  546. + (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
  547. +
  548. +/* Add this in Kconfig */
  549. +#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
  550. +
  551. +/**
  552. + * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
  553. + * @v: for accessors that write values, this is the 32-bit value
  554. + *
  555. + * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
  556. + * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
  557. + * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
  558. + * "write the enable register" rather than "enable the write register"!
  559. + */
  560. +#define bm_err_isr_status_read(bm) \
  561. + __bm_err_isr_read(bm, bm_isr_status)
  562. +#define bm_err_isr_status_clear(bm, m) \
  563. + __bm_err_isr_write(bm, bm_isr_status, m)
  564. +#define bm_err_isr_enable_read(bm) \
  565. + __bm_err_isr_read(bm, bm_isr_enable)
  566. +#define bm_err_isr_enable_write(bm, v) \
  567. + __bm_err_isr_write(bm, bm_isr_enable, v)
  568. +#define bm_err_isr_disable_read(bm) \
  569. + __bm_err_isr_read(bm, bm_isr_disable)
  570. +#define bm_err_isr_disable_write(bm, v) \
  571. + __bm_err_isr_write(bm, bm_isr_disable, v)
  572. +#define bm_err_isr_inhibit(bm) \
  573. + __bm_err_isr_write(bm, bm_isr_inhibit, 1)
  574. +#define bm_err_isr_uninhibit(bm) \
  575. + __bm_err_isr_write(bm, bm_isr_inhibit, 0)
  576. +
  577. +/*
  578. + * TODO: unimplemented registers
  579. + *
  580. + * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
  581. + * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
  582. + */
  583. +
  584. +/* Encapsulate "struct bman *" as a cast of the register space address. */
  585. +
  586. +static struct bman *bm_create(void *regs)
  587. +{
  588. + return (struct bman *)regs;
  589. +}
  590. +
  591. +static inline u32 __bm_in(struct bman *bm, u32 offset)
  592. +{
  593. + return in_be32((void *)bm + offset);
  594. +}
  595. +static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
  596. +{
  597. + out_be32((void *)bm + offset, val);
  598. +}
  599. +#define bm_in(reg) __bm_in(bm, REG_##reg)
  600. +#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
  601. +
  602. +static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
  603. +{
  604. + return __bm_in(bm, REG_ERR_ISR + (n << 2));
  605. +}
  606. +
  607. +static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
  608. +{
  609. + __bm_out(bm, REG_ERR_ISR + (n << 2), val);
  610. +}
  611. +
  612. +static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
  613. +{
  614. + u32 v = bm_in(IP_REV_1);
  615. + *id = (v >> 16);
  616. + *major = (v >> 8) & 0xff;
  617. + *minor = v & 0xff;
  618. +}
  619. +
  620. +static u32 __generate_thresh(u32 val, int roundup)
  621. +{
  622. + u32 e = 0; /* co-efficient, exponent */
  623. + int oddbit = 0;
  624. + while (val > 0xff) {
  625. + oddbit = val & 1;
  626. + val >>= 1;
  627. + e++;
  628. + if (roundup && oddbit)
  629. + val++;
  630. + }
  631. + DPA_ASSERT(e < 0x10);
  632. + return val | (e << 8);
  633. +}
  634. +
  635. +static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
  636. + u32 hwdet, u32 hwdxt)
  637. +{
  638. + DPA_ASSERT(pool < bman_pool_max);
  639. + bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
  640. + bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
  641. + bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
  642. + bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
  643. +}
  644. +
  645. +static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
  646. +{
  647. + u32 exp = ilog2(size);
  648. + /* choke if size isn't within range */
  649. + DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
  650. + is_power_of_2(size));
  651. + /* choke if '[e]ba' has lower-alignment than 'size' */
  652. + DPA_ASSERT(!(ba & (size - 1)));
  653. + bm_out(FBPR_BARE, upper_32_bits(ba));
  654. + bm_out(FBPR_BAR, lower_32_bits(ba));
  655. + bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
  656. +}
  657. +
  658. +/*****************/
  659. +/* Config driver */
  660. +/*****************/
  661. +
  662. +/* TODO: Kconfig these? */
  663. +#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12)
  664. +
  665. +/* We support only one of these. */
  666. +static struct bman *bm;
  667. +static struct device_node *bm_node;
  668. +
  669. +/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
  670. + * during bman_init_ccsr(). */
  671. +static dma_addr_t fbpr_a;
  672. +static size_t fbpr_sz = DEFAULT_FBPR_SZ;
  673. +
  674. +static int bman_fbpr(struct reserved_mem *rmem)
  675. +{
  676. + fbpr_a = rmem->base;
  677. + fbpr_sz = rmem->size;
  678. +
  679. + WARN_ON(!(fbpr_a && fbpr_sz));
  680. +
  681. + return 0;
  682. +}
  683. +RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
  684. +
  685. +static int __init fsl_bman_init(struct device_node *node)
  686. +{
  687. + struct resource res;
  688. + u32 __iomem *regs;
  689. + const char *s;
  690. + int ret, standby = 0;
  691. + u16 id;
  692. + u8 major, minor;
  693. +
  694. + ret = of_address_to_resource(node, 0, &res);
  695. + if (ret) {
  696. + pr_err("Can't get %s property 'reg'\n",
  697. + node->full_name);
  698. + return ret;
  699. + }
  700. + s = of_get_property(node, "fsl,hv-claimable", &ret);
  701. + if (s && !strcmp(s, "standby"))
  702. + standby = 1;
  703. + /* Global configuration */
  704. + regs = ioremap(res.start, res.end - res.start + 1);
  705. + bm = bm_create(regs);
  706. + BUG_ON(!bm);
  707. + bm_node = node;
  708. + bm_get_version(bm, &id, &major, &minor);
  709. + pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor);
  710. + if ((major == 1) && (minor == 0)) {
  711. + bman_ip_rev = BMAN_REV10;
  712. + bman_pool_max = 64;
  713. + } else if ((major == 2) && (minor == 0)) {
  714. + bman_ip_rev = BMAN_REV20;
  715. + bman_pool_max = 8;
  716. + } else if ((major == 2) && (minor == 1)) {
  717. + bman_ip_rev = BMAN_REV21;
  718. + bman_pool_max = 64;
  719. + } else {
  720. + pr_warn("unknown Bman version, default to rev1.0\n");
  721. + }
  722. +
  723. + if (standby) {
  724. + pr_info(" -> in standby mode\n");
  725. + return 0;
  726. + }
  727. + return 0;
  728. +}
  729. +
  730. +int bman_have_ccsr(void)
  731. +{
  732. + return bm ? 1 : 0;
  733. +}
  734. +
  735. +int bm_pool_set(u32 bpid, const u32 *thresholds)
  736. +{
  737. + if (!bm)
  738. + return -ENODEV;
  739. + bm_set_pool(bm, bpid, thresholds[0],
  740. + thresholds[1], thresholds[2],
  741. + thresholds[3]);
  742. + return 0;
  743. +}
  744. +EXPORT_SYMBOL(bm_pool_set);
  745. +
  746. +__init int bman_init_early(void)
  747. +{
  748. + struct device_node *dn;
  749. + int ret;
  750. +
  751. + for_each_compatible_node(dn, NULL, "fsl,bman") {
  752. + if (bm)
  753. + pr_err("%s: only one 'fsl,bman' allowed\n",
  754. + dn->full_name);
  755. + else {
  756. + if (!of_device_is_available(dn))
  757. + continue;
  758. +
  759. + ret = fsl_bman_init(dn);
  760. + BUG_ON(ret);
  761. + }
  762. + }
  763. + return 0;
  764. +}
  765. +postcore_initcall_sync(bman_init_early);
  766. +
  767. +
  768. +static void log_edata_bits(u32 bit_count)
  769. +{
  770. + u32 i, j, mask = 0xffffffff;
  771. +
  772. + pr_warn("Bman ErrInt, EDATA:\n");
  773. + i = bit_count/32;
  774. + if (bit_count%32) {
  775. + i++;
  776. + mask = ~(mask << bit_count%32);
  777. + }
  778. + j = 16-i;
  779. + pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
  780. + j++;
  781. + for (; j < 16; j++)
  782. + pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
  783. +}
  784. +
  785. +static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
  786. +{
  787. + union bman_ecir ecir_val;
  788. + union bman_eadr eadr_val;
  789. +
  790. + ecir_val.ecir_raw = bm_in(ECIR);
  791. + /* Is portal info valid */
  792. + if (ecsr_val & PORTAL_ECSR_ERR) {
  793. + pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n",
  794. + ecir_val.info.portal_num, ecir_val.info.numb,
  795. + ecir_val.info.pid);
  796. + }
  797. + if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
  798. + eadr_val.eadr_raw = bm_in(EADR);
  799. + pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n",
  800. + error_mdata[eadr_val.info.memid].txt,
  801. + error_mdata[eadr_val.info.memid].addr_mask
  802. + & eadr_val.info.eadr);
  803. + log_edata_bits(error_mdata[eadr_val.info.memid].bits);
  804. + }
  805. +}
  806. +
  807. +/* Bman interrupt handler */
  808. +static irqreturn_t bman_isr(int irq, void *ptr)
  809. +{
  810. + u32 isr_val, ier_val, ecsr_val, isr_mask, i;
  811. +
  812. + ier_val = bm_err_isr_enable_read(bm);
  813. + isr_val = bm_err_isr_status_read(bm);
  814. + ecsr_val = bm_in(ECSR);
  815. + isr_mask = isr_val & ier_val;
  816. +
  817. + if (!isr_mask)
  818. + return IRQ_NONE;
  819. + for (i = 0; i < BMAN_HWE_COUNT; i++) {
  820. + if (bman_hwerr_txts[i].mask & isr_mask) {
  821. + pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt);
  822. + if (bman_hwerr_txts[i].mask & ecsr_val) {
  823. + log_additional_error_info(isr_mask, ecsr_val);
  824. + /* Re-arm error capture registers */
  825. + bm_out(ECSR, ecsr_val);
  826. + }
  827. + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
  828. + pr_devel("Bman un-enabling error 0x%x\n",
  829. + bman_hwerr_txts[i].mask);
  830. + ier_val &= ~bman_hwerr_txts[i].mask;
  831. + bm_err_isr_enable_write(bm, ier_val);
  832. + }
  833. + }
  834. + }
  835. + bm_err_isr_status_clear(bm, isr_val);
  836. + return IRQ_HANDLED;
  837. +}
  838. +
  839. +static int __bind_irq(void)
  840. +{
  841. + int ret, err_irq;
  842. +
  843. + err_irq = of_irq_to_resource(bm_node, 0, NULL);
  844. + if (err_irq == 0) {
  845. + pr_info("Can't get %s property '%s'\n", bm_node->full_name,
  846. + "interrupts");
  847. + return -ENODEV;
  848. + }
  849. + ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
  850. + if (ret) {
  851. + pr_err("request_irq() failed %d for '%s'\n", ret,
  852. + bm_node->full_name);
  853. + return -ENODEV;
  854. + }
  855. + /* Disable Buffer Pool State Change */
  856. + bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
  857. + /* Write-to-clear any stale bits, (eg. starvation being asserted prior
  858. + * to resource allocation during driver init). */
  859. + bm_err_isr_status_clear(bm, 0xffffffff);
  860. + /* Enable Error Interrupts */
  861. + bm_err_isr_enable_write(bm, 0xffffffff);
  862. + return 0;
  863. +}
  864. +
  865. +int bman_init_ccsr(struct device_node *node)
  866. +{
  867. + int ret;
  868. + if (!bman_have_ccsr())
  869. + return 0;
  870. + if (node != bm_node)
  871. + return -EINVAL;
  872. + /* FBPR memory */
  873. + bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
  874. + pr_info("bman-fbpr addr 0x%llx size 0x%zx\n",
  875. + (unsigned long long)fbpr_a, fbpr_sz);
  876. +
  877. + ret = __bind_irq();
  878. + if (ret)
  879. + return ret;
  880. + return 0;
  881. +}
  882. +
  883. +u32 bm_pool_free_buffers(u32 bpid)
  884. +{
  885. + return bm_in(POOL_CONTENT(bpid));
  886. +}
  887. +
  888. +#ifdef CONFIG_SYSFS
  889. +
  890. +#define DRV_NAME "fsl-bman"
  891. +#define SBEC_MAX_ID 1
  892. +#define SBEC_MIN_ID 0
  893. +
  894. +static ssize_t show_fbpr_fpc(struct device *dev,
  895. + struct device_attribute *dev_attr, char *buf)
  896. +{
  897. + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
  898. +};
  899. +
  900. +static ssize_t show_pool_count(struct device *dev,
  901. + struct device_attribute *dev_attr, char *buf)
  902. +{
  903. + u32 data;
  904. + int i;
  905. +
  906. + if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max))
  907. + return -EINVAL;
  908. + data = bm_in(POOL_CONTENT(i));
  909. + return snprintf(buf, PAGE_SIZE, "%d\n", data);
  910. +};
  911. +
  912. +static ssize_t show_err_isr(struct device *dev,
  913. + struct device_attribute *dev_attr, char *buf)
  914. +{
  915. + return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
  916. +};
  917. +
  918. +static ssize_t show_sbec(struct device *dev,
  919. + struct device_attribute *dev_attr, char *buf)
  920. +{
  921. + int i;
  922. +
  923. + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
  924. + return -EINVAL;
  925. + if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
  926. + return -EINVAL;
  927. + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
  928. +};
  929. +
  930. +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
  931. +static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
  932. +
  933. +/* Didn't use DEVICE_ATTR as 64 of them would be required.
  934. + * Initialize them when needed. */
  935. +static char *name_attrs_pool_count; /* "xx" + null-terminator */
  936. +static struct device_attribute *dev_attr_buffer_pool_count;
  937. +
  938. +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
  939. +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
  940. +
  941. +static struct attribute *bman_dev_attributes[] = {
  942. + &dev_attr_fbpr_fpc.attr,
  943. + &dev_attr_err_isr.attr,
  944. + NULL
  945. +};
  946. +
  947. +static struct attribute *bman_dev_ecr_attributes[] = {
  948. + &dev_attr_sbec_0.attr,
  949. + &dev_attr_sbec_1.attr,
  950. + NULL
  951. +};
  952. +
  953. +static struct attribute **bman_dev_pool_count_attributes;
  954. +
  955. +
  956. +/* root level */
  957. +static const struct attribute_group bman_dev_attr_grp = {
  958. + .name = NULL,
  959. + .attrs = bman_dev_attributes
  960. +};
  961. +static const struct attribute_group bman_dev_ecr_grp = {
  962. + .name = "error_capture",
  963. + .attrs = bman_dev_ecr_attributes
  964. +};
  965. +static struct attribute_group bman_dev_pool_countent_grp = {
  966. + .name = "pool_count",
  967. +};
  968. +
  969. +static int of_fsl_bman_remove(struct platform_device *ofdev)
  970. +{
  971. + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
  972. + return 0;
  973. +};
  974. +
  975. +static int of_fsl_bman_probe(struct platform_device *ofdev)
  976. +{
  977. + int ret, i;
  978. +
  979. + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
  980. + if (ret)
  981. + goto done;
  982. + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
  983. + if (ret)
  984. + goto del_group_0;
  985. +
  986. + name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3,
  987. + GFP_KERNEL);
  988. + if (!name_attrs_pool_count) {
  989. + pr_err("Can't alloc name_attrs_pool_count\n");
  990. + goto del_group_1;
  991. + }
  992. +
  993. + dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) *
  994. + bman_pool_max, GFP_KERNEL);
  995. + if (!dev_attr_buffer_pool_count) {
  996. + pr_err("Can't alloc dev_attr-buffer_pool_count\n");
  997. + goto del_group_2;
  998. + }
  999. +
  1000. + bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) *
  1001. + (bman_pool_max + 1), GFP_KERNEL);
  1002. + if (!bman_dev_pool_count_attributes) {
  1003. + pr_err("can't alloc bman_dev_pool_count_attributes\n");
  1004. + goto del_group_3;
  1005. + }
  1006. +
  1007. + for (i = 0; i < bman_pool_max; i++) {
  1008. + ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
  1009. + if (!ret)
  1010. + goto del_group_4;
  1011. + dev_attr_buffer_pool_count[i].attr.name =
  1012. + (name_attrs_pool_count + i * 3);
  1013. + dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
  1014. + dev_attr_buffer_pool_count[i].show = show_pool_count;
  1015. + bman_dev_pool_count_attributes[i] =
  1016. + &dev_attr_buffer_pool_count[i].attr;
  1017. + sysfs_attr_init(bman_dev_pool_count_attributes[i]);
  1018. + }
  1019. + bman_dev_pool_count_attributes[bman_pool_max] = NULL;
  1020. +
  1021. + bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
  1022. +
  1023. + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp);
  1024. + if (ret)
  1025. + goto del_group_4;
  1026. +
  1027. + goto done;
  1028. +
  1029. +del_group_4:
  1030. + kfree(bman_dev_pool_count_attributes);
  1031. +del_group_3:
  1032. + kfree(dev_attr_buffer_pool_count);
  1033. +del_group_2:
  1034. + kfree(name_attrs_pool_count);
  1035. +del_group_1:
  1036. + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
  1037. +del_group_0:
  1038. + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
  1039. +done:
  1040. + if (ret)
  1041. + dev_err(&ofdev->dev,
  1042. + "Cannot create dev attributes ret=%d\n", ret);
  1043. + return ret;
  1044. +};
  1045. +
  1046. +static struct of_device_id of_fsl_bman_ids[] = {
  1047. + {
  1048. + .compatible = "fsl,bman",
  1049. + },
  1050. + {}
  1051. +};
  1052. +MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
  1053. +
  1054. +#ifdef CONFIG_SUSPEND
  1055. +static u32 saved_isdr;
  1056. +
  1057. +static int bman_pm_suspend_noirq(struct device *dev)
  1058. +{
  1059. + uint32_t idle_state;
  1060. +
  1061. + suspend_unused_bportal();
  1062. + /* save isdr, disable all, clear isr */
  1063. + saved_isdr = bm_err_isr_disable_read(bm);
  1064. + bm_err_isr_disable_write(bm, 0xffffffff);
  1065. + bm_err_isr_status_clear(bm, 0xffffffff);
  1066. +
  1067. + if (bman_ip_rev < BMAN_REV21) {
  1068. +#ifdef CONFIG_PM_DEBUG
  1069. + pr_info("Bman version doesn't have STATE_IDLE\n");
  1070. +#endif
  1071. + return 0;
  1072. + }
  1073. + idle_state = bm_in(STATE_IDLE);
  1074. + if (!(idle_state & 0x1)) {
  1075. + pr_err("Bman not idle 0x%x aborting\n", idle_state);
  1076. + bm_err_isr_disable_write(bm, saved_isdr);
  1077. + resume_unused_bportal();
  1078. + return -EBUSY;
  1079. + }
  1080. +#ifdef CONFIG_PM_DEBUG
  1081. + pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state);
  1082. +#endif
  1083. + return 0;
  1084. +}
  1085. +
  1086. +static int bman_pm_resume_noirq(struct device *dev)
  1087. +{
  1088. + /* restore isdr */
  1089. + bm_err_isr_disable_write(bm, saved_isdr);
  1090. + resume_unused_bportal();
  1091. + return 0;
  1092. +}
  1093. +#else
  1094. +#define bman_pm_suspend_noirq NULL
  1095. +#define bman_pm_resume_noirq NULL
  1096. +#endif
  1097. +
  1098. +static const struct dev_pm_ops bman_pm_ops = {
  1099. + .suspend_noirq = bman_pm_suspend_noirq,
  1100. + .resume_noirq = bman_pm_resume_noirq,
  1101. +};
  1102. +
  1103. +static struct platform_driver of_fsl_bman_driver = {
  1104. + .driver = {
  1105. + .owner = THIS_MODULE,
  1106. + .name = DRV_NAME,
  1107. + .of_match_table = of_fsl_bman_ids,
  1108. + .pm = &bman_pm_ops,
  1109. + },
  1110. + .probe = of_fsl_bman_probe,
  1111. + .remove = of_fsl_bman_remove,
  1112. +};
  1113. +
  1114. +static int bman_ctrl_init(void)
  1115. +{
  1116. + return platform_driver_register(&of_fsl_bman_driver);
  1117. +}
  1118. +
  1119. +static void bman_ctrl_exit(void)
  1120. +{
  1121. + platform_driver_unregister(&of_fsl_bman_driver);
  1122. +}
  1123. +
  1124. +module_init(bman_ctrl_init);
  1125. +module_exit(bman_ctrl_exit);
  1126. +
  1127. +#endif /* CONFIG_SYSFS */
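A worked example of the threshold encoding performed by the __generate_thresh() helper above, which packs a value into an 8-bit mantissa (bits 0-7) plus a 4-bit exponent (bits 8-11):

    __generate_thresh(1000, 0): 1000 > 0xff, so the value is shifted right twice -> val = 250, e = 2;
        the result 0x2fa represents 250 * 2^2 = 1000 exactly.
    __generate_thresh(1001, 1): the round-up path bumps the mantissa whenever a set bit is shifted
        out, giving 251 with e = 2, i.e. 251 * 2^2 = 1004 >= 1001.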
  1128. --- /dev/null
  1129. +++ b/drivers/staging/fsl_qbman/bman_debugfs.c
  1130. @@ -0,0 +1,119 @@
  1131. +/* Copyright 2010-2011 Freescale Semiconductor, Inc.
  1132. + *
  1133. + * Redistribution and use in source and binary forms, with or without
  1134. + * modification, are permitted provided that the following conditions are met:
  1135. + * * Redistributions of source code must retain the above copyright
  1136. + * notice, this list of conditions and the following disclaimer.
  1137. + * * Redistributions in binary form must reproduce the above copyright
  1138. + * notice, this list of conditions and the following disclaimer in the
  1139. + * documentation and/or other materials provided with the distribution.
  1140. + * * Neither the name of Freescale Semiconductor nor the
  1141. + * names of its contributors may be used to endorse or promote products
  1142. + * derived from this software without specific prior written permission.
  1143. + *
  1144. + *
  1145. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1146. + * GNU General Public License ("GPL") as published by the Free Software
  1147. + * Foundation, either version 2 of that License or (at your option) any
  1148. + * later version.
  1149. + *
  1150. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  1151. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1152. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1153. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  1154. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1155. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1156. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1157. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1158. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1159. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1160. + */
  1161. +#include <linux/module.h>
  1162. +#include <linux/fsl_bman.h>
  1163. +#include <linux/debugfs.h>
  1164. +#include <linux/seq_file.h>
  1165. +#include <linux/uaccess.h>
  1166. +
  1167. +static struct dentry *dfs_root; /* debugfs root directory */
  1168. +
  1169. +/*******************************************************************************
  1170. + * Query Buffer Pool State
  1171. + ******************************************************************************/
  1172. +static int query_bp_state_show(struct seq_file *file, void *offset)
  1173. +{
  1174. + int ret;
  1175. + struct bm_pool_state state;
  1176. + int i, j;
  1177. + u32 mask;
  1178. +
  1179. + memset(&state, 0, sizeof(struct bm_pool_state));
  1180. + ret = bman_query_pools(&state);
  1181. + if (ret) {
  1182. + seq_printf(file, "Error %d\n", ret);
  1183. + return 0;
  1184. + }
  1185. + seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
  1186. + for (i = 0; i < 2; i++) {
  1187. + mask = 0x80000000;
  1188. + for (j = 0; j < 32; j++) {
  1189. + seq_printf(file,
  1190. + " %-2u %-3s %-3s\n",
  1191. + (i*32)+j,
  1192. + (state.as.state.__state[i] & mask) ? "no" : "yes",
  1193. + (state.ds.state.__state[i] & mask) ? "yes" : "no");
  1194. + mask >>= 1;
  1195. + }
  1196. + }
  1197. + return 0;
  1198. +}
  1199. +
  1200. +static int query_bp_state_open(struct inode *inode, struct file *file)
  1201. +{
  1202. + return single_open(file, query_bp_state_show, NULL);
  1203. +}
  1204. +
  1205. +static const struct file_operations query_bp_state_fops = {
  1206. + .owner = THIS_MODULE,
  1207. + .open = query_bp_state_open,
  1208. + .read = seq_read,
  1209. + .release = single_release,
  1210. +};
  1211. +
  1212. +static int __init bman_debugfs_module_init(void)
  1213. +{
  1214. + int ret = 0;
  1215. + struct dentry *d;
  1216. +
  1217. + dfs_root = debugfs_create_dir("bman", NULL);
  1218. +
  1219. + if (dfs_root == NULL) {
  1220. + ret = -ENOMEM;
  1221. + pr_err("Cannot create bman debugfs dir\n");
  1222. + goto _return;
  1223. + }
  1224. + d = debugfs_create_file("query_bp_state",
  1225. + S_IRUGO,
  1226. + dfs_root,
  1227. + NULL,
  1228. + &query_bp_state_fops);
  1229. + if (d == NULL) {
  1230. + ret = -ENOMEM;
  1231. + pr_err("Cannot create query_bp_state\n");
  1232. + goto _return;
  1233. + }
  1234. + return 0;
  1235. +
  1236. +_return:
  1237. + debugfs_remove_recursive(dfs_root);
  1238. + return ret;
  1239. +}
  1240. +
  1241. +static void __exit bman_debugfs_module_exit(void)
  1242. +{
  1243. + debugfs_remove_recursive(dfs_root);
  1244. +}
  1245. +
  1246. +
  1247. +module_init(bman_debugfs_module_init);
  1248. +module_exit(bman_debugfs_module_exit);
  1249. +MODULE_LICENSE("Dual BSD/GPL");
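With debugfs mounted at the usual /sys/kernel/debug (an assumption about the running system), the file created above can be read directly at /sys/kernel/debug/bman/query_bp_state. The header line comes straight from the seq_puts() in query_bp_state_show(), and the i/j loops then emit one row per buffer pool ID (64 rows in total):

    bp_id free_buffers_avail bp_depleted

with each row showing whether the pool currently has free buffers available and whether it is in the depleted state; exact column spacing follows the seq_printf() format strings.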
  1250. --- /dev/null
  1251. +++ b/drivers/staging/fsl_qbman/bman_driver.c
  1252. @@ -0,0 +1,574 @@
  1253. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  1254. + *
  1255. + * Redistribution and use in source and binary forms, with or without
  1256. + * modification, are permitted provided that the following conditions are met:
  1257. + * * Redistributions of source code must retain the above copyright
  1258. + * notice, this list of conditions and the following disclaimer.
  1259. + * * Redistributions in binary form must reproduce the above copyright
  1260. + * notice, this list of conditions and the following disclaimer in the
  1261. + * documentation and/or other materials provided with the distribution.
  1262. + * * Neither the name of Freescale Semiconductor nor the
  1263. + * names of its contributors may be used to endorse or promote products
  1264. + * derived from this software without specific prior written permission.
  1265. + *
  1266. + *
  1267. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1268. + * GNU General Public License ("GPL") as published by the Free Software
  1269. + * Foundation, either version 2 of that License or (at your option) any
  1270. + * later version.
  1271. + *
  1272. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  1273. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1274. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1275. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  1276. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1277. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1278. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1279. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1280. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1281. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1282. + */
  1283. +#include "bman_low.h"
  1284. +#ifdef CONFIG_HOTPLUG_CPU
  1285. +#include <linux/cpu.h>
  1286. +#endif
  1287. +/*
  1288. + * Global variables of the max portal/pool number this bman version supported
  1289. + */
  1290. +u16 bman_ip_rev;
  1291. +EXPORT_SYMBOL(bman_ip_rev);
  1292. +u16 bman_pool_max;
  1293. +EXPORT_SYMBOL(bman_pool_max);
  1294. +static u16 bman_portal_max;
  1295. +
  1296. +/* After initialising cpus that own shared portal configs, we cache the
  1297. + * resulting portals (ie. not just the configs) in this array. Then we
  1298. + * initialise slave cpus that don't have their own portals, redirecting them to
  1299. + * portals from this cache in a round-robin assignment. */
  1300. +static struct bman_portal *shared_portals[NR_CPUS];
  1301. +static int num_shared_portals;
  1302. +static int shared_portals_idx;
  1303. +static LIST_HEAD(unused_pcfgs);
  1304. +static DEFINE_SPINLOCK(unused_pcfgs_lock);
  1305. +static void *affine_bportals[NR_CPUS];
  1306. +
  1307. +static int __init fsl_bpool_init(struct device_node *node)
  1308. +{
  1309. + int ret;
  1310. + u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret);
  1311. + if (!bpid || (ret != 4)) {
  1312. + pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name);
  1313. + return -ENODEV;
  1314. + }
  1315. + thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret);
  1316. + if (thresh) {
  1317. + if (ret != 16) {
  1318. + pr_err("Invalid %s property '%s'\n",
  1319. + node->full_name, "fsl,bpool-thresholds");
  1320. + return -ENODEV;
  1321. + }
  1322. + }
  1323. + if (thresh) {
  1324. +#ifdef CONFIG_FSL_BMAN_CONFIG
  1325. + ret = bm_pool_set(be32_to_cpu(*bpid), thresh);
  1326. + if (ret)
  1327. + pr_err("No CCSR node for %s property '%s'\n",
  1328. + node->full_name, "fsl,bpool-thresholds");
  1329. + return ret;
  1330. +#else
  1331. + pr_err("Ignoring %s property '%s', no CCSR support\n",
  1332. + node->full_name, "fsl,bpool-thresholds");
  1333. +#endif
  1334. + }
  1335. + return 0;
  1336. +}
  1337. +
  1338. +static int __init fsl_bpid_range_init(struct device_node *node)
  1339. +{
  1340. + int ret;
  1341. + u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret);
  1342. + if (!range) {
  1343. + pr_err("No 'fsl,bpid-range' property in node %s\n",
  1344. + node->full_name);
  1345. + return -EINVAL;
  1346. + }
  1347. + if (ret != 8) {
  1348. + pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n",
  1349. + node->full_name);
  1350. + return -EINVAL;
  1351. + }
  1352. + bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  1353. + pr_info("Bman: BPID allocator includes range %d:%d\n",
  1354. + be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  1355. + return 0;
  1356. +}
  1357. +
  1358. +static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
  1359. +{
  1360. + struct bm_portal_config *pcfg;
  1361. + const u32 *index;
  1362. + int irq, ret;
  1363. + resource_size_t len;
  1364. +
  1365. + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
  1366. + if (!pcfg) {
  1367. + pr_err("can't allocate portal config");
  1368. + return NULL;
  1369. + }
  1370. +
  1371. + if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
  1372. + of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
  1373. + bman_ip_rev = BMAN_REV10;
  1374. + bman_pool_max = 64;
  1375. + bman_portal_max = 10;
  1376. + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
  1377. + of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
  1378. + bman_ip_rev = BMAN_REV20;
  1379. + bman_pool_max = 8;
  1380. + bman_portal_max = 3;
  1381. + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) {
  1382. + bman_ip_rev = BMAN_REV21;
  1383. + bman_pool_max = 64;
  1384. + bman_portal_max = 50;
  1385. + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) {
  1386. + bman_ip_rev = BMAN_REV21;
  1387. + bman_pool_max = 64;
  1388. + bman_portal_max = 25;
  1389. + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) {
  1390. + bman_ip_rev = BMAN_REV21;
  1391. + bman_pool_max = 64;
  1392. + bman_portal_max = 18;
  1393. + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
  1394. + bman_ip_rev = BMAN_REV21;
  1395. + bman_pool_max = 64;
  1396. + bman_portal_max = 10;
  1397. + } else {
  1398. + pr_warn("unknown BMan version in portal node,"
  1399. + "default to rev1.0\n");
  1400. + bman_ip_rev = BMAN_REV10;
  1401. + bman_pool_max = 64;
  1402. + bman_portal_max = 10;
  1403. + }
  1404. +
  1405. + ret = of_address_to_resource(node, DPA_PORTAL_CE,
  1406. + &pcfg->addr_phys[DPA_PORTAL_CE]);
  1407. + if (ret) {
  1408. + pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
  1409. + goto err;
  1410. + }
  1411. + ret = of_address_to_resource(node, DPA_PORTAL_CI,
  1412. + &pcfg->addr_phys[DPA_PORTAL_CI]);
  1413. + if (ret) {
  1414. + pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
  1415. + goto err;
  1416. + }
  1417. +
  1418. + index = of_get_property(node, "cell-index", &ret);
  1419. + if (!index || (ret != 4)) {
  1420. + pr_err("Can't get %s property '%s'\n", node->full_name,
  1421. + "cell-index");
  1422. + goto err;
  1423. + }
  1424. + if (be32_to_cpu(*index) >= bman_portal_max) {
  1425. + pr_err("BMan portal cell index %d out of range, max %d\n",
  1426. + be32_to_cpu(*index), bman_portal_max);
  1427. + goto err;
  1428. + }
  1429. +
  1430. + pcfg->public_cfg.cpu = -1;
  1431. +
  1432. + irq = irq_of_parse_and_map(node, 0);
  1433. + if (irq == 0) {
  1434. + pr_err("Can't get %s property 'interrupts'\n", node->full_name);
  1435. + goto err;
  1436. + }
  1437. + pcfg->public_cfg.irq = irq;
  1438. + pcfg->public_cfg.index = be32_to_cpu(*index);
  1439. + bman_depletion_fill(&pcfg->public_cfg.mask);
  1440. +
  1441. + len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
  1442. + if (len != (unsigned long)len)
  1443. + goto err;
  1444. +
  1445. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  1446. + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
  1447. + pcfg->addr_phys[DPA_PORTAL_CE].start,
  1448. + resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
  1449. + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
  1450. + pcfg->addr_phys[DPA_PORTAL_CI].start,
  1451. + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
  1452. +
  1453. +#else
  1454. + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
  1455. + pcfg->addr_phys[DPA_PORTAL_CE].start,
  1456. + (unsigned long)len,
  1457. + 0);
  1458. + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
  1459. + pcfg->addr_phys[DPA_PORTAL_CI].start,
  1460. + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
  1461. + _PAGE_GUARDED | _PAGE_NO_CACHE);
  1462. +#endif
  1463. + /* disable bp depletion */
  1464. + __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0));
  1465. + __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1));
  1466. + return pcfg;
  1467. +err:
  1468. + kfree(pcfg);
  1469. + return NULL;
  1470. +}
  1471. +
  1472. +static struct bm_portal_config *get_pcfg(struct list_head *list)
  1473. +{
  1474. + struct bm_portal_config *pcfg;
  1475. + if (list_empty(list))
  1476. + return NULL;
  1477. + pcfg = list_entry(list->prev, struct bm_portal_config, list);
  1478. + list_del(&pcfg->list);
  1479. + return pcfg;
  1480. +}
  1481. +
  1482. +static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
  1483. + uint32_t idx)
  1484. +{
  1485. + struct bm_portal_config *pcfg;
  1486. + if (list_empty(list))
  1487. + return NULL;
  1488. + list_for_each_entry(pcfg, list, list) {
  1489. + if (pcfg->public_cfg.index == idx) {
  1490. + list_del(&pcfg->list);
  1491. + return pcfg;
  1492. + }
  1493. + }
  1494. + return NULL;
  1495. +}
  1496. +
  1497. +struct bm_portal_config *bm_get_unused_portal(void)
  1498. +{
  1499. + return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
  1500. +}
  1501. +
  1502. +struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
  1503. +{
  1504. + struct bm_portal_config *ret;
  1505. + spin_lock(&unused_pcfgs_lock);
  1506. + if (idx == QBMAN_ANY_PORTAL_IDX)
  1507. + ret = get_pcfg(&unused_pcfgs);
  1508. + else
  1509. + ret = get_pcfg_idx(&unused_pcfgs, idx);
  1510. + spin_unlock(&unused_pcfgs_lock);
  1511. + return ret;
  1512. +}
  1513. +
  1514. +void bm_put_unused_portal(struct bm_portal_config *pcfg)
  1515. +{
  1516. + spin_lock(&unused_pcfgs_lock);
  1517. + list_add(&pcfg->list, &unused_pcfgs);
  1518. + spin_unlock(&unused_pcfgs_lock);
  1519. +}
  1520. +
  1521. +static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
  1522. +{
  1523. + struct bman_portal *p;
  1524. + p = bman_create_affine_portal(pcfg);
  1525. + if (p) {
  1526. +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
  1527. + bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
  1528. +#endif
  1529. + pr_info("Bman portal %sinitialised, cpu %d\n",
  1530. + pcfg->public_cfg.is_shared ? "(shared) " : "",
  1531. + pcfg->public_cfg.cpu);
  1532. + affine_bportals[pcfg->public_cfg.cpu] = p;
  1533. + } else
  1534. + pr_crit("Bman portal failure on cpu %d\n",
  1535. + pcfg->public_cfg.cpu);
  1536. + return p;
  1537. +}
  1538. +
  1539. +static void init_slave(int cpu)
  1540. +{
  1541. + struct bman_portal *p;
  1542. + p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
  1543. + if (!p)
  1544. + pr_err("Bman slave portal failure on cpu %d\n", cpu);
  1545. + else
  1546. + pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
  1547. + if (shared_portals_idx >= num_shared_portals)
  1548. + shared_portals_idx = 0;
  1549. + affine_bportals[cpu] = p;
  1550. +}
  1551. +
  1552. +/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
  1553. + * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes
  1554. + * and/or ranges of indexes, with each being optionally prefixed by "s" to
  1555. + * explicitly mark it or them for sharing.
  1556. + * Eg:
  1557. + * bportals=s0,1-3,s4
  1558. + * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
  1559. + * portals, and any remaining cpus share the portals that are assigned to cpus 0
  1560. + * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
  1561. + * cpu 0's portal, cpu 6 would share cpu 4's portal, and cpu 7 would share cpu
  1562. + * 0's portal.) */
  1563. +static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
  1564. +static struct cpumask want_shared __initdata; /* cpus requested with "s" */
  1565. +
  1566. +static int __init parse_bportals(char *str)
  1567. +{
  1568. + return parse_portals_bootarg(str, &want_shared, &want_unshared,
  1569. + "bportals");
  1570. +}
  1571. +__setup("bportals=", parse_bportals);
  1572. +
  1573. +static void bman_offline_cpu(unsigned int cpu)
  1574. +{
  1575. + struct bman_portal *p;
  1576. + const struct bm_portal_config *pcfg;
  1577. + p = (struct bman_portal *)affine_bportals[cpu];
  1578. + if (p) {
  1579. + pcfg = bman_get_bm_portal_config(p);
  1580. + if (pcfg)
  1581. + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
  1582. + }
  1583. +}
  1584. +
  1585. +#ifdef CONFIG_HOTPLUG_CPU
  1586. +static void bman_online_cpu(unsigned int cpu)
  1587. +{
  1588. + struct bman_portal *p;
  1589. + const struct bm_portal_config *pcfg;
  1590. + p = (struct bman_portal *)affine_bportals[cpu];
  1591. + if (p) {
  1592. + pcfg = bman_get_bm_portal_config(p);
  1593. + if (pcfg)
  1594. + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
  1595. + }
  1596. +}
  1597. +
  1598. +static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
  1599. + unsigned long action, void *hcpu)
  1600. +{
  1601. + unsigned int cpu = (unsigned long)hcpu;
  1602. +
  1603. + switch (action) {
  1604. + case CPU_ONLINE:
  1605. + case CPU_ONLINE_FROZEN:
  1606. + bman_online_cpu(cpu);
  1607. + break;
  1608. + case CPU_DOWN_PREPARE:
  1609. + case CPU_DOWN_PREPARE_FROZEN:
  1610. + bman_offline_cpu(cpu);
  1611. + default:
  1612. + break;
  1613. + }
  1614. + return NOTIFY_OK;
  1615. +}
  1616. +
  1617. +static struct notifier_block bman_hotplug_cpu_notifier = {
  1618. + .notifier_call = bman_hotplug_cpu_callback,
  1619. +};
  1620. +#endif /* CONFIG_HOTPLUG_CPU */
  1621. +
  1622. +/* Initialise the Bman driver. The meat of this function deals with portals. The
  1623. + * following describes the flow of portal-handling, the code "steps" refer to
  1624. + * this description;
  1625. + * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
  1626. + * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
  1627. + * bound).
  1628. + * 2. The "want_shared" and "want_unshared" lists (as filled by the
  1629. + * "bportals=[...]" bootarg) are processed, allocating portals and assigning
  1630. + * them to cpus, placing them in the relevant list and setting ::cpu as
  1631. + * appropriate. If no "bportals" bootarg was present, the default is to try to
  1632. + * assign portals to all online cpus at the time of driver initialisation.
  1633. + * Any failure to allocate portals (when parsing the "want" lists or when
  1634. + * using default behaviour) will be silently tolerated (the "fixup" logic in
  1635. + * step 3 will determine what happens in this case).
  1636. + * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
  1637. + * sharing and sharing is required (because not all cpus have been assigned
  1638. + * portals), then one portal will be marked for sharing. Conversely, if no
  1639. + * sharing is required, any portals marked for sharing will not be shared. It
  1640. + * may be that sharing occurs when it wasn't expected, if portal allocation
  1641. + * failed to honour all the requested assignments (including the default
  1642. + * assignments if no bootarg is present).
  1643. + * 4. Unshared portals are initialised on their respective cpus.
  1644. + * 5. Shared portals are initialised on their respective cpus.
  1645. + * 6. Each remaining cpu is initialised to slave to one of the shared portals,
  1646. + * which are selected in a round-robin fashion.
  1647. + * Any portal configs left unused are available for USDPAA allocation.
  1648. + */
  1649. +__init int bman_init(void)
  1650. +{
  1651. + struct cpumask slave_cpus;
  1652. + struct cpumask unshared_cpus = *cpu_none_mask;
  1653. + struct cpumask shared_cpus = *cpu_none_mask;
  1654. + LIST_HEAD(unshared_pcfgs);
  1655. + LIST_HEAD(shared_pcfgs);
  1656. + struct device_node *dn;
  1657. + struct bm_portal_config *pcfg;
  1658. + struct bman_portal *p;
  1659. + int cpu, ret;
  1660. + struct cpumask offline_cpus;
  1661. +
  1662. + /* Initialise the Bman (CCSR) device */
  1663. + for_each_compatible_node(dn, NULL, "fsl,bman") {
  1664. + if (!bman_init_ccsr(dn))
  1665. + pr_info("Bman err interrupt handler present\n");
  1666. + else
  1667. + pr_err("Bman CCSR setup failed\n");
  1668. + }
  1669. + /* Initialise any declared buffer pools */
  1670. + for_each_compatible_node(dn, NULL, "fsl,bpool") {
  1671. + ret = fsl_bpool_init(dn);
  1672. + if (ret)
  1673. + return ret;
  1674. + }
  1675. + /* Step 1. See comments at the beginning of the file. */
  1676. + for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
  1677. + if (!of_device_is_available(dn))
  1678. + continue;
  1679. + pcfg = parse_pcfg(dn);
  1680. + if (pcfg)
  1681. + list_add_tail(&pcfg->list, &unused_pcfgs);
  1682. + }
  1683. + /* Step 2. */
  1684. + for_each_possible_cpu(cpu) {
  1685. + if (cpumask_test_cpu(cpu, &want_shared)) {
  1686. + pcfg = get_pcfg(&unused_pcfgs);
  1687. + if (!pcfg)
  1688. + break;
  1689. + pcfg->public_cfg.cpu = cpu;
  1690. + list_add_tail(&pcfg->list, &shared_pcfgs);
  1691. + cpumask_set_cpu(cpu, &shared_cpus);
  1692. + }
  1693. + if (cpumask_test_cpu(cpu, &want_unshared)) {
  1694. + if (cpumask_test_cpu(cpu, &shared_cpus))
  1695. + continue;
  1696. + pcfg = get_pcfg(&unused_pcfgs);
  1697. + if (!pcfg)
  1698. + break;
  1699. + pcfg->public_cfg.cpu = cpu;
  1700. + list_add_tail(&pcfg->list, &unshared_pcfgs);
  1701. + cpumask_set_cpu(cpu, &unshared_cpus);
  1702. + }
  1703. + }
  1704. + if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
  1705. + /* Default, give an unshared portal to each online cpu */
  1706. + for_each_online_cpu(cpu) {
  1707. + pcfg = get_pcfg(&unused_pcfgs);
  1708. + if (!pcfg)
  1709. + break;
  1710. + pcfg->public_cfg.cpu = cpu;
  1711. + list_add_tail(&pcfg->list, &unshared_pcfgs);
  1712. + cpumask_set_cpu(cpu, &unshared_cpus);
  1713. + }
  1714. + }
  1715. + /* Step 3. */
  1716. + cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
  1717. + cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
  1718. + if (cpumask_empty(&slave_cpus)) {
  1719. + /* No sharing required */
  1720. + if (!list_empty(&shared_pcfgs)) {
  1721. + /* Migrate "shared" to "unshared" */
  1722. + cpumask_or(&unshared_cpus, &unshared_cpus,
  1723. + &shared_cpus);
  1724. + cpumask_clear(&shared_cpus);
  1725. + list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
  1726. + INIT_LIST_HEAD(&shared_pcfgs);
  1727. + }
  1728. + } else {
  1729. + /* Sharing required */
  1730. + if (list_empty(&shared_pcfgs)) {
  1731. + /* Migrate one "unshared" to "shared" */
  1732. + pcfg = get_pcfg(&unshared_pcfgs);
  1733. + if (!pcfg) {
  1734. + pr_crit("No BMan portals available!\n");
  1735. + return 0;
  1736. + }
  1737. + cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
  1738. + cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
  1739. + list_add_tail(&pcfg->list, &shared_pcfgs);
  1740. + }
  1741. + }
  1742. + /* Step 4. */
  1743. + list_for_each_entry(pcfg, &unshared_pcfgs, list) {
  1744. + pcfg->public_cfg.is_shared = 0;
  1745. + p = init_pcfg(pcfg);
  1746. + if (!p) {
  1747. + pr_crit("Unable to initialize bman portal\n");
  1748. + return 0;
  1749. + }
  1750. + }
  1751. + /* Step 5. */
  1752. + list_for_each_entry(pcfg, &shared_pcfgs, list) {
  1753. + pcfg->public_cfg.is_shared = 1;
  1754. + p = init_pcfg(pcfg);
  1755. + if (p)
  1756. + shared_portals[num_shared_portals++] = p;
  1757. + }
  1758. + /* Step 6. */
  1759. + if (!cpumask_empty(&slave_cpus))
  1760. + for_each_cpu(cpu, &slave_cpus)
  1761. + init_slave(cpu);
  1762. + pr_info("Bman portals initialised\n");
  1763. + cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
  1764. + for_each_cpu(cpu, &offline_cpus)
  1765. + bman_offline_cpu(cpu);
  1766. +#ifdef CONFIG_HOTPLUG_CPU
  1767. + register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
  1768. +#endif
  1769. + return 0;
  1770. +}
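
For illustration only (not part of this patch): a minimal, self-contained sketch of the step-6 round-robin that init_slave() performs with shared_portals[] and shared_portals_idx. The cpu numbers and portal count below are made up to match the "bportals=s0,1-3,s4" example in the comment above parse_bportals().

    #include <stdio.h>

    int main(void)
    {
        int shared_portal_cpu[] = { 0, 4 }; /* portals marked for sharing */
        int slave_cpu[] = { 5, 6, 7 };      /* cpus left over after steps 2/3 */
        int num_shared = 2, idx = 0;
        int i;

        for (i = 0; i < 3; i++) {
            printf("cpu %d slaves to the portal on cpu %d\n",
                   slave_cpu[i], shared_portal_cpu[idx++]);
            if (idx >= num_shared) /* wrap, as init_slave() does */
                idx = 0;
        }
        return 0;
    }
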
  1771. +
  1772. +__init int bman_resource_init(void)
  1773. +{
  1774. + struct device_node *dn;
  1775. + int ret;
  1776. +
  1777. + /* Initialise BPID allocation ranges */
  1778. + for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
  1779. + ret = fsl_bpid_range_init(dn);
  1780. + if (ret)
  1781. + return ret;
  1782. + }
  1783. + return 0;
  1784. +}
  1785. +
  1786. +#ifdef CONFIG_SUSPEND
  1787. +void suspend_unused_bportal(void)
  1788. +{
  1789. + struct bm_portal_config *pcfg;
  1790. +
  1791. + if (list_empty(&unused_pcfgs))
  1792. + return;
  1793. +
  1794. + list_for_each_entry(pcfg, &unused_pcfgs, list) {
  1795. +#ifdef CONFIG_PM_DEBUG
  1796. + pr_info("Need to save bportal %d\n", pcfg->public_cfg.index);
  1797. +#endif
  1798. + /* save isdr, disable all via isdr, clear isr */
  1799. + pcfg->saved_isdr =
  1800. + __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
  1801. + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
  1802. + 0xe08);
  1803. + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
  1804. + 0xe00);
  1805. + }
  1806. + return;
  1807. +}
  1808. +
  1809. +void resume_unused_bportal(void)
  1810. +{
  1811. + struct bm_portal_config *pcfg;
  1812. +
  1813. + if (list_empty(&unused_pcfgs))
  1814. + return;
  1815. +
  1816. + list_for_each_entry(pcfg, &unused_pcfgs, list) {
  1817. +#ifdef CONFIG_PM_DEBUG
  1818. + pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index);
  1819. +#endif
  1820. + /* restore isdr */
  1821. + __raw_writel(pcfg->saved_isdr,
  1822. + pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
  1823. + }
  1824. + return;
  1825. +}
  1826. +#endif
  1827. --- /dev/null
  1828. +++ b/drivers/staging/fsl_qbman/bman_high.c
  1829. @@ -0,0 +1,1141 @@
  1830. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  1831. + *
  1832. + * Redistribution and use in source and binary forms, with or without
  1833. + * modification, are permitted provided that the following conditions are met:
  1834. + * * Redistributions of source code must retain the above copyright
  1835. + * notice, this list of conditions and the following disclaimer.
  1836. + * * Redistributions in binary form must reproduce the above copyright
  1837. + * notice, this list of conditions and the following disclaimer in the
  1838. + * documentation and/or other materials provided with the distribution.
  1839. + * * Neither the name of Freescale Semiconductor nor the
  1840. + * names of its contributors may be used to endorse or promote products
  1841. + * derived from this software without specific prior written permission.
  1842. + *
  1843. + *
  1844. + * ALTERNATIVELY, this software may be distributed under the terms of the
  1845. + * GNU General Public License ("GPL") as published by the Free Software
  1846. + * Foundation, either version 2 of that License or (at your option) any
  1847. + * later version.
  1848. + *
  1849. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  1850. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  1851. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  1852. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  1853. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  1854. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  1855. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  1856. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  1857. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  1858. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  1859. + */
  1860. +
  1861. +#include "bman_low.h"
  1862. +
  1863. +/* Compilation constants */
  1864. +#define RCR_THRESH 2 /* reread h/w CI when running out of space */
  1865. +#define IRQNAME "BMan portal %d"
  1866. +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
  1867. +
  1868. +struct bman_portal {
  1869. + struct bm_portal p;
  1870. + /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
  1871. + struct bman_depletion *pools;
  1872. + int thresh_set;
  1873. + unsigned long irq_sources;
  1874. + u32 slowpoll; /* only used when interrupts are off */
  1875. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  1876. + struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
  1877. +#endif
  1878. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  1879. + raw_spinlock_t sharing_lock; /* only used if is_shared */
  1880. + int is_shared;
  1881. + struct bman_portal *sharing_redirect;
  1882. +#endif
  1883. + /* When the cpu-affine portal is activated, this is non-NULL */
  1884. + const struct bm_portal_config *config;
  1885. + /* This is needed for power management */
  1886. + struct platform_device *pdev;
  1887. + /* 64-entry hash-table of pool objects that are tracking depletion
  1888. + * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
  1889. + * we're not fussy about cache-misses and so forth - whereas the above
  1890. + * members should all fit in one cacheline.
  1891. + * BTW, with 64 entries in the hash table and 64 buffer pools to track,
  1892. + * you'll never guess the hash-function ... */
  1893. + struct bman_pool *cb[64];
  1894. + char irqname[MAX_IRQNAME];
  1895. + /* Track if the portal was alloced by the driver */
  1896. + u8 alloced;
  1897. + /* power management data */
  1898. + u32 save_isdr;
  1899. +};
  1900. +
  1901. +/* For an explanation of the locking, redirection, or affine-portal logic,
  1902. + * please consult the Qman driver for details. This is the same, only simpler
  1903. + * (no fiddly Qman-specific bits.) */
  1904. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  1905. +#define PORTAL_IRQ_LOCK(p, irqflags) \
  1906. + do { \
  1907. + if ((p)->is_shared) \
  1908. + raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
  1909. + else \
  1910. + local_irq_save(irqflags); \
  1911. + } while (0)
  1912. +#define PORTAL_IRQ_UNLOCK(p, irqflags) \
  1913. + do { \
  1914. + if ((p)->is_shared) \
  1915. + raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
  1916. + irqflags); \
  1917. + else \
  1918. + local_irq_restore(irqflags); \
  1919. + } while (0)
  1920. +#else
  1921. +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
  1922. +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
  1923. +#endif
  1924. +
  1925. +static cpumask_t affine_mask;
  1926. +static DEFINE_SPINLOCK(affine_mask_lock);
  1927. +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
  1928. +static inline struct bman_portal *get_raw_affine_portal(void)
  1929. +{
  1930. + return &get_cpu_var(bman_affine_portal);
  1931. +}
  1932. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  1933. +static inline struct bman_portal *get_affine_portal(void)
  1934. +{
  1935. + struct bman_portal *p = get_raw_affine_portal();
  1936. + if (p->sharing_redirect)
  1937. + return p->sharing_redirect;
  1938. + return p;
  1939. +}
  1940. +#else
  1941. +#define get_affine_portal() get_raw_affine_portal()
  1942. +#endif
  1943. +static inline void put_affine_portal(void)
  1944. +{
  1945. + put_cpu_var(bman_affine_portal);
  1946. +}
  1947. +static inline struct bman_portal *get_poll_portal(void)
  1948. +{
  1949. + return &get_cpu_var(bman_affine_portal);
  1950. +}
  1951. +#define put_poll_portal()
  1952. +
  1953. +/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
  1954. + * more than one such object per Bman buffer pool, eg. if different users of the
  1955. + * pool are operating via different portals. */
  1956. +struct bman_pool {
  1957. + struct bman_pool_params params;
  1958. + /* Used for hash-table admin when using depletion notifications. */
  1959. + struct bman_portal *portal;
  1960. + struct bman_pool *next;
  1961. + /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
  1962. + struct bm_buffer *sp;
  1963. + unsigned int sp_fill;
  1964. +#ifdef CONFIG_FSL_DPA_CHECKING
  1965. + atomic_t in_use;
  1966. +#endif
  1967. +};
  1968. +
  1969. +/* (De)Registration of depletion notification callbacks */
  1970. +static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
  1971. +{
  1972. + __maybe_unused unsigned long irqflags;
  1973. + pool->portal = portal;
  1974. + PORTAL_IRQ_LOCK(portal, irqflags);
  1975. + pool->next = portal->cb[pool->params.bpid];
  1976. + portal->cb[pool->params.bpid] = pool;
  1977. + if (!pool->next)
  1978. + /* First object for that bpid on this portal, enable the BSCN
  1979. + * mask bit. */
  1980. + bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
  1981. + PORTAL_IRQ_UNLOCK(portal, irqflags);
  1982. +}
  1983. +static void depletion_unlink(struct bman_pool *pool)
  1984. +{
  1985. + struct bman_pool *it, *last = NULL;
  1986. + struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
  1987. + __maybe_unused unsigned long irqflags;
  1988. + PORTAL_IRQ_LOCK(pool->portal, irqflags);
  1989. + it = *base; /* <-- gotcha, don't do this prior to the irq_save */
  1990. + while (it != pool) {
  1991. + last = it;
  1992. + it = it->next;
  1993. + }
  1994. + if (!last)
  1995. + *base = pool->next;
  1996. + else
  1997. + last->next = pool->next;
  1998. + if (!last && !pool->next) {
  1999. + /* Last object for that bpid on this portal, disable the BSCN
  2000. + * mask bit. */
  2001. + bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
  2002. + /* And "forget" that we last saw this pool as depleted */
  2003. + bman_depletion_unset(&pool->portal->pools[1],
  2004. + pool->params.bpid);
  2005. + }
  2006. + PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
  2007. +}
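
For illustration only (not part of this patch): a minimal sketch of how a client might register for depletion entry/exit notifications. The callback signature is inferred from the pool->params.cb(p, pool, pool->params.cb_ctx, af) call site in __poll_portal_slow() below; the exact typedef and any other bman_pool_params fields live in bman.h, so treat the details here as assumptions.

    /* Assumed prototype: (portal, pool, cb_ctx, depleted-state) */
    static void my_depletion_cb(struct bman_portal *portal,
                                struct bman_pool *pool,
                                void *cb_ctx, int depleted)
    {
        pr_info("bpid %u %s depletion\n", bman_get_params(pool)->bpid,
                depleted ? "entered" : "exited");
    }

    static struct bman_pool *my_pool_with_notifications(void)
    {
        struct bman_pool_params params = {
            .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_DEPLETION,
            .cb = my_depletion_cb, /* invoked from __poll_portal_slow() */
            .cb_ctx = NULL,
        };
        return bman_new_pool(&params); /* NULL on failure */
    }
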
  2008. +
  2009. +/* In the case that the application's core loop calls qman_poll() and
  2010. + * bman_poll(), we ought to balance how often we incur the overheads of the
  2011. + * slow-path poll. We'll use two decrementer sources. The idle decrementer
  2012. + * constant is used when the last slow-poll detected no work to do, and the busy
  2013. + * decrementer constant when the last slow-poll had work to do. */
  2014. +#define SLOW_POLL_IDLE 1000
  2015. +#define SLOW_POLL_BUSY 10
  2016. +static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
  2017. +
  2018. +/* Portal interrupt handler */
  2019. +static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
  2020. +{
  2021. + struct bman_portal *p = ptr;
  2022. + u32 clear = p->irq_sources;
  2023. + u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
  2024. + clear |= __poll_portal_slow(p, is);
  2025. + bm_isr_status_clear(&p->p, clear);
  2026. + return IRQ_HANDLED;
  2027. +}
  2028. +
  2029. +#ifdef CONFIG_SUSPEND
  2030. +static int _bman_portal_suspend_noirq(struct device *dev)
  2031. +{
  2032. + struct bman_portal *p = (struct bman_portal *)dev->platform_data;
  2033. +#ifdef CONFIG_PM_DEBUG
  2034. + struct platform_device *pdev = to_platform_device(dev);
  2035. +#endif
  2036. + p->save_isdr = bm_isr_disable_read(&p->p);
  2037. + bm_isr_disable_write(&p->p, 0xffffffff);
  2038. + bm_isr_status_clear(&p->p, 0xffffffff);
  2039. +#ifdef CONFIG_PM_DEBUG
  2040. + pr_info("Suspend for %s\n", pdev->name);
  2041. +#endif
  2042. + return 0;
  2043. +}
  2044. +
  2045. +static int _bman_portal_resume_noirq(struct device *dev)
  2046. +{
  2047. + struct bman_portal *p = (struct bman_portal *)dev->platform_data;
  2048. +
  2049. + /* restore isdr */
  2050. + bm_isr_disable_write(&p->p, p->save_isdr);
  2051. + return 0;
  2052. +}
  2053. +#else
  2054. +#define _bman_portal_suspend_noirq NULL
  2055. +#define _bman_portal_resume_noirq NULL
  2056. +#endif
  2057. +
  2058. +struct dev_pm_domain bman_portal_device_pm_domain = {
  2059. + .ops = {
  2060. + USE_PLATFORM_PM_SLEEP_OPS
  2061. + .suspend_noirq = _bman_portal_suspend_noirq,
  2062. + .resume_noirq = _bman_portal_resume_noirq,
  2063. + }
  2064. +};
  2065. +
  2066. +struct bman_portal *bman_create_portal(
  2067. + struct bman_portal *portal,
  2068. + const struct bm_portal_config *config)
  2069. +{
  2070. + struct bm_portal *__p;
  2071. + const struct bman_depletion *pools = &config->public_cfg.mask;
  2072. + int ret;
  2073. + u8 bpid = 0;
  2074. + char buf[16];
  2075. +
  2076. + if (!portal) {
  2077. + portal = kmalloc(sizeof(*portal), GFP_KERNEL);
  2078. + if (!portal)
  2079. + return portal;
  2080. + portal->alloced = 1;
  2081. + } else
  2082. + portal->alloced = 0;
  2083. +
  2084. + __p = &portal->p;
  2085. +
  2086. + /* prep the low-level portal struct with the mapped addresses from the
  2087. + * config; everything that follows depends on it, and "config" is more
  2088. + * for (de)reference... */
  2089. + __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
  2090. + __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
  2091. + if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
  2092. + pr_err("Bman RCR initialisation failed\n");
  2093. + goto fail_rcr;
  2094. + }
  2095. + if (bm_mc_init(__p)) {
  2096. + pr_err("Bman MC initialisation failed\n");
  2097. + goto fail_mc;
  2098. + }
  2099. + if (bm_isr_init(__p)) {
  2100. + pr_err("Bman ISR initialisation failed\n");
  2101. + goto fail_isr;
  2102. + }
  2103. + portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
  2104. + if (!portal->pools)
  2105. + goto fail_pools;
  2106. + portal->pools[0] = *pools;
  2107. + bman_depletion_init(portal->pools + 1);
  2108. + while (bpid < bman_pool_max) {
  2109. + /* Default to all BPIDs disabled, we enable as required at
  2110. + * run-time. */
  2111. + bm_isr_bscn_mask(__p, bpid, 0);
  2112. + bpid++;
  2113. + }
  2114. + portal->slowpoll = 0;
  2115. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2116. + portal->rcri_owned = NULL;
  2117. +#endif
  2118. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2119. + raw_spin_lock_init(&portal->sharing_lock);
  2120. + portal->is_shared = config->public_cfg.is_shared;
  2121. + portal->sharing_redirect = NULL;
  2122. +#endif
  2123. + sprintf(buf, "bportal-%u", config->public_cfg.index);
  2124. + portal->pdev = platform_device_alloc(buf, -1);
  2125. + if (!portal->pdev)
  2126. + goto fail_devalloc;
  2127. + portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain;
  2128. + portal->pdev->dev.platform_data = portal;
  2129. + ret = platform_device_add(portal->pdev);
  2130. + if (ret)
  2131. + goto fail_devadd;
  2132. + memset(&portal->cb, 0, sizeof(portal->cb));
  2133. + /* Write-to-clear any stale interrupt status bits */
  2134. + bm_isr_disable_write(__p, 0xffffffff);
  2135. + portal->irq_sources = 0;
  2136. + bm_isr_enable_write(__p, portal->irq_sources);
  2137. + bm_isr_status_clear(__p, 0xffffffff);
  2138. + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
  2139. + if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
  2140. + portal)) {
  2141. + pr_err("request_irq() failed\n");
  2142. + goto fail_irq;
  2143. + }
  2144. + if ((config->public_cfg.cpu != -1) &&
  2145. + irq_can_set_affinity(config->public_cfg.irq) &&
  2146. + irq_set_affinity(config->public_cfg.irq,
  2147. + cpumask_of(config->public_cfg.cpu))) {
  2148. + pr_err("irq_set_affinity() failed %s\n", portal->irqname);
  2149. + goto fail_affinity;
  2150. + }
  2151. +
  2152. + /* Need RCR to be empty before continuing */
  2153. + ret = bm_rcr_get_fill(__p);
  2154. + if (ret) {
  2155. + pr_err("Bman RCR unclean\n");
  2156. + goto fail_rcr_empty;
  2157. + }
  2158. + /* Success */
  2159. + portal->config = config;
  2160. +
  2161. + bm_isr_disable_write(__p, 0);
  2162. + bm_isr_uninhibit(__p);
  2163. + return portal;
  2164. +fail_rcr_empty:
  2165. +fail_affinity:
  2166. + free_irq(config->public_cfg.irq, portal);
  2167. +fail_irq:
  2168. + platform_device_del(portal->pdev);
  2169. +fail_devadd:
  2170. + platform_device_put(portal->pdev);
  2171. +fail_devalloc:
  2172. + kfree(portal->pools);
  2173. +fail_pools:
  2174. + bm_isr_finish(__p);
  2175. +fail_isr:
  2176. + bm_mc_finish(__p);
  2177. +fail_mc:
  2178. + bm_rcr_finish(__p);
  2179. +fail_rcr:
  2180. + if (portal->alloced)
  2181. + kfree(portal);
  2182. + return NULL;
  2183. +}
  2184. +
  2185. +struct bman_portal *bman_create_affine_portal(
  2186. + const struct bm_portal_config *config)
  2187. +{
  2188. + struct bman_portal *portal;
  2189. +
  2190. + portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
  2191. + portal = bman_create_portal(portal, config);
  2192. + if (portal) {
  2193. + spin_lock(&affine_mask_lock);
  2194. + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
  2195. + spin_unlock(&affine_mask_lock);
  2196. + }
  2197. + return portal;
  2198. +}
  2199. +
  2200. +
  2201. +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
  2202. + int cpu)
  2203. +{
  2204. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2205. + struct bman_portal *p;
  2206. + p = &per_cpu(bman_affine_portal, cpu);
  2207. + BUG_ON(p->config);
  2208. + BUG_ON(p->is_shared);
  2209. + BUG_ON(!redirect->config->public_cfg.is_shared);
  2210. + p->irq_sources = 0;
  2211. + p->sharing_redirect = redirect;
  2212. + return p;
  2213. +#else
  2214. + BUG();
  2215. + return NULL;
  2216. +#endif
  2217. +}
  2218. +
  2219. +void bman_destroy_portal(struct bman_portal *bm)
  2220. +{
  2221. + const struct bm_portal_config *pcfg;
  2222. + pcfg = bm->config;
  2223. + bm_rcr_cce_update(&bm->p);
  2224. + bm_rcr_cce_update(&bm->p);
  2225. +
  2226. + free_irq(pcfg->public_cfg.irq, bm);
  2227. +
  2228. + kfree(bm->pools);
  2229. + bm_isr_finish(&bm->p);
  2230. + bm_mc_finish(&bm->p);
  2231. + bm_rcr_finish(&bm->p);
  2232. + bm->config = NULL;
  2233. + if (bm->alloced)
  2234. + kfree(bm);
  2235. +}
  2236. +
  2237. +const struct bm_portal_config *bman_destroy_affine_portal(void)
  2238. +{
  2239. + struct bman_portal *bm = get_raw_affine_portal();
  2240. + const struct bm_portal_config *pcfg;
  2241. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2242. + if (bm->sharing_redirect) {
  2243. + bm->sharing_redirect = NULL;
  2244. + put_affine_portal();
  2245. + return NULL;
  2246. + }
  2247. + bm->is_shared = 0;
  2248. +#endif
  2249. + pcfg = bm->config;
  2250. + bman_destroy_portal(bm);
  2251. + spin_lock(&affine_mask_lock);
  2252. + cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
  2253. + spin_unlock(&affine_mask_lock);
  2254. + put_affine_portal();
  2255. + return pcfg;
  2256. +}
  2257. +
  2258. +/* When release logic waits on available RCR space, we need a global waitqueue
  2259. + * in the case of "affine" use (as the waits wake on different cpus which means
  2260. + * different portals - so we can't wait on any per-portal waitqueue). */
  2261. +static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
  2262. +
  2263. +static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
  2264. +{
  2265. + struct bman_depletion tmp;
  2266. + u32 ret = is;
  2267. +
  2268. + /* There is a gotcha to be aware of. If we do the query before clearing
  2269. + * the status register, we may miss state changes that occur between the
  2270. + * two. If we write to clear the status register before the query, the
  2271. + * cache-enabled query command may overtake the status register write
  2272. + * unless we use a heavyweight sync (which we don't want). Instead, we
  2273. + * write-to-clear the status register then *read it back* before doing
  2274. + * the query, hence the odd while loop with the 'is' accumulation. */
  2275. + if (is & BM_PIRQ_BSCN) {
  2276. + struct bm_mc_result *mcr;
  2277. + __maybe_unused unsigned long irqflags;
  2278. + unsigned int i, j;
  2279. + u32 __is;
  2280. + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
  2281. + while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
  2282. + is |= __is;
  2283. + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
  2284. + }
  2285. + is &= ~BM_PIRQ_BSCN;
  2286. + PORTAL_IRQ_LOCK(p, irqflags);
  2287. + bm_mc_start(&p->p);
  2288. + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
  2289. + while (!(mcr = bm_mc_result(&p->p)))
  2290. + cpu_relax();
  2291. + tmp = mcr->query.ds.state;
  2292. + tmp.__state[0] = be32_to_cpu(tmp.__state[0]);
  2293. + tmp.__state[1] = be32_to_cpu(tmp.__state[1]);
  2294. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2295. + for (i = 0; i < 2; i++) {
  2296. + int idx = i * 32;
  2297. + /* tmp is a mask of currently-depleted pools.
  2298. + * pools[0] is mask of those we care about.
  2299. + * pools[1] is our previous view (we only want to
  2300. + * be told about changes). */
  2301. + tmp.__state[i] &= p->pools[0].__state[i];
  2302. + if (tmp.__state[i] == p->pools[1].__state[i])
  2303. + /* fast-path, nothing to see, move along */
  2304. + continue;
  2305. + for (j = 0; j <= 31; j++, idx++) {
  2306. + struct bman_pool *pool = p->cb[idx];
  2307. + int b4 = bman_depletion_get(&p->pools[1], idx);
  2308. + int af = bman_depletion_get(&tmp, idx);
  2309. + if (b4 == af)
  2310. + continue;
  2311. + while (pool) {
  2312. + pool->params.cb(p, pool,
  2313. + pool->params.cb_ctx, af);
  2314. + pool = pool->next;
  2315. + }
  2316. + }
  2317. + }
  2318. + p->pools[1] = tmp;
  2319. + }
  2320. +
  2321. + if (is & BM_PIRQ_RCRI) {
  2322. + __maybe_unused unsigned long irqflags;
  2323. + PORTAL_IRQ_LOCK(p, irqflags);
  2324. + bm_rcr_cce_update(&p->p);
  2325. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2326. + /* If waiting for sync, we only cancel the interrupt threshold
  2327. + * when the ring utilisation hits zero. */
  2328. + if (p->rcri_owned) {
  2329. + if (!bm_rcr_get_fill(&p->p)) {
  2330. + p->rcri_owned = NULL;
  2331. + bm_rcr_set_ithresh(&p->p, 0);
  2332. + }
  2333. + } else
  2334. +#endif
  2335. + bm_rcr_set_ithresh(&p->p, 0);
  2336. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2337. + wake_up(&affine_queue);
  2338. + bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
  2339. + is &= ~BM_PIRQ_RCRI;
  2340. + }
  2341. +
  2342. + /* There should be no status register bits left undefined */
  2343. + DPA_ASSERT(!is);
  2344. + return ret;
  2345. +}
  2346. +
  2347. +const struct bman_portal_config *bman_get_portal_config(void)
  2348. +{
  2349. + struct bman_portal *p = get_affine_portal();
  2350. + const struct bman_portal_config *ret = &p->config->public_cfg;
  2351. + put_affine_portal();
  2352. + return ret;
  2353. +}
  2354. +EXPORT_SYMBOL(bman_get_portal_config);
  2355. +
  2356. +u32 bman_irqsource_get(void)
  2357. +{
  2358. + struct bman_portal *p = get_raw_affine_portal();
  2359. + u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
  2360. + put_affine_portal();
  2361. + return ret;
  2362. +}
  2363. +EXPORT_SYMBOL(bman_irqsource_get);
  2364. +
  2365. +int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
  2366. +{
  2367. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2368. + if (p->sharing_redirect)
  2369. + return -EINVAL;
  2370. + else
  2371. +#endif
  2372. + {
  2373. + __maybe_unused unsigned long irqflags;
  2374. + PORTAL_IRQ_LOCK(p, irqflags);
  2375. + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
  2376. + bm_isr_enable_write(&p->p, p->irq_sources);
  2377. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2378. + }
  2379. + return 0;
  2380. +}
  2381. +EXPORT_SYMBOL(bman_p_irqsource_add);
  2382. +
  2383. +int bman_irqsource_add(__maybe_unused u32 bits)
  2384. +{
  2385. + struct bman_portal *p = get_raw_affine_portal();
  2386. + int ret = 0;
  2387. + ret = bman_p_irqsource_add(p, bits);
  2388. + put_affine_portal();
  2389. + return ret;
  2390. +}
  2391. +EXPORT_SYMBOL(bman_irqsource_add);
  2392. +
  2393. +int bman_irqsource_remove(u32 bits)
  2394. +{
  2395. + struct bman_portal *p = get_raw_affine_portal();
  2396. + __maybe_unused unsigned long irqflags;
  2397. + u32 ier;
  2398. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2399. + if (p->sharing_redirect) {
  2400. + put_affine_portal();
  2401. + return -EINVAL;
  2402. + }
  2403. +#endif
  2404. + /* Our interrupt handler only processes+clears status register bits that
  2405. + * are in p->irq_sources. As we're trimming that mask, if one of them
  2406. + * were to assert in the status register just before we remove it from
  2407. + * the enable register, there would be an interrupt-storm when we
  2408. + * release the IRQ lock. So we wait for the enable register update to
  2409. + * take effect in h/w (by reading it back) and then clear all other bits
  2410. + * in the status register. Ie. we clear them from ISR once it's certain
  2411. + * IER won't allow them to reassert. */
  2412. + PORTAL_IRQ_LOCK(p, irqflags);
  2413. + bits &= BM_PIRQ_VISIBLE;
  2414. + clear_bits(bits, &p->irq_sources);
  2415. + bm_isr_enable_write(&p->p, p->irq_sources);
  2416. + ier = bm_isr_enable_read(&p->p);
  2417. + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
  2418. + * data-dependency, ie. to protect against re-ordering. */
  2419. + bm_isr_status_clear(&p->p, ~ier);
  2420. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2421. + put_affine_portal();
  2422. + return 0;
  2423. +}
  2424. +EXPORT_SYMBOL(bman_irqsource_remove);
  2425. +
  2426. +const cpumask_t *bman_affine_cpus(void)
  2427. +{
  2428. + return &affine_mask;
  2429. +}
  2430. +EXPORT_SYMBOL(bman_affine_cpus);
  2431. +
  2432. +u32 bman_poll_slow(void)
  2433. +{
  2434. + struct bman_portal *p = get_poll_portal();
  2435. + u32 ret;
  2436. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2437. + if (unlikely(p->sharing_redirect))
  2438. + ret = (u32)-1;
  2439. + else
  2440. +#endif
  2441. + {
  2442. + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
  2443. + ret = __poll_portal_slow(p, is);
  2444. + bm_isr_status_clear(&p->p, ret);
  2445. + }
  2446. + put_poll_portal();
  2447. + return ret;
  2448. +}
  2449. +EXPORT_SYMBOL(bman_poll_slow);
  2450. +
  2451. +/* Legacy wrapper */
  2452. +void bman_poll(void)
  2453. +{
  2454. + struct bman_portal *p = get_poll_portal();
  2455. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2456. + if (unlikely(p->sharing_redirect))
  2457. + goto done;
  2458. +#endif
  2459. + if (!(p->slowpoll--)) {
  2460. + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
  2461. + u32 active = __poll_portal_slow(p, is);
  2462. + if (active)
  2463. + p->slowpoll = SLOW_POLL_BUSY;
  2464. + else
  2465. + p->slowpoll = SLOW_POLL_IDLE;
  2466. + }
  2467. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  2468. +done:
  2469. +#endif
  2470. + put_poll_portal();
  2471. +}
  2472. +EXPORT_SYMBOL(bman_poll);
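
For illustration only (not part of this patch): a minimal sketch of the core-loop pattern described in the slow-poll comment above, assuming the loop runs in a kthread on a cpu that owns an affine portal and that portal interrupts are not wanted for the slow-path sources.

    static int my_poll_thread(void *arg)
    {
        /* Take RCRI/BSCN off the interrupt path for this cpu's portal */
        bman_irqsource_remove(BM_PIRQ_RCRI | BM_PIRQ_BSCN);
        while (!kthread_should_stop()) {
            bman_poll(); /* self-limits slow-path work via its slowpoll counter */
            /* ... fast-path work (e.g. qman_poll()) would go here ... */
            cond_resched();
        }
        return 0;
    }
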
  2473. +
  2474. +static const u32 zero_thresholds[4] = {0, 0, 0, 0};
  2475. +
  2476. +struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
  2477. +{
  2478. + struct bman_pool *pool = NULL;
  2479. + u32 bpid;
  2480. +
  2481. + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
  2482. + int ret = bman_alloc_bpid(&bpid);
  2483. + if (ret)
  2484. + return NULL;
  2485. + } else {
  2486. + if (params->bpid >= bman_pool_max)
  2487. + return NULL;
  2488. + bpid = params->bpid;
  2489. + }
  2490. +#ifdef CONFIG_FSL_BMAN_CONFIG
  2491. + if (params->flags & BMAN_POOL_FLAG_THRESH) {
  2492. + int ret = bm_pool_set(bpid, params->thresholds);
  2493. + if (ret)
  2494. + goto err;
  2495. + }
  2496. +#else
  2497. + if (params->flags & BMAN_POOL_FLAG_THRESH)
  2498. + goto err;
  2499. +#endif
  2500. + pool = kmalloc(sizeof(*pool), GFP_KERNEL);
  2501. + if (!pool)
  2502. + goto err;
  2503. + pool->sp = NULL;
  2504. + pool->sp_fill = 0;
  2505. + pool->params = *params;
  2506. +#ifdef CONFIG_FSL_DPA_CHECKING
  2507. + atomic_set(&pool->in_use, 1);
  2508. +#endif
  2509. + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
  2510. + pool->params.bpid = bpid;
  2511. + if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
  2512. + pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
  2513. + GFP_KERNEL);
  2514. + if (!pool->sp)
  2515. + goto err;
  2516. + }
  2517. + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
  2518. + struct bman_portal *p = get_affine_portal();
  2519. + if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
  2520. + pr_err("Depletion events disabled for bpid %d\n", bpid);
  2521. + goto err;
  2522. + }
  2523. + depletion_link(p, pool);
  2524. + put_affine_portal();
  2525. + }
  2526. + return pool;
  2527. +err:
  2528. +#ifdef CONFIG_FSL_BMAN_CONFIG
  2529. + if (params->flags & BMAN_POOL_FLAG_THRESH)
  2530. + bm_pool_set(bpid, zero_thresholds);
  2531. +#endif
  2532. + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
  2533. + bman_release_bpid(bpid);
  2534. + if (pool) {
  2535. + kfree(pool->sp);
  2536. + kfree(pool);
  2537. + }
  2538. + return NULL;
  2539. +}
  2540. +EXPORT_SYMBOL(bman_new_pool);
  2541. +
  2542. +void bman_free_pool(struct bman_pool *pool)
  2543. +{
  2544. +#ifdef CONFIG_FSL_BMAN_CONFIG
  2545. + if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
  2546. + bm_pool_set(pool->params.bpid, zero_thresholds);
  2547. +#endif
  2548. + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
  2549. + depletion_unlink(pool);
  2550. + if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
  2551. + if (pool->sp_fill)
  2552. + pr_err("Stockpile not flushed, has %u in bpid %u.\n",
  2553. + pool->sp_fill, pool->params.bpid);
  2554. + kfree(pool->sp);
  2555. + pool->sp = NULL;
  2556. + pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
  2557. + }
  2558. + if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
  2559. + bman_release_bpid(pool->params.bpid);
  2560. + kfree(pool);
  2561. +}
  2562. +EXPORT_SYMBOL(bman_free_pool);
  2563. +
  2564. +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
  2565. +{
  2566. + return &pool->params;
  2567. +}
  2568. +EXPORT_SYMBOL(bman_get_params);
  2569. +
  2570. +static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
  2571. +{
  2572. + if (avail)
  2573. + bm_rcr_cce_prefetch(&p->p);
  2574. + else
  2575. + bm_rcr_cce_update(&p->p);
  2576. +}
  2577. +
  2578. +int bman_rcr_is_empty(void)
  2579. +{
  2580. + __maybe_unused unsigned long irqflags;
  2581. + struct bman_portal *p = get_affine_portal();
  2582. + u8 avail;
  2583. +
  2584. + PORTAL_IRQ_LOCK(p, irqflags);
  2585. + update_rcr_ci(p, 0);
  2586. + avail = bm_rcr_get_fill(&p->p);
  2587. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2588. + put_affine_portal();
  2589. + return avail == 0;
  2590. +}
  2591. +EXPORT_SYMBOL(bman_rcr_is_empty);
  2592. +
  2593. +static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
  2594. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  2595. + __maybe_unused struct bman_pool *pool,
  2596. +#endif
  2597. + __maybe_unused unsigned long *irqflags,
  2598. + __maybe_unused u32 flags)
  2599. +{
  2600. + struct bm_rcr_entry *r;
  2601. + u8 avail;
  2602. +
  2603. + *p = get_affine_portal();
  2604. + PORTAL_IRQ_LOCK(*p, (*irqflags));
  2605. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2606. + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
  2607. + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
  2608. + if ((*p)->rcri_owned) {
  2609. + PORTAL_IRQ_UNLOCK(*p, (*irqflags));
  2610. + put_affine_portal();
  2611. + return NULL;
  2612. + }
  2613. + (*p)->rcri_owned = pool;
  2614. + }
  2615. +#endif
  2616. + avail = bm_rcr_get_avail(&(*p)->p);
  2617. + if (avail < 2)
  2618. + update_rcr_ci(*p, avail);
  2619. + r = bm_rcr_start(&(*p)->p);
  2620. + if (unlikely(!r)) {
  2621. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2622. + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
  2623. + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
  2624. + (*p)->rcri_owned = NULL;
  2625. +#endif
  2626. + PORTAL_IRQ_UNLOCK(*p, (*irqflags));
  2627. + put_affine_portal();
  2628. + }
  2629. + return r;
  2630. +}
  2631. +
  2632. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  2633. +static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
  2634. + struct bman_pool *pool,
  2635. + __maybe_unused unsigned long *irqflags,
  2636. + u32 flags)
  2637. +{
  2638. + struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
  2639. + if (!rcr)
  2640. + bm_rcr_set_ithresh(&(*p)->p, 1);
  2641. + return rcr;
  2642. +}
  2643. +
  2644. +static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
  2645. + struct bman_pool *pool,
  2646. + __maybe_unused unsigned long *irqflags,
  2647. + u32 flags)
  2648. +{
  2649. + struct bm_rcr_entry *rcr;
  2650. +#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2651. + pool = NULL;
  2652. +#endif
  2653. + if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
  2654. + /* NB: return NULL if signal occurs before completion. Signal
  2655. + * can occur during return. Caller must check for signal */
  2656. + wait_event_interruptible(affine_queue,
  2657. + (rcr = __wait_rel_start(p, pool, irqflags, flags)));
  2658. + else
  2659. + wait_event(affine_queue,
  2660. + (rcr = __wait_rel_start(p, pool, irqflags, flags)));
  2661. + return rcr;
  2662. +}
  2663. +#endif
  2664. +
  2665. +static inline int __bman_release(struct bman_pool *pool,
  2666. + const struct bm_buffer *bufs, u8 num, u32 flags)
  2667. +{
  2668. + struct bman_portal *p;
  2669. + struct bm_rcr_entry *r;
  2670. + __maybe_unused unsigned long irqflags;
  2671. + u32 i = num - 1;
  2672. +
  2673. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  2674. + if (flags & BMAN_RELEASE_FLAG_WAIT)
  2675. + r = wait_rel_start(&p, pool, &irqflags, flags);
  2676. + else
  2677. + r = try_rel_start(&p, pool, &irqflags, flags);
  2678. +#else
  2679. + r = try_rel_start(&p, &irqflags, flags);
  2680. +#endif
  2681. + if (!r)
  2682. + return -EBUSY;
  2683. + /* We can copy all but the first entry as-is; copying the whole first entry
  2684. + * could trigger valid-bit badness, so use the overlay to mask the verb byte. */
  2685. + r->bufs[0].opaque =
  2686. + ((cpu_to_be64((bufs[0].opaque |
  2687. + ((u64)pool->params.bpid<<48))
  2688. + & 0x00ffffffffffffff)));
  2689. + if (i) {
  2690. + for (i = 1; i < num; i++)
  2691. + r->bufs[i].opaque =
  2692. + cpu_to_be64(bufs[i].opaque);
  2693. + }
  2694. +
  2695. + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
  2696. + (num & BM_RCR_VERB_BUFCOUNT_MASK));
  2697. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2698. + /* if we wish to sync we need to set the threshold after h/w sees the
  2699. + * new ring entry. As we're mixing cache-enabled and cache-inhibited
  2700. + * accesses, this requires a heavy-weight sync. */
  2701. + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
  2702. + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
  2703. + hwsync();
  2704. + bm_rcr_set_ithresh(&p->p, 1);
  2705. + }
  2706. +#endif
  2707. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2708. + put_affine_portal();
  2709. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  2710. + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
  2711. + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
  2712. + if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
  2713. + /* NB: return success even if signal occurs before
  2714. + * condition is true. pvb_commit guarantees success */
  2715. + wait_event_interruptible(affine_queue,
  2716. + (p->rcri_owned != pool));
  2717. + else
  2718. + wait_event(affine_queue, (p->rcri_owned != pool));
  2719. + }
  2720. +#endif
  2721. + return 0;
  2722. +}
  2723. +
  2724. +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
  2725. + u32 flags)
  2726. +{
  2727. + int ret;
  2728. +#ifdef CONFIG_FSL_DPA_CHECKING
  2729. + if (!num || (num > 8))
  2730. + return -EINVAL;
  2731. + if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
  2732. + return -EINVAL;
  2733. +#endif
  2734. + /* Without stockpile, this API is a pass-through to the h/w operation */
  2735. + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
  2736. + return __bman_release(pool, bufs, num, flags);
  2737. +#ifdef CONFIG_FSL_DPA_CHECKING
  2738. + if (!atomic_dec_and_test(&pool->in_use)) {
  2739. + pr_crit("Parallel attempts to enter bman_release() detected.");
  2740. + panic("only one instance of bman_released/acquired allowed");
  2741. + }
  2742. +#endif
  2743. + /* Two movements of buffers are possible, and can occur in either order.
  2744. + * A: moving buffers from the caller to the stockpile.
  2745. + * B: moving buffers from the stockpile to hardware.
  2746. + * Order 1: if there is already enough space in the stockpile for A
  2747. + * then we want to do A first, and only do B if we trigger the
  2748. + * stockpile-high threshold.
  2749. + * Order 2: if there is not enough space in the stockpile for A, then
  2750. + * we want to do B first, then do A if B had succeeded. However in this
  2751. + * case B is dependent on how many buffers the user needs to release,
  2752. + * not the stockpile-high threshold.
  2753. + * Due to the different handling of B between the two cases, putting A
  2754. + * and B in a while() loop would require quite obscure logic, so handle
  2755. + * the different sequences explicitly. */
  2756. + if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) {
  2757. + /* Order 1: do A */
  2758. + copy_words(pool->sp + pool->sp_fill, bufs,
  2759. + sizeof(struct bm_buffer) * num);
  2760. + pool->sp_fill += num;
  2761. + /* do B relative to STOCKPILE_HIGH */
  2762. + while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) {
  2763. + ret = __bman_release(pool,
  2764. + pool->sp + (pool->sp_fill - 8), 8,
  2765. + flags);
  2766. + if (ret >= 0)
  2767. + pool->sp_fill -= 8;
  2768. + }
  2769. + } else {
  2770. + /* Order 2: do B relative to 'num' */
  2771. + do {
  2772. + ret = __bman_release(pool,
  2773. + pool->sp + (pool->sp_fill - 8), 8,
  2774. + flags);
  2775. + if (ret < 0)
  2776. + /* failure */
  2777. + goto release_done;
  2778. + pool->sp_fill -= 8;
  2779. + } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ);
  2780. + /* do A */
  2781. + copy_words(pool->sp + pool->sp_fill, bufs,
  2782. + sizeof(struct bm_buffer) * num);
  2783. + pool->sp_fill += num;
  2784. + }
  2785. + /* success */
  2786. + ret = 0;
  2787. +release_done:
  2788. +#ifdef CONFIG_FSL_DPA_CHECKING
  2789. + atomic_inc(&pool->in_use);
  2790. +#endif
  2791. + return ret;
  2792. +}
  2793. +EXPORT_SYMBOL(bman_release);
  2794. +
  2795. +static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
  2796. + u8 num)
  2797. +{
  2798. + struct bman_portal *p = get_affine_portal();
  2799. + struct bm_mc_command *mcc;
  2800. + struct bm_mc_result *mcr;
  2801. + __maybe_unused unsigned long irqflags;
  2802. + int ret, i;
  2803. +
  2804. + PORTAL_IRQ_LOCK(p, irqflags);
  2805. + mcc = bm_mc_start(&p->p);
  2806. + mcc->acquire.bpid = pool->params.bpid;
  2807. + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
  2808. + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
  2809. + while (!(mcr = bm_mc_result(&p->p)))
  2810. + cpu_relax();
  2811. + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
  2812. + if (bufs) {
  2813. + for (i = 0; i < num; i++)
  2814. + bufs[i].opaque =
  2815. + be64_to_cpu(mcr->acquire.bufs[i].opaque);
  2816. + }
  2817. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2818. + put_affine_portal();
  2819. + if (ret != num)
  2820. + ret = -ENOMEM;
  2821. + return ret;
  2822. +}
  2823. +
  2824. +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
  2825. + u32 flags)
  2826. +{
  2827. + int ret;
  2828. +#ifdef CONFIG_FSL_DPA_CHECKING
  2829. + if (!num || (num > 8))
  2830. + return -EINVAL;
  2831. + if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
  2832. + return -EINVAL;
  2833. +#endif
  2834. + /* Without stockpile, this API is a pass-through to the h/w operation */
  2835. + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
  2836. + return __bman_acquire(pool, bufs, num);
  2837. +#ifdef CONFIG_FSL_DPA_CHECKING
  2838. + if (!atomic_dec_and_test(&pool->in_use)) {
  2839. + pr_crit("Parallel attempts to enter bman_acquire() detected.");
  2840. + panic("only one instance of bman_released/acquired allowed");
  2841. + }
  2842. +#endif
  2843. + /* Two movements of buffers are possible, and can occur in either order.
  2844. + * A: moving buffers from stockpile to the caller.
  2845. + * B: moving buffers from hardware to the stockpile.
  2846. + * Order 1: if there are already enough buffers in the stockpile for A
  2847. + * then we want to do A first, and only do B if we trigger the
  2848. + * stockpile-low threshold.
  2849. + * Order 2: if there are not enough buffers in the stockpile for A,
  2850. + * then we want to do B first, then do A if B had succeeded. However in
  2851. + * this case B is dependent on how many buffers the user needs, not the
  2852. + * stockpile-low threshold.
  2853. + * Due to the different handling of B between the two cases, putting A
  2854. + * and B in a while() loop would require quite obscure logic, so handle
  2855. + * the different sequences explicitly. */
  2856. + if (num <= pool->sp_fill) {
  2857. + /* Order 1: do A */
  2858. + copy_words(bufs, pool->sp + (pool->sp_fill - num),
  2859. + sizeof(struct bm_buffer) * num);
  2860. + pool->sp_fill -= num;
  2861. + /* do B relative to STOCKPILE_LOW */
  2862. + while (pool->sp_fill <= BMAN_STOCKPILE_LOW) {
  2863. + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
  2864. + if (ret < 0)
  2865. + ret = __bman_acquire(pool,
  2866. + pool->sp + pool->sp_fill, 1);
  2867. + if (ret < 0)
  2868. + break;
  2869. + pool->sp_fill += ret;
  2870. + }
  2871. + } else {
  2872. + /* Order 2: do B relative to 'num' */
  2873. + do {
  2874. + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
  2875. + if (ret < 0)
  2876. + ret = __bman_acquire(pool,
  2877. + pool->sp + pool->sp_fill, 1);
  2878. + if (ret < 0)
  2879. + /* failure */
  2880. + goto acquire_done;
  2881. + pool->sp_fill += ret;
  2882. + } while (pool->sp_fill < num);
  2883. + /* do A */
  2884. + copy_words(bufs, pool->sp + (pool->sp_fill - num),
  2885. + sizeof(struct bm_buffer) * num);
  2886. + pool->sp_fill -= num;
  2887. + }
  2888. + /* success */
  2889. + ret = num;
  2890. +acquire_done:
  2891. +#ifdef CONFIG_FSL_DPA_CHECKING
  2892. + atomic_inc(&pool->in_use);
  2893. +#endif
  2894. + return ret;
  2895. +}
  2896. +EXPORT_SYMBOL(bman_acquire);
  2897. +
  2898. +int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
  2899. +{
  2900. + u8 num;
  2901. + int ret;
  2902. +
  2903. + while (pool->sp_fill) {
  2904. + num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
  2905. + ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
  2906. + num, flags);
  2907. + if (ret)
  2908. + return ret;
  2909. + pool->sp_fill -= num;
  2910. + }
  2911. + return 0;
  2912. +}
  2913. +EXPORT_SYMBOL(bman_flush_stockpile);
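
For illustration only (not part of this patch): a hedged sketch tying the exported pool calls together. The bm_buffer contents are left as placeholders; in real use the caller fills in buffer addresses with the helpers from bman.h before releasing.

    static int my_pool_demo(void)
    {
        struct bman_pool_params params = {
            .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_STOCKPILE,
        };
        struct bm_buffer bufs[8] = {}; /* placeholder: fill with real addresses */
        struct bman_pool *pool;
        int ret;

        pool = bman_new_pool(&params);
        if (!pool)
            return -ENOMEM;
        ret = bman_release(pool, bufs, 8, 0);     /* seed the pool */
        if (!ret)
            ret = bman_acquire(pool, bufs, 8, 0); /* count acquired, or -ENOMEM */
        bman_flush_stockpile(pool, 0);            /* stockpile must be empty... */
        bman_free_pool(pool);                     /* ...before freeing the pool */
        return ret < 0 ? ret : 0;
    }
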
  2914. +
  2915. +int bman_query_pools(struct bm_pool_state *state)
  2916. +{
  2917. + struct bman_portal *p = get_affine_portal();
  2918. + struct bm_mc_result *mcr;
  2919. + __maybe_unused unsigned long irqflags;
  2920. +
  2921. + PORTAL_IRQ_LOCK(p, irqflags);
  2922. + bm_mc_start(&p->p);
  2923. + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
  2924. + while (!(mcr = bm_mc_result(&p->p)))
  2925. + cpu_relax();
  2926. + DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
  2927. + *state = mcr->query;
  2928. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2929. + put_affine_portal();
  2930. + return 0;
  2931. +}
  2932. +EXPORT_SYMBOL(bman_query_pools);
  2933. +
  2934. +#ifdef CONFIG_FSL_BMAN_CONFIG
  2935. +u32 bman_query_free_buffers(struct bman_pool *pool)
  2936. +{
  2937. + return bm_pool_free_buffers(pool->params.bpid);
  2938. +}
  2939. +EXPORT_SYMBOL(bman_query_free_buffers);
  2940. +
  2941. +int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
  2942. +{
  2943. + u32 bpid;
  2944. +
  2945. + bpid = bman_get_params(pool)->bpid;
  2946. +
  2947. + return bm_pool_set(bpid, thresholds);
  2948. +}
  2949. +EXPORT_SYMBOL(bman_update_pool_thresholds);
  2950. +#endif
  2951. +
  2952. +int bman_shutdown_pool(u32 bpid)
  2953. +{
  2954. + struct bman_portal *p = get_affine_portal();
  2955. + __maybe_unused unsigned long irqflags;
  2956. + int ret;
  2957. +
  2958. + PORTAL_IRQ_LOCK(p, irqflags);
  2959. + ret = bm_shutdown_pool(&p->p, bpid);
  2960. + PORTAL_IRQ_UNLOCK(p, irqflags);
  2961. + put_affine_portal();
  2962. + return ret;
  2963. +}
  2964. +EXPORT_SYMBOL(bman_shutdown_pool);
  2965. +
  2966. +const struct bm_portal_config *bman_get_bm_portal_config(
  2967. + struct bman_portal *portal)
  2968. +{
  2969. + return portal->sharing_redirect ? NULL : portal->config;
  2970. +}
  2971. --- /dev/null
  2972. +++ b/drivers/staging/fsl_qbman/bman_low.h
  2973. @@ -0,0 +1,559 @@
  2974. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  2975. + *
  2976. + * Redistribution and use in source and binary forms, with or without
  2977. + * modification, are permitted provided that the following conditions are met:
  2978. + * * Redistributions of source code must retain the above copyright
  2979. + * notice, this list of conditions and the following disclaimer.
  2980. + * * Redistributions in binary form must reproduce the above copyright
  2981. + * notice, this list of conditions and the following disclaimer in the
  2982. + * documentation and/or other materials provided with the distribution.
  2983. + * * Neither the name of Freescale Semiconductor nor the
  2984. + * names of its contributors may be used to endorse or promote products
  2985. + * derived from this software without specific prior written permission.
  2986. + *
  2987. + *
  2988. + * ALTERNATIVELY, this software may be distributed under the terms of the
  2989. + * GNU General Public License ("GPL") as published by the Free Software
  2990. + * Foundation, either version 2 of that License or (at your option) any
  2991. + * later version.
  2992. + *
  2993. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  2994. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  2995. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  2996. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  2997. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  2998. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  2999. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3000. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3001. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3002. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3003. + */
  3004. +
  3005. +#include "bman_private.h"
  3006. +
  3007. +/***************************/
  3008. +/* Portal register assists */
  3009. +/***************************/
  3010. +
  3011. +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
  3012. +
  3013. +/* Cache-inhibited register offsets */
  3014. +#define BM_REG_RCR_PI_CINH 0x0000
  3015. +#define BM_REG_RCR_CI_CINH 0x0004
  3016. +#define BM_REG_RCR_ITR 0x0008
  3017. +#define BM_REG_CFG 0x0100
  3018. +#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
  3019. +#define BM_REG_ISR 0x0e00
  3020. +#define BM_REG_IIR 0x0e0c
  3021. +
  3022. +/* Cache-enabled register offsets */
  3023. +#define BM_CL_CR 0x0000
  3024. +#define BM_CL_RR0 0x0100
  3025. +#define BM_CL_RR1 0x0140
  3026. +#define BM_CL_RCR 0x1000
  3027. +#define BM_CL_RCR_PI_CENA 0x3000
  3028. +#define BM_CL_RCR_CI_CENA 0x3100
  3029. +
  3030. +#endif
  3031. +
  3032. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  3033. +
  3034. +/* Cache-inhibited register offsets */
  3035. +#define BM_REG_RCR_PI_CINH 0x3000
  3036. +#define BM_REG_RCR_CI_CINH 0x3100
  3037. +#define BM_REG_RCR_ITR 0x3200
  3038. +#define BM_REG_CFG 0x3300
  3039. +#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
  3040. +#define BM_REG_ISR 0x3e00
  3041. +#define BM_REG_IIR 0x3ec0
  3042. +
  3043. +/* Cache-enabled register offsets */
  3044. +#define BM_CL_CR 0x0000
  3045. +#define BM_CL_RR0 0x0100
  3046. +#define BM_CL_RR1 0x0140
  3047. +#define BM_CL_RCR 0x1000
  3048. +#define BM_CL_RCR_PI_CENA 0x3000
  3049. +#define BM_CL_RCR_CI_CENA 0x3100
  3050. +
  3051. +#endif
  3052. +
  3053. +/* BTW, the drivers (and h/w programming model) already obtain the required
  3054. + * synchronisation for portal accesses via lwsync(), hwsync(), and
  3055. + * data-dependencies. Use of barrier()s or other order-preserving primitives
  3056. + * simply degrade performance. Hence the use of the __raw_*() interfaces, which
  3057. + * simply ensure that the compiler treats the portal registers as volatile (ie.
  3058. + * non-coherent). */
  3059. +
  3060. +/* Cache-inhibited register access. */
  3061. +#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o)))
  3062. +#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
  3063. + (bm)->addr_ci + (o));
  3064. +#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
  3065. +#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
  3066. +
  3067. +/* Cache-enabled (index) register access */
  3068. +#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
  3069. +#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
  3070. +#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o)))
  3071. +#define __bm_cl_out(bm, o, val) \
  3072. + do { \
  3073. + u32 *__tmpclout = (bm)->addr_ce + (o); \
  3074. + __raw_writel(cpu_to_be32(val), __tmpclout); \
  3075. + dcbf(__tmpclout); \
  3076. + } while (0)
  3077. +#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
  3078. +#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
  3079. +#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
  3080. +#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
  3081. +#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
  3082. +#define bm_cl_invalidate(reg)\
  3083. + __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
  3084. +
  3085. +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
  3086. + * analysis, look at using the "extra" bit in the ring index registers to avoid
  3087. + * cyclic issues. */
  3088. +static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
  3089. +{
  3090. + /* 'first' is included, 'last' is excluded */
  3091. + if (first <= last)
  3092. + return last - first;
  3093. + return ringsize + last - first;
  3094. +}
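As an illustrative aside (a sketch, not part of this patch), the wrap-around arithmetic above works out as follows for an 8-entry ring:

/*
 * Illustrative only: with ringsize == 8,
 *   bm_cyc_diff(8, 1, 5) == 4              (no wrap: entries 1,2,3,4)
 *   bm_cyc_diff(8, 6, 2) == 8 + 2 - 6 == 4 (wrap: entries 6,7,0,1)
 * 'first' is counted, 'last' is not.
 */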
  3095. +
  3096. +/* Portal modes.
  3097. + * Enum types:
  3098. + * pmode == production mode
  3099. + * cmode == consumption mode
  3100. + * Enum values use 3 letter codes. First letter matches the portal mode,
  3101. + * remaining two letters indicate:
  3102. + * ci == cache-inhibited portal register
  3103. + * ce == cache-enabled portal register
  3104. + * vb == in-band valid-bit (cache-enabled)
  3105. + */
  3106. +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
  3107. + bm_rcr_pci = 0, /* PI index, cache-inhibited */
  3108. + bm_rcr_pce = 1, /* PI index, cache-enabled */
  3109. + bm_rcr_pvb = 2 /* valid-bit */
  3110. +};
  3111. +enum bm_rcr_cmode { /* s/w-only */
  3112. + bm_rcr_cci, /* CI index, cache-inhibited */
  3113. + bm_rcr_cce /* CI index, cache-enabled */
  3114. +};
  3115. +
  3116. +
  3117. +/* ------------------------- */
  3118. +/* --- Portal structures --- */
  3119. +
  3120. +#define BM_RCR_SIZE 8
  3121. +
  3122. +struct bm_rcr {
  3123. + struct bm_rcr_entry *ring, *cursor;
  3124. + u8 ci, available, ithresh, vbit;
  3125. +#ifdef CONFIG_FSL_DPA_CHECKING
  3126. + u32 busy;
  3127. + enum bm_rcr_pmode pmode;
  3128. + enum bm_rcr_cmode cmode;
  3129. +#endif
  3130. +};
  3131. +
  3132. +struct bm_mc {
  3133. + struct bm_mc_command *cr;
  3134. + struct bm_mc_result *rr;
  3135. + u8 rridx, vbit;
  3136. +#ifdef CONFIG_FSL_DPA_CHECKING
  3137. + enum {
  3138. + /* Can only be _mc_start()ed */
  3139. + mc_idle,
  3140. + /* Can only be _mc_commit()ed or _mc_abort()ed */
  3141. + mc_user,
  3142. + /* Can only be _mc_retry()ed */
  3143. + mc_hw
  3144. + } state;
  3145. +#endif
  3146. +};
  3147. +
  3148. +struct bm_addr {
  3149. + void __iomem *addr_ce; /* cache-enabled */
  3150. + void __iomem *addr_ci; /* cache-inhibited */
  3151. +};
  3152. +
  3153. +struct bm_portal {
  3154. + struct bm_addr addr;
  3155. + struct bm_rcr rcr;
  3156. + struct bm_mc mc;
  3157. + struct bm_portal_config config;
  3158. +} ____cacheline_aligned;
  3159. +
  3160. +
  3161. +/* --------------- */
  3162. +/* --- RCR API --- */
  3163. +
  3164. +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
  3165. +#define RCR_CARRYCLEAR(p) \
  3166. + (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
  3167. +
  3168. +/* Bit-wise logic to convert a ring pointer to a ring index */
  3169. +static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
  3170. +{
  3171. + return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
  3172. +}
  3173. +
  3174. +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
  3175. +static inline void RCR_INC(struct bm_rcr *rcr)
  3176. +{
  3177. + /* NB: this is odd-looking, but experiments show that it generates
  3178. + * fast code with essentially no branching overheads. We increment to
  3179. + * the next RCR pointer and handle overflow and 'vbit'. */
  3180. + struct bm_rcr_entry *partial = rcr->cursor + 1;
  3181. + rcr->cursor = RCR_CARRYCLEAR(partial);
  3182. + if (partial != rcr->cursor)
  3183. + rcr->vbit ^= BM_RCR_VERB_VBIT;
  3184. +}
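As an illustrative aside (not part of this patch; it assumes the ring base has the "carry bit" clear, as it does at offset BM_CL_RCR), the pointer/index arithmetic above behaves like this:

/*
 * Illustrative only: RCR entries are 64 bytes, so an 8-entry ring spans
 * 0x200 bytes and BM_RCR_SIZE << 6 == 0x200 is the "carry bit".
 * If 'base' is the ring start and the cursor sits on the last entry
 * (base + 7 * 64 == base + 0x1c0), then cursor + 1 == base + 0x200;
 * RCR_CARRYCLEAR() masks that bit off, wrapping the pointer back to
 * 'base', and RCR_INC() toggles the valid-bit because the wrapped
 * pointer differs from the un-wrapped one.
 * RCR_PTR2IDX(base + 0x1c0) == 7, RCR_PTR2IDX(base) == 0.
 */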
  3185. +
  3186. +static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
  3187. + __maybe_unused enum bm_rcr_cmode cmode)
  3188. +{
  3189. + /* This use of 'register', as well as all other occurrences, is because
  3190. + * it has been observed to generate much faster code with gcc than is
  3191. + * otherwise the case. */
  3192. + register struct bm_rcr *rcr = &portal->rcr;
  3193. + u32 cfg;
  3194. + u8 pi;
  3195. +
  3196. + rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
  3197. + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
  3198. +
  3199. + pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
  3200. + rcr->cursor = rcr->ring + pi;
  3201. + rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
  3202. + rcr->available = BM_RCR_SIZE - 1
  3203. + - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
  3204. + rcr->ithresh = bm_in(RCR_ITR);
  3205. +#ifdef CONFIG_FSL_DPA_CHECKING
  3206. + rcr->busy = 0;
  3207. + rcr->pmode = pmode;
  3208. + rcr->cmode = cmode;
  3209. +#endif
  3210. + cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
  3211. + bm_out(CFG, cfg);
  3212. + return 0;
  3213. +}
  3214. +
  3215. +static inline void bm_rcr_finish(struct bm_portal *portal)
  3216. +{
  3217. + register struct bm_rcr *rcr = &portal->rcr;
  3218. + u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
  3219. + u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
  3220. + DPA_ASSERT(!rcr->busy);
  3221. + if (pi != RCR_PTR2IDX(rcr->cursor))
  3222. + pr_crit("losing uncommitted RCR entries\n");
  3223. + if (ci != rcr->ci)
  3224. + pr_crit("missing existing RCR completions\n");
  3225. + if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
  3226. + pr_crit("RCR destroyed unquiesced\n");
  3227. +}
  3228. +
  3229. +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
  3230. +{
  3231. + register struct bm_rcr *rcr = &portal->rcr;
  3232. + DPA_ASSERT(!rcr->busy);
  3233. + if (!rcr->available)
  3234. + return NULL;
  3235. +#ifdef CONFIG_FSL_DPA_CHECKING
  3236. + rcr->busy = 1;
  3237. +#endif
  3238. + dcbz_64(rcr->cursor);
  3239. + return rcr->cursor;
  3240. +}
  3241. +
  3242. +static inline void bm_rcr_abort(struct bm_portal *portal)
  3243. +{
  3244. + __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
  3245. + DPA_ASSERT(rcr->busy);
  3246. +#ifdef CONFIG_FSL_DPA_CHECKING
  3247. + rcr->busy = 0;
  3248. +#endif
  3249. +}
  3250. +
  3251. +static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
  3252. + struct bm_portal *portal, u8 myverb)
  3253. +{
  3254. + register struct bm_rcr *rcr = &portal->rcr;
  3255. + DPA_ASSERT(rcr->busy);
  3256. + DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
  3257. + if (rcr->available == 1)
  3258. + return NULL;
  3259. + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
  3260. + dcbf_64(rcr->cursor);
  3261. + RCR_INC(rcr);
  3262. + rcr->available--;
  3263. + dcbz_64(rcr->cursor);
  3264. + return rcr->cursor;
  3265. +}
  3266. +
  3267. +static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
  3268. +{
  3269. + register struct bm_rcr *rcr = &portal->rcr;
  3270. + DPA_ASSERT(rcr->busy);
  3271. + DPA_ASSERT(rcr->pmode == bm_rcr_pci);
  3272. + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
  3273. + RCR_INC(rcr);
  3274. + rcr->available--;
  3275. + hwsync();
  3276. + bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
  3277. +#ifdef CONFIG_FSL_DPA_CHECKING
  3278. + rcr->busy = 0;
  3279. +#endif
  3280. +}
  3281. +
  3282. +static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
  3283. +{
  3284. + __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
  3285. + DPA_ASSERT(rcr->pmode == bm_rcr_pce);
  3286. + bm_cl_invalidate(RCR_PI);
  3287. + bm_cl_touch_rw(RCR_PI);
  3288. +}
  3289. +
  3290. +static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
  3291. +{
  3292. + register struct bm_rcr *rcr = &portal->rcr;
  3293. + DPA_ASSERT(rcr->busy);
  3294. + DPA_ASSERT(rcr->pmode == bm_rcr_pce);
  3295. + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
  3296. + RCR_INC(rcr);
  3297. + rcr->available--;
  3298. + lwsync();
  3299. + bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
  3300. +#ifdef CONFIG_FSL_DPA_CHECKING
  3301. + rcr->busy = 0;
  3302. +#endif
  3303. +}
  3304. +
  3305. +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
  3306. +{
  3307. + register struct bm_rcr *rcr = &portal->rcr;
  3308. + struct bm_rcr_entry *rcursor;
  3309. + DPA_ASSERT(rcr->busy);
  3310. + DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
  3311. + lwsync();
  3312. + rcursor = rcr->cursor;
  3313. + rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
  3314. + dcbf_64(rcursor);
  3315. + RCR_INC(rcr);
  3316. + rcr->available--;
  3317. +#ifdef CONFIG_FSL_DPA_CHECKING
  3318. + rcr->busy = 0;
  3319. +#endif
  3320. +}
  3321. +
  3322. +static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
  3323. +{
  3324. + register struct bm_rcr *rcr = &portal->rcr;
  3325. + u8 diff, old_ci = rcr->ci;
  3326. + DPA_ASSERT(rcr->cmode == bm_rcr_cci);
  3327. + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
  3328. + diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
  3329. + rcr->available += diff;
  3330. + return diff;
  3331. +}
  3332. +
  3333. +static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
  3334. +{
  3335. + __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
  3336. + DPA_ASSERT(rcr->cmode == bm_rcr_cce);
  3337. + bm_cl_touch_ro(RCR_CI);
  3338. +}
  3339. +
  3340. +static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
  3341. +{
  3342. + register struct bm_rcr *rcr = &portal->rcr;
  3343. + u8 diff, old_ci = rcr->ci;
  3344. + DPA_ASSERT(rcr->cmode == bm_rcr_cce);
  3345. + rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
  3346. + bm_cl_invalidate(RCR_CI);
  3347. + diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
  3348. + rcr->available += diff;
  3349. + return diff;
  3350. +}
  3351. +
  3352. +static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
  3353. +{
  3354. + register struct bm_rcr *rcr = &portal->rcr;
  3355. + return rcr->ithresh;
  3356. +}
  3357. +
  3358. +static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
  3359. +{
  3360. + register struct bm_rcr *rcr = &portal->rcr;
  3361. + rcr->ithresh = ithresh;
  3362. + bm_out(RCR_ITR, ithresh);
  3363. +}
  3364. +
  3365. +static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
  3366. +{
  3367. + register struct bm_rcr *rcr = &portal->rcr;
  3368. + return rcr->available;
  3369. +}
  3370. +
  3371. +static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
  3372. +{
  3373. + register struct bm_rcr *rcr = &portal->rcr;
  3374. + return BM_RCR_SIZE - 1 - rcr->available;
  3375. +}
  3376. +
  3377. +
  3378. +/* ------------------------------ */
  3379. +/* --- Management command API --- */
  3380. +
  3381. +static inline int bm_mc_init(struct bm_portal *portal)
  3382. +{
  3383. + register struct bm_mc *mc = &portal->mc;
  3384. + mc->cr = portal->addr.addr_ce + BM_CL_CR;
  3385. + mc->rr = portal->addr.addr_ce + BM_CL_RR0;
  3386. + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
  3387. + BM_MCC_VERB_VBIT) ? 0 : 1;
  3388. + mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
  3389. +#ifdef CONFIG_FSL_DPA_CHECKING
  3390. + mc->state = mc_idle;
  3391. +#endif
  3392. + return 0;
  3393. +}
  3394. +
  3395. +static inline void bm_mc_finish(struct bm_portal *portal)
  3396. +{
  3397. + __maybe_unused register struct bm_mc *mc = &portal->mc;
  3398. + DPA_ASSERT(mc->state == mc_idle);
  3399. +#ifdef CONFIG_FSL_DPA_CHECKING
  3400. + if (mc->state != mc_idle)
  3401. + pr_crit("Losing incomplete MC command\n");
  3402. +#endif
  3403. +}
  3404. +
  3405. +static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
  3406. +{
  3407. + register struct bm_mc *mc = &portal->mc;
  3408. + DPA_ASSERT(mc->state == mc_idle);
  3409. +#ifdef CONFIG_FSL_DPA_CHECKING
  3410. + mc->state = mc_user;
  3411. +#endif
  3412. + dcbz_64(mc->cr);
  3413. + return mc->cr;
  3414. +}
  3415. +
  3416. +static inline void bm_mc_abort(struct bm_portal *portal)
  3417. +{
  3418. + __maybe_unused register struct bm_mc *mc = &portal->mc;
  3419. + DPA_ASSERT(mc->state == mc_user);
  3420. +#ifdef CONFIG_FSL_DPA_CHECKING
  3421. + mc->state = mc_idle;
  3422. +#endif
  3423. +}
  3424. +
  3425. +static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
  3426. +{
  3427. + register struct bm_mc *mc = &portal->mc;
  3428. + struct bm_mc_result *rr = mc->rr + mc->rridx;
  3429. + DPA_ASSERT(mc->state == mc_user);
  3430. + lwsync();
  3431. + mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
  3432. + dcbf(mc->cr);
  3433. + dcbit_ro(rr);
  3434. +#ifdef CONFIG_FSL_DPA_CHECKING
  3435. + mc->state = mc_hw;
  3436. +#endif
  3437. +}
  3438. +
  3439. +static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
  3440. +{
  3441. + register struct bm_mc *mc = &portal->mc;
  3442. + struct bm_mc_result *rr = mc->rr + mc->rridx;
  3443. + DPA_ASSERT(mc->state == mc_hw);
  3444. + /* The inactive response register's verb byte always returns zero until
  3445. + * its command is submitted and completed. This includes the valid-bit,
  3446. + * in case you were wondering... */
  3447. + if (!__raw_readb(&rr->verb)) {
  3448. + dcbit_ro(rr);
  3449. + return NULL;
  3450. + }
  3451. + mc->rridx ^= 1;
  3452. + mc->vbit ^= BM_MCC_VERB_VBIT;
  3453. +#ifdef CONFIG_FSL_DPA_CHECKING
  3454. + mc->state = mc_idle;
  3455. +#endif
  3456. + return rr;
  3457. +}
  3458. +
  3459. +
  3460. +/* ------------------------------------- */
  3461. +/* --- Portal interrupt register API --- */
  3462. +
  3463. +static inline int bm_isr_init(__always_unused struct bm_portal *portal)
  3464. +{
  3465. + return 0;
  3466. +}
  3467. +
  3468. +static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
  3469. +{
  3470. +}
  3471. +
  3472. +#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
  3473. +#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
  3474. +static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
  3475. + int enable)
  3476. +{
  3477. + u32 val;
  3478. + DPA_ASSERT(bpid < bman_pool_max);
  3479. + /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
  3480. + val = __bm_in(&portal->addr, SCN_REG(bpid));
  3481. + if (enable)
  3482. + val |= SCN_BIT(bpid);
  3483. + else
  3484. + val &= ~SCN_BIT(bpid);
  3485. + __bm_out(&portal->addr, SCN_REG(bpid), val);
  3486. +}
  3487. +
  3488. +static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
  3489. +{
  3490. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  3491. + return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
  3492. +#else
  3493. + return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
  3494. +#endif
  3495. +}
  3496. +
  3497. +static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
  3498. + u32 val)
  3499. +{
  3500. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  3501. + __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
  3502. +#else
  3503. + __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
  3504. +#endif
  3505. +}
  3506. +
  3507. +/* Buffer Pool Cleanup */
  3508. +static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
  3509. +{
  3510. + struct bm_mc_command *bm_cmd;
  3511. + struct bm_mc_result *bm_res;
  3512. +
  3513. + int aq_count = 0;
  3514. + bool stop = false;
  3515. + while (!stop) {
  3516. + /* Acquire buffers until empty */
  3517. + bm_cmd = bm_mc_start(p);
  3518. + bm_cmd->acquire.bpid = bpid;
  3519. + bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
  3520. + while (!(bm_res = bm_mc_result(p)))
  3521. + cpu_relax();
  3522. + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
  3523. + /* Pool is empty */
  3524. + /* TBD : Should we do a few extra iterations in
  3525. + case some other blocks keep buffers 'on deck',
  3526. + which may also be problematic */
  3527. + stop = true;
  3528. + } else
  3529. + ++aq_count;
  3530. + }
  3531. + return 0;
  3532. +}
  3533. --- /dev/null
  3534. +++ b/drivers/staging/fsl_qbman/bman_private.h
  3535. @@ -0,0 +1,166 @@
  3536. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  3537. + *
  3538. + * Redistribution and use in source and binary forms, with or without
  3539. + * modification, are permitted provided that the following conditions are met:
  3540. + * * Redistributions of source code must retain the above copyright
  3541. + * notice, this list of conditions and the following disclaimer.
  3542. + * * Redistributions in binary form must reproduce the above copyright
  3543. + * notice, this list of conditions and the following disclaimer in the
  3544. + * documentation and/or other materials provided with the distribution.
  3545. + * * Neither the name of Freescale Semiconductor nor the
  3546. + * names of its contributors may be used to endorse or promote products
  3547. + * derived from this software without specific prior written permission.
  3548. + *
  3549. + *
  3550. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3551. + * GNU General Public License ("GPL") as published by the Free Software
  3552. + * Foundation, either version 2 of that License or (at your option) any
  3553. + * later version.
  3554. + *
  3555. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3556. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3557. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3558. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3559. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3560. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3561. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3562. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3563. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3564. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3565. + */
  3566. +
  3567. +#include "dpa_sys.h"
  3568. +#include <linux/fsl_bman.h>
  3569. +
  3570. +/* Revision info (for errata and feature handling) */
  3571. +#define BMAN_REV10 0x0100
  3572. +#define BMAN_REV20 0x0200
  3573. +#define BMAN_REV21 0x0201
  3574. +#define QBMAN_ANY_PORTAL_IDX 0xffffffff
  3575. +extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
  3576. +
  3577. +/*
  3578. + * Global variable for the max portal/pool number this Bman version supports
  3579. + */
  3580. +extern u16 bman_pool_max;
  3581. +
  3582. +/* used by CCSR and portal interrupt code */
  3583. +enum bm_isr_reg {
  3584. + bm_isr_status = 0,
  3585. + bm_isr_enable = 1,
  3586. + bm_isr_disable = 2,
  3587. + bm_isr_inhibit = 3
  3588. +};
  3589. +
  3590. +struct bm_portal_config {
  3591. + /* Corenet portal addresses;
  3592. + * [0]==cache-enabled, [1]==cache-inhibited. */
  3593. + __iomem void *addr_virt[2];
  3594. + struct resource addr_phys[2];
  3595. + /* Allow these to be joined in lists */
  3596. + struct list_head list;
  3597. + /* User-visible portal configuration settings */
  3598. + struct bman_portal_config public_cfg;
  3599. + /* power management saved data */
  3600. + u32 saved_isdr;
  3601. +};
  3602. +
  3603. +#ifdef CONFIG_FSL_BMAN_CONFIG
  3604. +/* Hooks from bman_driver.c to bman_config.c */
  3605. +int bman_init_ccsr(struct device_node *node);
  3606. +#endif
  3607. +
  3608. +/* Hooks from bman_driver.c in to bman_high.c */
  3609. +struct bman_portal *bman_create_portal(
  3610. + struct bman_portal *portal,
  3611. + const struct bm_portal_config *config);
  3612. +struct bman_portal *bman_create_affine_portal(
  3613. + const struct bm_portal_config *config);
  3614. +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
  3615. + int cpu);
  3616. +void bman_destroy_portal(struct bman_portal *bm);
  3617. +
  3618. +const struct bm_portal_config *bman_destroy_affine_portal(void);
  3619. +
  3620. +/* Hooks from fsl_usdpaa.c to bman_driver.c */
  3621. +struct bm_portal_config *bm_get_unused_portal(void);
  3622. +struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
  3623. +void bm_put_unused_portal(struct bm_portal_config *pcfg);
  3624. +void bm_set_liodns(struct bm_portal_config *pcfg);
  3625. +
  3626. +/* Pool logic in the portal driver, during initialisation, needs to know if
  3627. + * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
  3628. +#ifdef CONFIG_FSL_BMAN_CONFIG
  3629. +int bman_have_ccsr(void);
  3630. +#else
  3631. +#define bman_have_ccsr() 0
  3632. +#endif
  3633. +
  3634. +/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
  3635. + * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
  3636. + * might fail (if the buffer pool is depleted). So this value provides some
  3637. + * "stagger" in that the bman_acquire() function will only fail if lots of bufs
  3638. + * are requested at once or if h/w has been tested a couple of times without
  3639. + * luck. The _HIGH value: when bman_release() is called and the stockpile
  3640. + * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
  3641. + * the release ring is full). So this value provides some "stagger" so that
  3642. + * ring-access is retried a couple of times prior to the API returning a
  3643. + * failure. The following *must* be true;
  3644. + * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
  3645. + * (to avoid thrashing)
  3646. + * BMAN_STOCKPILE_SZ >= 16
  3647. + * (as the release logic expects to either send 8 buffers to hw prior to
  3648. + * adding the given buffers to the stockpile or add the buffers to the
  3649. + * stockpile before sending 8 to hw, as the API must be an all-or-nothing
  3650. + * success/fail.)
  3651. + */
  3652. +#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
  3653. +#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
  3654. +#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
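A minimal sketch (not from this driver; the helper name and locals are hypothetical) of how an acquire path might consult these watermarks:

/* Illustrative sketch only; 'sp', 'sp_fill' and hw_acquire_up_to_8() are
 * hypothetical names, not part of this driver. */
static int stockpile_acquire_one(struct bm_buffer *buf,
				 struct bm_buffer *sp, unsigned int *sp_fill)
{
	if (*sp_fill <= BMAN_STOCKPILE_LOW) {
		/* Opportunistic refill from h/w; a failure here is
		 * tolerated until the stockpile itself runs dry. */
		int got = hw_acquire_up_to_8(sp + *sp_fill); /* hypothetical */
		if (got > 0)
			*sp_fill += got;
	}
	if (!*sp_fill)
		return -ENOMEM;	/* pool depleted and stockpile empty */
	*buf = sp[--(*sp_fill)];
	return 0;
}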
  3655. +
  3656. +/*************************************************/
  3657. +/* BMan s/w corenet portal, low-level i/face */
  3658. +/*************************************************/
  3659. +
  3660. +/* Used by all portal interrupt registers except 'inhibit'
  3661. + * This mask contains all the "irqsource" bits visible to API users
  3662. + */
  3663. +#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
  3664. +
  3665. +/* These are bm_<reg>_<verb>(). So for example, bm_isr_disable_write() means "write
  3666. + * the disable register" rather than "disable the ability to write". */
  3667. +#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
  3668. +#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
  3669. +#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
  3670. +#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
  3671. +#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
  3672. +#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
  3673. +#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
  3674. +#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
  3675. +
  3676. +#ifdef CONFIG_FSL_BMAN_CONFIG
  3677. +/* Set depletion thresholds associated with a buffer pool. Requires that the
  3678. + * operating system have access to Bman CCSR (ie. compiled in support and
  3679. + * run-time access courtesy of the device-tree). */
  3680. +int bm_pool_set(u32 bpid, const u32 *thresholds);
  3681. +#define BM_POOL_THRESH_SW_ENTER 0
  3682. +#define BM_POOL_THRESH_SW_EXIT 1
  3683. +#define BM_POOL_THRESH_HW_ENTER 2
  3684. +#define BM_POOL_THRESH_HW_EXIT 3
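As an illustrative usage note (the values are made up, not taken from this patch), the four thresholds are passed as an array indexed by the defines above:

/* Illustrative only: example threshold values, ordered by the
 * BM_POOL_THRESH_* indices defined above. */
u32 thresholds[4];
thresholds[BM_POOL_THRESH_SW_ENTER] = 1024;	/* s/w depletion entry */
thresholds[BM_POOL_THRESH_SW_EXIT]  = 1536;	/* s/w depletion exit */
thresholds[BM_POOL_THRESH_HW_ENTER] = 256;	/* h/w depletion entry */
thresholds[BM_POOL_THRESH_HW_EXIT]  = 512;	/* h/w depletion exit */
/* then: bm_pool_set(bpid, thresholds); -- requires CCSR access */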
  3685. +
  3686. +/* Read the free buffer count for a given buffer pool */
  3687. +u32 bm_pool_free_buffers(u32 bpid);
  3688. +
  3689. +__init int bman_init(void);
  3690. +__init int bman_resource_init(void);
  3691. +
  3692. +const struct bm_portal_config *bman_get_bm_portal_config(
  3693. + struct bman_portal *portal);
  3694. +
  3695. +/* power management */
  3696. +#ifdef CONFIG_SUSPEND
  3697. +void suspend_unused_bportal(void);
  3698. +void resume_unused_bportal(void);
  3699. +#endif
  3700. +
  3701. +#endif /* CONFIG_FSL_BMAN_CONFIG */
  3702. --- /dev/null
  3703. +++ b/drivers/staging/fsl_qbman/bman_test.c
  3704. @@ -0,0 +1,56 @@
  3705. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  3706. + *
  3707. + * Redistribution and use in source and binary forms, with or without
  3708. + * modification, are permitted provided that the following conditions are met:
  3709. + * * Redistributions of source code must retain the above copyright
  3710. + * notice, this list of conditions and the following disclaimer.
  3711. + * * Redistributions in binary form must reproduce the above copyright
  3712. + * notice, this list of conditions and the following disclaimer in the
  3713. + * documentation and/or other materials provided with the distribution.
  3714. + * * Neither the name of Freescale Semiconductor nor the
  3715. + * names of its contributors may be used to endorse or promote products
  3716. + * derived from this software without specific prior written permission.
  3717. + *
  3718. + *
  3719. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3720. + * GNU General Public License ("GPL") as published by the Free Software
  3721. + * Foundation, either version 2 of that License or (at your option) any
  3722. + * later version.
  3723. + *
  3724. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3725. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3726. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3727. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3728. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3729. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3730. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3731. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3732. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3733. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3734. + */
  3735. +
  3736. +#include "bman_test.h"
  3737. +
  3738. +MODULE_AUTHOR("Geoff Thorpe");
  3739. +MODULE_LICENSE("Dual BSD/GPL");
  3740. +MODULE_DESCRIPTION("Bman testing");
  3741. +
  3742. +static int test_init(void)
  3743. +{
  3744. +#ifdef CONFIG_FSL_BMAN_TEST_HIGH
  3745. + int loop = 1;
  3746. + while (loop--)
  3747. + bman_test_high();
  3748. +#endif
  3749. +#ifdef CONFIG_FSL_BMAN_TEST_THRESH
  3750. + bman_test_thresh();
  3751. +#endif
  3752. + return 0;
  3753. +}
  3754. +
  3755. +static void test_exit(void)
  3756. +{
  3757. +}
  3758. +
  3759. +module_init(test_init);
  3760. +module_exit(test_exit);
  3761. --- /dev/null
  3762. +++ b/drivers/staging/fsl_qbman/bman_test.h
  3763. @@ -0,0 +1,44 @@
  3764. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  3765. + *
  3766. + * Redistribution and use in source and binary forms, with or without
  3767. + * modification, are permitted provided that the following conditions are met:
  3768. + * * Redistributions of source code must retain the above copyright
  3769. + * notice, this list of conditions and the following disclaimer.
  3770. + * * Redistributions in binary form must reproduce the above copyright
  3771. + * notice, this list of conditions and the following disclaimer in the
  3772. + * documentation and/or other materials provided with the distribution.
  3773. + * * Neither the name of Freescale Semiconductor nor the
  3774. + * names of its contributors may be used to endorse or promote products
  3775. + * derived from this software without specific prior written permission.
  3776. + *
  3777. + *
  3778. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3779. + * GNU General Public License ("GPL") as published by the Free Software
  3780. + * Foundation, either version 2 of that License or (at your option) any
  3781. + * later version.
  3782. + *
  3783. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3784. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3785. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3786. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3787. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3788. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3789. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3790. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3791. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3792. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3793. + */
  3794. +
  3795. +#include <linux/kernel.h>
  3796. +#include <linux/errno.h>
  3797. +#include <linux/io.h>
  3798. +#include <linux/slab.h>
  3799. +#include <linux/module.h>
  3800. +#include <linux/interrupt.h>
  3801. +#include <linux/delay.h>
  3802. +#include <linux/kthread.h>
  3803. +
  3804. +#include <linux/fsl_bman.h>
  3805. +
  3806. +void bman_test_high(void);
  3807. +void bman_test_thresh(void);
  3808. --- /dev/null
  3809. +++ b/drivers/staging/fsl_qbman/bman_test_high.c
  3810. @@ -0,0 +1,183 @@
  3811. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  3812. + *
  3813. + * Redistribution and use in source and binary forms, with or without
  3814. + * modification, are permitted provided that the following conditions are met:
  3815. + * * Redistributions of source code must retain the above copyright
  3816. + * notice, this list of conditions and the following disclaimer.
  3817. + * * Redistributions in binary form must reproduce the above copyright
  3818. + * notice, this list of conditions and the following disclaimer in the
  3819. + * documentation and/or other materials provided with the distribution.
  3820. + * * Neither the name of Freescale Semiconductor nor the
  3821. + * names of its contributors may be used to endorse or promote products
  3822. + * derived from this software without specific prior written permission.
  3823. + *
  3824. + *
  3825. + * ALTERNATIVELY, this software may be distributed under the terms of the
  3826. + * GNU General Public License ("GPL") as published by the Free Software
  3827. + * Foundation, either version 2 of that License or (at your option) any
  3828. + * later version.
  3829. + *
  3830. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  3831. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  3832. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  3833. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  3834. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  3835. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  3836. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  3837. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3838. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  3839. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3840. + */
  3841. +
  3842. +#include "bman_test.h"
  3843. +#include "bman_private.h"
  3844. +
  3845. +/*************/
  3846. +/* constants */
  3847. +/*************/
  3848. +
  3849. +#define PORTAL_OPAQUE ((void *)0xf00dbeef)
  3850. +#define POOL_OPAQUE ((void *)0xdeadabba)
  3851. +#define NUM_BUFS 93
  3852. +#define LOOPS 3
  3853. +#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
  3854. +
  3855. +/***************/
  3856. +/* global vars */
  3857. +/***************/
  3858. +
  3859. +static struct bman_pool *pool;
  3860. +static int depleted;
  3861. +static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
  3862. +static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
  3863. +static int bufs_received;
  3864. +
  3865. +/* Predeclare the callback so we can instantiate pool parameters */
  3866. +static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
  3867. +
  3868. +/**********************/
  3869. +/* internal functions */
  3870. +/**********************/
  3871. +
  3872. +static void bufs_init(void)
  3873. +{
  3874. + int i;
  3875. + for (i = 0; i < NUM_BUFS; i++)
  3876. + bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
  3877. + bufs_received = 0;
  3878. +}
  3879. +
  3880. +static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
  3881. +{
  3882. + if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
  3883. +
  3884. + /* On SoCs with Bman revision 2.0, Bman only respects the 40
  3885. + * LS-bits of buffer addresses, masking off the upper 8-bits on
  3886. + * release commands. The API provides for 48-bit addresses
  3887. + * because some SoCs support all 48-bits. When generating
  3888. + * garbage addresses for testing, we either need to zero the
  3889. + * upper 8-bits when releasing to Bman (otherwise we'll be
  3890. + * disappointed when the buffers we acquire back from Bman
  3891. + * don't match), or we need to mask the upper 8-bits off when
  3892. + * comparing. We do the latter.
  3893. + */
  3894. + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
  3895. + < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
  3896. + return -1;
  3897. + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
  3898. + > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
  3899. + return 1;
  3900. + } else {
  3901. + if (bm_buffer_get64(a) < bm_buffer_get64(b))
  3902. + return -1;
  3903. + if (bm_buffer_get64(a) > bm_buffer_get64(b))
  3904. + return 1;
  3905. + }
  3906. +
  3907. + return 0;
  3908. +}
  3909. +
  3910. +static void bufs_confirm(void)
  3911. +{
  3912. + int i, j;
  3913. + for (i = 0; i < NUM_BUFS; i++) {
  3914. + int matches = 0;
  3915. + for (j = 0; j < NUM_BUFS; j++)
  3916. + if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
  3917. + matches++;
  3918. + BUG_ON(matches != 1);
  3919. + }
  3920. +}
  3921. +
  3922. +/********/
  3923. +/* test */
  3924. +/********/
  3925. +
  3926. +static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
  3927. + void *pool_ctx, int __depleted)
  3928. +{
  3929. + BUG_ON(__pool != pool);
  3930. + BUG_ON(pool_ctx != POOL_OPAQUE);
  3931. + depleted = __depleted;
  3932. +}
  3933. +
  3934. +void bman_test_high(void)
  3935. +{
  3936. + struct bman_pool_params pparams = {
  3937. + .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
  3938. + .cb = depletion_cb,
  3939. + .cb_ctx = POOL_OPAQUE,
  3940. + };
  3941. + int i, loops = LOOPS;
  3942. + struct bm_buffer tmp_buf;
  3943. +
  3944. + bufs_init();
  3945. +
  3946. + pr_info("BMAN: --- starting high-level test ---\n");
  3947. +
  3948. + pool = bman_new_pool(&pparams);
  3949. + BUG_ON(!pool);
  3950. +
  3951. + /*******************/
  3952. + /* Release buffers */
  3953. + /*******************/
  3954. +do_loop:
  3955. + i = 0;
  3956. + while (i < NUM_BUFS) {
  3957. + u32 flags = BMAN_RELEASE_FLAG_WAIT;
  3958. + int num = 8;
  3959. + if ((i + num) > NUM_BUFS)
  3960. + num = NUM_BUFS - i;
  3961. + if ((i + num) == NUM_BUFS)
  3962. + flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
  3963. + if (bman_release(pool, bufs_in + i, num, flags))
  3964. + panic("bman_release() failed\n");
  3965. + i += num;
  3966. + }
  3967. +
  3968. + /*******************/
  3969. + /* Acquire buffers */
  3970. + /*******************/
  3971. + while (i > 0) {
  3972. + int tmp, num = 8;
  3973. + if (num > i)
  3974. + num = i;
  3975. + tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
  3976. + BUG_ON(tmp != num);
  3977. + i -= num;
  3978. + }
  3979. +
  3980. + i = bman_acquire(pool, &tmp_buf, 1, 0);
  3981. + BUG_ON(i > 0);
  3982. +
  3983. + bufs_confirm();
  3984. +
  3985. + if (--loops)
  3986. + goto do_loop;
  3987. +
  3988. + /************/
  3989. + /* Clean up */
  3990. + /************/
  3991. + bman_free_pool(pool);
  3992. + pr_info("BMAN: --- finished high-level test ---\n");
  3993. +}
  3994. --- /dev/null
  3995. +++ b/drivers/staging/fsl_qbman/bman_test_thresh.c
  3996. @@ -0,0 +1,196 @@
  3997. +/* Copyright 2010-2011 Freescale Semiconductor, Inc.
  3998. + *
  3999. + * Redistribution and use in source and binary forms, with or without
  4000. + * modification, are permitted provided that the following conditions are met:
  4001. + * * Redistributions of source code must retain the above copyright
  4002. + * notice, this list of conditions and the following disclaimer.
  4003. + * * Redistributions in binary form must reproduce the above copyright
  4004. + * notice, this list of conditions and the following disclaimer in the
  4005. + * documentation and/or other materials provided with the distribution.
  4006. + * * Neither the name of Freescale Semiconductor nor the
  4007. + * names of its contributors may be used to endorse or promote products
  4008. + * derived from this software without specific prior written permission.
  4009. + *
  4010. + *
  4011. + * ALTERNATIVELY, this software may be distributed under the terms of the
  4012. + * GNU General Public License ("GPL") as published by the Free Software
  4013. + * Foundation, either version 2 of that License or (at your option) any
  4014. + * later version.
  4015. + *
  4016. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  4017. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  4018. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  4019. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  4020. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  4021. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  4022. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  4023. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4024. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  4025. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4026. + */
  4027. +
  4028. +#include "bman_test.h"
  4029. +
  4030. +/* Test constants */
  4031. +#define TEST_NUMBUFS 129728
  4032. +#define TEST_EXIT 129536
  4033. +#define TEST_ENTRY 129024
  4034. +
  4035. +struct affine_test_data {
  4036. + struct task_struct *t;
  4037. + int cpu;
  4038. + int expect_affinity;
  4039. + int drain;
  4040. + int num_enter;
  4041. + int num_exit;
  4042. + struct list_head node;
  4043. + struct completion wakethread;
  4044. + struct completion wakeparent;
  4045. +};
  4046. +
  4047. +static void cb_depletion(struct bman_portal *portal,
  4048. + struct bman_pool *pool,
  4049. + void *opaque,
  4050. + int depleted)
  4051. +{
  4052. + struct affine_test_data *data = opaque;
  4053. + int c = smp_processor_id();
  4054. + pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
  4055. + bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
  4056. + /* We should be executing on the CPU of the thread that owns the pool if
  4057. + and only if that CPU has an affine portal (ie. it isn't slaved). */
  4058. + BUG_ON((c != data->cpu) && data->expect_affinity);
  4059. + BUG_ON((c == data->cpu) && !data->expect_affinity);
  4060. + if (depleted)
  4061. + data->num_enter++;
  4062. + else
  4063. + data->num_exit++;
  4064. +}
  4065. +
  4066. +/* Params used to set up a pool; this also dynamically allocates a BPID */
  4067. +static const struct bman_pool_params params_nocb = {
  4068. + .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
  4069. + .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
  4070. +};
  4071. +
  4072. +/* Params used to set up each cpu's pool with callbacks enabled */
  4073. +static struct bman_pool_params params_cb = {
  4074. + .bpid = 0, /* will be replaced to match pool_nocb */
  4075. + .flags = BMAN_POOL_FLAG_DEPLETION,
  4076. + .cb = cb_depletion
  4077. +};
  4078. +
  4079. +static struct bman_pool *pool_nocb;
  4080. +static LIST_HEAD(threads);
  4081. +
  4082. +static int affine_test(void *__data)
  4083. +{
  4084. + struct bman_pool *pool;
  4085. + struct affine_test_data *data = __data;
  4086. + struct bman_pool_params my_params = params_cb;
  4087. +
  4088. + pr_info("thread %d: starting\n", data->cpu);
  4089. + /* create the pool */
  4090. + my_params.cb_ctx = data;
  4091. + pool = bman_new_pool(&my_params);
  4092. + BUG_ON(!pool);
  4093. + complete(&data->wakeparent);
  4094. + wait_for_completion(&data->wakethread);
  4095. + init_completion(&data->wakethread);
  4096. +
  4097. + /* if we're the drainer, we get signalled for that */
  4098. + if (data->drain) {
  4099. + struct bm_buffer buf;
  4100. + int ret;
  4101. + pr_info("thread %d: draining...\n", data->cpu);
  4102. + do {
  4103. + ret = bman_acquire(pool, &buf, 1, 0);
  4104. + } while (ret > 0);
  4105. + pr_info("thread %d: draining done.\n", data->cpu);
  4106. + complete(&data->wakeparent);
  4107. + wait_for_completion(&data->wakethread);
  4108. + init_completion(&data->wakethread);
  4109. + }
  4110. +
  4111. + /* cleanup */
  4112. + bman_free_pool(pool);
  4113. + while (!kthread_should_stop())
  4114. + cpu_relax();
  4115. + pr_info("thread %d: exiting\n", data->cpu);
  4116. + return 0;
  4117. +}
  4118. +
  4119. +static struct affine_test_data *start_affine_test(int cpu, int drain)
  4120. +{
  4121. + struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
  4122. +
  4123. + if (!data)
  4124. + return NULL;
  4125. + data->cpu = cpu;
  4126. + data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
  4127. + data->drain = drain;
  4128. + data->num_enter = 0;
  4129. + data->num_exit = 0;
  4130. + init_completion(&data->wakethread);
  4131. + init_completion(&data->wakeparent);
  4132. + list_add_tail(&data->node, &threads);
  4133. + data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
  4134. + BUG_ON(IS_ERR(data->t));
  4135. + kthread_bind(data->t, cpu);
  4136. + wake_up_process(data->t);
  4137. + return data;
  4138. +}
  4139. +
  4140. +void bman_test_thresh(void)
  4141. +{
  4142. + int loop = TEST_NUMBUFS;
  4143. + int ret, num_cpus = 0;
  4144. + struct affine_test_data *data, *drainer = NULL;
  4145. +
  4146. + pr_info("bman_test_thresh: start\n");
  4147. +
  4148. + /* allocate a BPID and seed it */
  4149. + pool_nocb = bman_new_pool(&params_nocb);
  4150. + BUG_ON(!pool_nocb);
  4151. + while (loop--) {
  4152. + struct bm_buffer buf;
  4153. + bm_buffer_set64(&buf, 0x0badbeef + loop);
  4154. + ret = bman_release(pool_nocb, &buf, 1,
  4155. + BMAN_RELEASE_FLAG_WAIT);
  4156. + BUG_ON(ret);
  4157. + }
  4158. + while (!bman_rcr_is_empty())
  4159. + cpu_relax();
  4160. + pr_info("bman_test_thresh: buffers are in\n");
  4161. +
  4162. + /* create threads and wait for them to create pools */
  4163. + params_cb.bpid = bman_get_params(pool_nocb)->bpid;
  4164. + for_each_cpu(loop, cpu_online_mask) {
  4165. + data = start_affine_test(loop, drainer ? 0 : 1);
  4166. + BUG_ON(!data);
  4167. + if (!drainer)
  4168. + drainer = data;
  4169. + num_cpus++;
  4170. + wait_for_completion(&data->wakeparent);
  4171. + }
  4172. +
  4173. + /* signal the drainer to start draining */
  4174. + complete(&drainer->wakethread);
  4175. + wait_for_completion(&drainer->wakeparent);
  4176. + init_completion(&drainer->wakeparent);
  4177. +
  4178. + /* tear down */
  4179. + list_for_each_entry_safe(data, drainer, &threads, node) {
  4180. + complete(&data->wakethread);
  4181. + ret = kthread_stop(data->t);
  4182. + BUG_ON(ret);
  4183. + list_del(&data->node);
  4184. + /* check that we get the expected callbacks (and no others) */
  4185. + BUG_ON(data->num_enter != 1);
  4186. + BUG_ON(data->num_exit != 0);
  4187. + kfree(data);
  4188. + }
  4189. + bman_free_pool(pool_nocb);
  4190. +
  4191. + pr_info("bman_test_thresh: done\n");
  4192. +}
  4193. --- /dev/null
  4194. +++ b/drivers/staging/fsl_qbman/dpa_alloc.c
  4195. @@ -0,0 +1,706 @@
  4196. +/* Copyright 2009-2012 Freescale Semiconductor, Inc.
  4197. + *
  4198. + * Redistribution and use in source and binary forms, with or without
  4199. + * modification, are permitted provided that the following conditions are met:
  4200. + * * Redistributions of source code must retain the above copyright
  4201. + * notice, this list of conditions and the following disclaimer.
  4202. + * * Redistributions in binary form must reproduce the above copyright
  4203. + * notice, this list of conditions and the following disclaimer in the
  4204. + * documentation and/or other materials provided with the distribution.
  4205. + * * Neither the name of Freescale Semiconductor nor the
  4206. + * names of its contributors may be used to endorse or promote products
  4207. + * derived from this software without specific prior written permission.
  4208. + *
  4209. + *
  4210. + * ALTERNATIVELY, this software may be distributed under the terms of the
  4211. + * GNU General Public License ("GPL") as published by the Free Software
  4212. + * Foundation, either version 2 of that License or (at your option) any
  4213. + * later version.
  4214. + *
  4215. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  4216. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  4217. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  4218. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  4219. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  4220. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  4221. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  4222. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4223. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  4224. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4225. + */
  4226. +
  4227. +#include "dpa_sys.h"
  4228. +#include <linux/fsl_qman.h>
  4229. +#include <linux/fsl_bman.h>
  4230. +
  4231. +/* Qman and Bman APIs are front-ends to the common code. */
  4232. +
  4233. +static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
  4234. +static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
  4235. +static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
  4236. +static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
  4237. +static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
  4238. +static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
  4239. +static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
  4240. +static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
  4241. +
  4242. +/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
  4243. + * FQIDs (probably from user-space), it can filter out those that aren't in the
  4244. + * OOS state (better to leak a h/w resource than to crash). This function
  4245. + * returns the number of invalid IDs that were not released. */
  4246. +static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
  4247. + int (*is_valid)(u32 id))
  4248. +{
  4249. + int valid_mode = 0;
  4250. + u32 loop = id, total_invalid = 0;
  4251. + while (loop < (id + count)) {
  4252. + int isvalid = is_valid ? is_valid(loop) : 1;
  4253. + if (!valid_mode) {
  4254. + /* We're looking for a valid ID to terminate an invalid
  4255. + * range */
  4256. + if (isvalid) {
  4257. + /* We finished a range of invalid IDs, a valid
  4258. + * range is now underway */
  4259. + valid_mode = 1;
  4260. + count -= (loop - id);
  4261. + id = loop;
  4262. + } else
  4263. + total_invalid++;
  4264. + } else {
  4265. + /* We're looking for an invalid ID to terminate a
  4266. + * valid range */
  4267. + if (!isvalid) {
  4268. + /* Release the range of valid IDs, an invalid
  4269. + * range is now underway */
  4270. + if (loop > id)
  4271. + dpa_alloc_free(alloc, id, loop - id);
  4272. + valid_mode = 0;
  4273. + }
  4274. + }
  4275. + loop++;
  4276. + }
  4277. + /* Release any unterminated range of valid IDs */
  4278. + if (valid_mode && count)
  4279. + dpa_alloc_free(alloc, id, count);
  4280. + return total_invalid;
  4281. +}
  4282. +
  4283. +/* BPID allocator front-end */
  4284. +
  4285. +int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
  4286. +{
  4287. + return dpa_alloc_new(&bpalloc, result, count, align, partial);
  4288. +}
  4289. +EXPORT_SYMBOL(bman_alloc_bpid_range);
  4290. +
  4291. +static int bp_cleanup(u32 bpid)
  4292. +{
  4293. + return bman_shutdown_pool(bpid) == 0;
  4294. +}
  4295. +void bman_release_bpid_range(u32 bpid, u32 count)
  4296. +{
  4297. + u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
  4298. + if (total_invalid)
  4299. + pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
  4300. + bpid, bpid + count - 1, count, total_invalid);
  4301. +}
  4302. +EXPORT_SYMBOL(bman_release_bpid_range);
  4303. +
  4304. +void bman_seed_bpid_range(u32 bpid, u32 count)
  4305. +{
  4306. + dpa_alloc_seed(&bpalloc, bpid, count);
  4307. +}
  4308. +EXPORT_SYMBOL(bman_seed_bpid_range);
  4309. +
  4310. +int bman_reserve_bpid_range(u32 bpid, u32 count)
  4311. +{
  4312. + return dpa_alloc_reserve(&bpalloc, bpid, count);
  4313. +}
  4314. +EXPORT_SYMBOL(bman_reserve_bpid_range);
  4315. +
  4316. +
  4317. +/* FQID allocator front-end */
  4318. +
  4319. +int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
  4320. +{
  4321. + return dpa_alloc_new(&fqalloc, result, count, align, partial);
  4322. +}
  4323. +EXPORT_SYMBOL(qman_alloc_fqid_range);
  4324. +
  4325. +static int fq_cleanup(u32 fqid)
  4326. +{
  4327. + return qman_shutdown_fq(fqid) == 0;
  4328. +}
  4329. +void qman_release_fqid_range(u32 fqid, u32 count)
  4330. +{
  4331. + u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
  4332. + if (total_invalid)
  4333. + pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
  4334. + fqid, fqid + count - 1, count, total_invalid);
  4335. +}
  4336. +EXPORT_SYMBOL(qman_release_fqid_range);
  4337. +
  4338. +int qman_reserve_fqid_range(u32 fqid, u32 count)
  4339. +{
  4340. + return dpa_alloc_reserve(&fqalloc, fqid, count);
  4341. +}
  4342. +EXPORT_SYMBOL(qman_reserve_fqid_range);
  4343. +
  4344. +void qman_seed_fqid_range(u32 fqid, u32 count)
  4345. +{
  4346. + dpa_alloc_seed(&fqalloc, fqid, count);
  4347. +}
  4348. +EXPORT_SYMBOL(qman_seed_fqid_range);
  4349. +
  4350. +/* Pool-channel allocator front-end */
  4351. +
  4352. +int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
  4353. +{
  4354. + return dpa_alloc_new(&qpalloc, result, count, align, partial);
  4355. +}
  4356. +EXPORT_SYMBOL(qman_alloc_pool_range);
  4357. +
  4358. +static int qpool_cleanup(u32 qp)
  4359. +{
  4360. + /* We query all FQDs starting from
  4361. + * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
  4362. + * whose destination channel is the pool-channel being released.
  4363. + * When a non-OOS FQD is found we attempt to clean it up */
  4364. + struct qman_fq fq = {
  4365. + .fqid = 1
  4366. + };
  4367. + int err;
  4368. + do {
  4369. + struct qm_mcr_queryfq_np np;
  4370. + err = qman_query_fq_np(&fq, &np);
  4371. + if (err)
  4372. + /* FQID range exceeded, found no problems */
  4373. + return 1;
  4374. + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
  4375. + struct qm_fqd fqd;
  4376. + err = qman_query_fq(&fq, &fqd);
  4377. + BUG_ON(err);
  4378. + if (fqd.dest.channel == qp) {
  4379. + /* The channel is the FQ's target, clean it */
  4380. + if (qman_shutdown_fq(fq.fqid) != 0)
  4381. + /* Couldn't shut down the FQ
  4382. + so the pool must be leaked */
  4383. + return 0;
  4384. + }
  4385. + }
  4386. + /* Move to the next FQID */
  4387. + fq.fqid++;
  4388. + } while (1);
  4389. +}
  4390. +void qman_release_pool_range(u32 qp, u32 count)
  4391. +{
  4392. + u32 total_invalid = release_id_range(&qpalloc, qp,
  4393. + count, qpool_cleanup);
  4394. + if (total_invalid) {
  4395. + /* Pool channels are almost always used individually */
  4396. + if (count == 1)
  4397. + pr_err("Pool channel 0x%x had %d leaks\n",
  4398. + qp, total_invalid);
  4399. + else
  4400. + pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
  4401. + qp, qp + count - 1, count, total_invalid);
  4402. + }
  4403. +}
  4404. +EXPORT_SYMBOL(qman_release_pool_range);
  4405. +
  4406. +
  4407. +void qman_seed_pool_range(u32 poolid, u32 count)
  4408. +{
  4409. + dpa_alloc_seed(&qpalloc, poolid, count);
  4410. +
  4411. +}
  4412. +EXPORT_SYMBOL(qman_seed_pool_range);
  4413. +
  4414. +int qman_reserve_pool_range(u32 poolid, u32 count)
  4415. +{
  4416. + return dpa_alloc_reserve(&qpalloc, poolid, count);
  4417. +}
  4418. +EXPORT_SYMBOL(qman_reserve_pool_range);
  4419. +
  4420. +
  4421. +/* CGR ID allocator front-end */
  4422. +
  4423. +int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
  4424. +{
  4425. + return dpa_alloc_new(&cgralloc, result, count, align, partial);
  4426. +}
  4427. +EXPORT_SYMBOL(qman_alloc_cgrid_range);
  4428. +
  4429. +static int cqr_cleanup(u32 cgrid)
  4430. +{
   4431. + /* We query all FQDs starting from FQID 1 until we get an
   4432. + * "invalid FQID" error, looking for non-OOS FQDs whose CGR is the
   4433. + * CGR being released.
   4434. + */
  4435. + struct qman_fq fq = {
  4436. + .fqid = 1
  4437. + };
  4438. + int err;
  4439. + do {
  4440. + struct qm_mcr_queryfq_np np;
  4441. + err = qman_query_fq_np(&fq, &np);
  4442. + if (err)
  4443. + /* FQID range exceeded, found no problems */
  4444. + return 1;
  4445. + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
  4446. + struct qm_fqd fqd;
  4447. + err = qman_query_fq(&fq, &fqd);
  4448. + BUG_ON(err);
  4449. + if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
  4450. + (fqd.cgid == cgrid)) {
  4451. + pr_err("CRGID 0x%x is being used by FQID 0x%x,"
  4452. + " CGR will be leaked\n",
  4453. + cgrid, fq.fqid);
  4454. + return 1;
  4455. + }
  4456. + }
  4457. + /* Move to the next FQID */
  4458. + fq.fqid++;
  4459. + } while (1);
  4460. +}
  4461. +
  4462. +void qman_release_cgrid_range(u32 cgrid, u32 count)
  4463. +{
  4464. + u32 total_invalid = release_id_range(&cgralloc, cgrid,
  4465. + count, cqr_cleanup);
  4466. + if (total_invalid)
  4467. + pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
  4468. + cgrid, cgrid + count - 1, count, total_invalid);
  4469. +}
  4470. +EXPORT_SYMBOL(qman_release_cgrid_range);
  4471. +
  4472. +void qman_seed_cgrid_range(u32 cgrid, u32 count)
  4473. +{
  4474. + dpa_alloc_seed(&cgralloc, cgrid, count);
  4475. +
  4476. +}
  4477. +EXPORT_SYMBOL(qman_seed_cgrid_range);
  4478. +
  4479. +/* CEETM CHANNEL ID allocator front-end */
  4480. +int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
  4481. + int partial)
  4482. +{
  4483. + return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
  4484. +}
  4485. +EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
  4486. +
  4487. +int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
  4488. + int partial)
  4489. +{
  4490. + return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
  4491. +}
  4492. +EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
  4493. +
  4494. +void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
  4495. +{
  4496. + u32 total_invalid;
  4497. +
  4498. + total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
  4499. + NULL);
  4500. + if (total_invalid)
  4501. + pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
  4502. + channelid, channelid + count - 1, count, total_invalid);
  4503. +}
  4504. +EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
  4505. +
  4506. +void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
  4507. +{
  4508. + dpa_alloc_seed(&ceetm0_challoc, channelid, count);
  4509. +
  4510. +}
  4511. +EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
  4512. +
  4513. +void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
  4514. +{
  4515. + u32 total_invalid;
  4516. + total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
  4517. + NULL);
  4518. + if (total_invalid)
  4519. + pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
  4520. + channelid, channelid + count - 1, count, total_invalid);
  4521. +}
  4522. +EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
  4523. +
  4524. +void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
  4525. +{
  4526. + dpa_alloc_seed(&ceetm1_challoc, channelid, count);
  4527. +
  4528. +}
  4529. +EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
  4530. +
  4531. +/* CEETM LFQID allocator front-end */
  4532. +int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
  4533. + int partial)
  4534. +{
  4535. + return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
  4536. +}
  4537. +EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
  4538. +
  4539. +int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
  4540. + int partial)
  4541. +{
  4542. + return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
  4543. +}
  4544. +EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
  4545. +
  4546. +void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
  4547. +{
  4548. + u32 total_invalid;
  4549. +
  4550. + total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
  4551. + NULL);
  4552. + if (total_invalid)
  4553. + pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
  4554. + lfqid, lfqid + count - 1, count, total_invalid);
  4555. +}
  4556. +EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
  4557. +
  4558. +void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
  4559. +{
  4560. + dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
  4561. +
  4562. +}
  4563. +EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
  4564. +
  4565. +void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
  4566. +{
  4567. + u32 total_invalid;
  4568. +
  4569. + total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count,
  4570. + NULL);
  4571. + if (total_invalid)
  4572. + pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
  4573. + lfqid, lfqid + count - 1, count, total_invalid);
  4574. +}
  4575. +EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
  4576. +
  4577. +void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
  4578. +{
  4579. + dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
  4580. +
  4581. +}
  4582. +EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
  4583. +
  4584. +
  4585. +/* Everything else is the common backend to all the allocators */
  4586. +
  4587. +/* The allocator is a (possibly-empty) list of these; */
  4588. +struct alloc_node {
  4589. + struct list_head list;
  4590. + u32 base;
  4591. + u32 num;
  4592. + /* refcount and is_alloced are only set
  4593. + when the node is in the used list */
  4594. + unsigned int refcount;
  4595. + int is_alloced;
  4596. +};
  4597. +
  4598. +/* #define DPA_ALLOC_DEBUG */
  4599. +
  4600. +#ifdef DPA_ALLOC_DEBUG
  4601. +#define DPRINT pr_info
  4602. +static void DUMP(struct dpa_alloc *alloc)
  4603. +{
  4604. + int off = 0;
   4605. + char buf[256] = "";
  4606. + struct alloc_node *p;
  4607. + pr_info("Free Nodes\n");
  4608. + list_for_each_entry(p, &alloc->free, list) {
  4609. + if (off < 255)
  4610. + off += snprintf(buf + off, 255-off, "{%d,%d}",
  4611. + p->base, p->base + p->num - 1);
  4612. + }
  4613. + pr_info("%s\n", buf);
  4614. +
  4615. + off = 0;
  4616. + pr_info("Used Nodes\n");
  4617. + list_for_each_entry(p, &alloc->used, list) {
  4618. + if (off < 255)
  4619. + off += snprintf(buf + off, 255-off, "{%d,%d}",
  4620. + p->base, p->base + p->num - 1);
  4621. + }
  4622. + pr_info("%s\n", buf);
  4623. +
  4624. +
  4625. +
  4626. +}
  4627. +#else
  4628. +#define DPRINT(x...)
  4629. +#define DUMP(a)
  4630. +#endif
  4631. +
  4632. +int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
  4633. + int partial)
  4634. +{
  4635. + struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
  4636. + u32 base, next_best_base = 0, num = 0, next_best_num = 0;
  4637. + struct alloc_node *margin_left, *margin_right;
  4638. +
  4639. + *result = (u32)-1;
  4640. + DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
  4641. + DUMP(alloc);
  4642. + /* If 'align' is 0, it should behave as though it was 1 */
  4643. + if (!align)
  4644. + align = 1;
  4645. + margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
  4646. + if (!margin_left)
  4647. + goto err;
  4648. + margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
  4649. + if (!margin_right) {
  4650. + kfree(margin_left);
  4651. + goto err;
  4652. + }
  4653. + spin_lock_irq(&alloc->lock);
  4654. + list_for_each_entry(i, &alloc->free, list) {
  4655. + base = (i->base + align - 1) / align;
  4656. + base *= align;
  4657. + if ((base - i->base) >= i->num)
  4658. + /* alignment is impossible, regardless of count */
  4659. + continue;
  4660. + num = i->num - (base - i->base);
  4661. + if (num >= count) {
  4662. + /* this one will do nicely */
  4663. + num = count;
  4664. + goto done;
  4665. + }
  4666. + if (num > next_best_num) {
  4667. + next_best = i;
  4668. + next_best_base = base;
  4669. + next_best_num = num;
  4670. + }
  4671. + }
  4672. + if (partial && next_best) {
  4673. + i = next_best;
  4674. + base = next_best_base;
  4675. + num = next_best_num;
  4676. + } else
  4677. + i = NULL;
  4678. +done:
  4679. + if (i) {
  4680. + if (base != i->base) {
  4681. + margin_left->base = i->base;
  4682. + margin_left->num = base - i->base;
  4683. + list_add_tail(&margin_left->list, &i->list);
  4684. + } else
  4685. + kfree(margin_left);
  4686. + if ((base + num) < (i->base + i->num)) {
  4687. + margin_right->base = base + num;
  4688. + margin_right->num = (i->base + i->num) -
  4689. + (base + num);
  4690. + list_add(&margin_right->list, &i->list);
  4691. + } else
  4692. + kfree(margin_right);
  4693. + list_del(&i->list);
  4694. + kfree(i);
  4695. + *result = base;
  4696. + } else {
  4697. + spin_unlock_irq(&alloc->lock);
  4698. + kfree(margin_left);
  4699. + kfree(margin_right);
  4700. + }
  4701. +
  4702. +err:
  4703. + DPRINT("returning %d\n", i ? num : -ENOMEM);
  4704. + DUMP(alloc);
  4705. + if (!i)
  4706. + return -ENOMEM;
  4707. +
   4708. + /* Add the allocation to the used list with a refcount of 1 */
   4709. + used_node = kmalloc(sizeof(*used_node), GFP_ATOMIC); /* lock still held */
  4710. + if (!used_node) {
  4711. + spin_unlock_irq(&alloc->lock);
  4712. + return -ENOMEM;
  4713. + }
  4714. + used_node->base = *result;
  4715. + used_node->num = num;
  4716. + used_node->refcount = 1;
  4717. + used_node->is_alloced = 1;
  4718. + list_add_tail(&used_node->list, &alloc->used);
  4719. + spin_unlock_irq(&alloc->lock);
  4720. + return (int)num;
  4721. +}
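/*
 * Worked example of the carving above (assuming a free node {base 5, num 20},
 * i.e. IDs 5..24): a request for count 4 aligned to 8 rounds the base up to 8,
 * splits off margin_left {5, 3}, returns IDs 8..11, and re-inserts
 * margin_right {12, 13} into the free list.
 */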
  4722. +
  4723. +/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
  4724. + * forcing error-handling on to users in the deallocation path. */
  4725. +static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
  4726. +{
  4727. + struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
  4728. + BUG_ON(!node);
  4729. + DPRINT("release_range(%d,%d)\n", base_id, count);
  4730. + DUMP(alloc);
  4731. + BUG_ON(!count);
  4732. + spin_lock_irq(&alloc->lock);
  4733. +
  4734. +
  4735. + node->base = base_id;
  4736. + node->num = count;
  4737. + list_for_each_entry(i, &alloc->free, list) {
  4738. + if (i->base >= node->base) {
  4739. + /* BUG_ON(any overlapping) */
  4740. + BUG_ON(i->base < (node->base + node->num));
  4741. + list_add_tail(&node->list, &i->list);
  4742. + goto done;
  4743. + }
  4744. + }
  4745. + list_add_tail(&node->list, &alloc->free);
  4746. +done:
  4747. + /* Merge to the left */
  4748. + i = list_entry(node->list.prev, struct alloc_node, list);
  4749. + if (node->list.prev != &alloc->free) {
  4750. + BUG_ON((i->base + i->num) > node->base);
  4751. + if ((i->base + i->num) == node->base) {
  4752. + node->base = i->base;
  4753. + node->num += i->num;
  4754. + list_del(&i->list);
  4755. + kfree(i);
  4756. + }
  4757. + }
  4758. + /* Merge to the right */
  4759. + i = list_entry(node->list.next, struct alloc_node, list);
  4760. + if (node->list.next != &alloc->free) {
  4761. + BUG_ON((node->base + node->num) > i->base);
  4762. + if ((node->base + node->num) == i->base) {
  4763. + node->num += i->num;
  4764. + list_del(&i->list);
  4765. + kfree(i);
  4766. + }
  4767. + }
  4768. + spin_unlock_irq(&alloc->lock);
  4769. + DUMP(alloc);
  4770. +}
  4771. +
  4772. +
  4773. +void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
  4774. +{
  4775. + struct alloc_node *i = NULL;
  4776. + spin_lock_irq(&alloc->lock);
  4777. +
  4778. + /* First find the node in the used list and decrement its ref count */
  4779. + list_for_each_entry(i, &alloc->used, list) {
  4780. + if (i->base == base_id && i->num == count) {
  4781. + --i->refcount;
  4782. + if (i->refcount == 0) {
  4783. + list_del(&i->list);
  4784. + spin_unlock_irq(&alloc->lock);
  4785. + if (i->is_alloced)
  4786. + _dpa_alloc_free(alloc, base_id, count);
  4787. + kfree(i);
  4788. + return;
  4789. + }
  4790. + spin_unlock_irq(&alloc->lock);
  4791. + return;
  4792. + }
  4793. + }
  4794. + /* Couldn't find the allocation */
  4795. + pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
  4796. + base_id, count);
  4797. + spin_unlock_irq(&alloc->lock);
  4798. +}
  4799. +
  4800. +void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
  4801. +{
  4802. + /* Same as free but no previous allocation checking is needed */
  4803. + _dpa_alloc_free(alloc, base_id, count);
  4804. +}
  4805. +
  4806. +
  4807. +int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
  4808. +{
  4809. + struct alloc_node *i = NULL, *used_node;
  4810. +
  4811. + DPRINT("alloc_reserve(%d,%d)\n", base, num);
  4812. + DUMP(alloc);
  4813. +
  4814. + spin_lock_irq(&alloc->lock);
  4815. +
  4816. + /* Check for the node in the used list.
   4817. + If found, increase its refcount */
  4818. + list_for_each_entry(i, &alloc->used, list) {
  4819. + if ((i->base == base) && (i->num == num)) {
  4820. + ++i->refcount;
  4821. + spin_unlock_irq(&alloc->lock);
  4822. + return 0;
  4823. + }
  4824. + if ((base >= i->base) && (base < (i->base + i->num))) {
  4825. + /* This is an attempt to reserve a region that was
  4826. + already reserved or alloced with a different
  4827. + base or num */
  4828. + pr_err("Cannot reserve %d - %d, it overlaps with"
  4829. + " existing reservation from %d - %d\n",
  4830. + base, base + num - 1, i->base,
  4831. + i->base + i->num - 1);
  4832. + spin_unlock_irq(&alloc->lock);
  4833. + return -1;
  4834. + }
  4835. + }
  4836. + /* Check to make sure this ID isn't in the free list */
  4837. + list_for_each_entry(i, &alloc->free, list) {
  4838. + if ((base >= i->base) && (base < (i->base + i->num))) {
  4839. + /* yep, the reservation is within this node */
  4840. + pr_err("Cannot reserve %d - %d, it overlaps with"
  4841. + " free range %d - %d and must be alloced\n",
  4842. + base, base + num - 1,
  4843. + i->base, i->base + i->num - 1);
  4844. + spin_unlock_irq(&alloc->lock);
  4845. + return -1;
  4846. + }
  4847. + }
  4848. + /* Add the allocation to the used list with a refcount of 1 */
   4849. + used_node = kmalloc(sizeof(*used_node), GFP_ATOMIC); /* lock still held */
  4850. + if (!used_node) {
  4851. + spin_unlock_irq(&alloc->lock);
  4852. + return -ENOMEM;
  4853. +
  4854. + }
  4855. + used_node->base = base;
  4856. + used_node->num = num;
  4857. + used_node->refcount = 1;
  4858. + used_node->is_alloced = 0;
  4859. + list_add_tail(&used_node->list, &alloc->used);
  4860. + spin_unlock_irq(&alloc->lock);
  4861. + return 0;
  4862. +}
  4863. +
  4864. +
  4865. +int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
  4866. +{
  4867. + struct alloc_node *i = NULL;
  4868. + DPRINT("alloc_pop()\n");
  4869. + DUMP(alloc);
  4870. + spin_lock_irq(&alloc->lock);
  4871. + if (!list_empty(&alloc->free)) {
  4872. + i = list_entry(alloc->free.next, struct alloc_node, list);
  4873. + list_del(&i->list);
  4874. + }
  4875. + spin_unlock_irq(&alloc->lock);
  4876. + DPRINT("returning %d\n", i ? 0 : -ENOMEM);
  4877. + DUMP(alloc);
  4878. + if (!i)
  4879. + return -ENOMEM;
  4880. + *result = i->base;
  4881. + *count = i->num;
  4882. + kfree(i);
  4883. + return 0;
  4884. +}
  4885. +
  4886. +int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
  4887. +{
  4888. + struct alloc_node *i = NULL;
  4889. + int res = 0;
  4890. + DPRINT("alloc_check()\n");
  4891. + spin_lock_irq(&list_head->lock);
  4892. +
  4893. + list_for_each_entry(i, &list_head->free, list) {
  4894. + if ((item >= i->base) && (item < (i->base + i->num))) {
  4895. + res = 1;
  4896. + break;
  4897. + }
  4898. + }
  4899. + spin_unlock_irq(&list_head->lock);
  4900. + return res;
  4901. +}
  4902. --- /dev/null
  4903. +++ b/drivers/staging/fsl_qbman/dpa_sys.h
  4904. @@ -0,0 +1,259 @@
  4905. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  4906. + *
  4907. + * Redistribution and use in source and binary forms, with or without
  4908. + * modification, are permitted provided that the following conditions are met:
  4909. + * * Redistributions of source code must retain the above copyright
  4910. + * notice, this list of conditions and the following disclaimer.
  4911. + * * Redistributions in binary form must reproduce the above copyright
  4912. + * notice, this list of conditions and the following disclaimer in the
  4913. + * documentation and/or other materials provided with the distribution.
  4914. + * * Neither the name of Freescale Semiconductor nor the
  4915. + * names of its contributors may be used to endorse or promote products
  4916. + * derived from this software without specific prior written permission.
  4917. + *
  4918. + *
  4919. + * ALTERNATIVELY, this software may be distributed under the terms of the
  4920. + * GNU General Public License ("GPL") as published by the Free Software
  4921. + * Foundation, either version 2 of that License or (at your option) any
  4922. + * later version.
  4923. + *
  4924. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  4925. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  4926. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  4927. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  4928. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  4929. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  4930. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  4931. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4932. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  4933. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4934. + */
  4935. +
  4936. +#ifndef DPA_SYS_H
  4937. +#define DPA_SYS_H
  4938. +
  4939. +#include <linux/kernel.h>
  4940. +#include <linux/errno.h>
  4941. +#include <linux/io.h>
  4942. +#include <linux/dma-mapping.h>
  4943. +#include <linux/bootmem.h>
  4944. +#include <linux/slab.h>
  4945. +#include <linux/module.h>
  4946. +#include <linux/init.h>
  4947. +#include <linux/interrupt.h>
  4948. +#include <linux/delay.h>
  4949. +#include <linux/of_platform.h>
  4950. +#include <linux/of_address.h>
  4951. +#include <linux/of_irq.h>
  4952. +#include <linux/kthread.h>
  4953. +#include <linux/memblock.h>
  4954. +#include <linux/completion.h>
  4955. +#include <linux/log2.h>
  4956. +#include <linux/types.h>
  4957. +#include <linux/ioctl.h>
  4958. +#include <linux/miscdevice.h>
  4959. +#include <linux/uaccess.h>
  4960. +#include <linux/debugfs.h>
  4961. +#include <linux/seq_file.h>
  4962. +#include <linux/device.h>
  4963. +#include <linux/uio_driver.h>
  4964. +#include <linux/smp.h>
  4965. +#include <linux/fsl_hypervisor.h>
  4966. +#include <linux/vmalloc.h>
  4967. +#include <linux/ctype.h>
  4968. +#include <linux/math64.h>
  4969. +#include <linux/bitops.h>
  4970. +
  4971. +#include <linux/fsl_usdpaa.h>
  4972. +
  4973. +/* When copying aligned words or shorts, try to avoid memcpy() */
  4974. +#define CONFIG_TRY_BETTER_MEMCPY
  4975. +
  4976. +/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
  4977. +#define DPA_PORTAL_CE 0
  4978. +#define DPA_PORTAL_CI 1
  4979. +
  4980. +/***********************/
  4981. +/* Misc inline assists */
  4982. +/***********************/
  4983. +
  4984. +#if defined CONFIG_PPC32
  4985. +#include "dpa_sys_ppc32.h"
  4986. +#elif defined CONFIG_PPC64
  4987. +#include "dpa_sys_ppc64.h"
  4988. +#elif defined CONFIG_ARM
  4989. +#include "dpa_sys_arm.h"
  4990. +#elif defined CONFIG_ARM64
  4991. +#include "dpa_sys_arm64.h"
  4992. +#endif
  4993. +
  4994. +
  4995. +#ifdef CONFIG_FSL_DPA_CHECKING
  4996. +#define DPA_ASSERT(x) \
  4997. + do { \
  4998. + if (!(x)) { \
  4999. + pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
  5000. + __stringify_1(x)); \
  5001. + dump_stack(); \
  5002. + panic("assertion failure"); \
  5003. + } \
  5004. + } while (0)
  5005. +#else
  5006. +#define DPA_ASSERT(x)
  5007. +#endif
  5008. +
  5009. +/* memcpy() stuff - when you know alignments in advance */
  5010. +#ifdef CONFIG_TRY_BETTER_MEMCPY
  5011. +static inline void copy_words(void *dest, const void *src, size_t sz)
  5012. +{
  5013. + u32 *__dest = dest;
  5014. + const u32 *__src = src;
  5015. + size_t __sz = sz >> 2;
  5016. + BUG_ON((unsigned long)dest & 0x3);
  5017. + BUG_ON((unsigned long)src & 0x3);
  5018. + BUG_ON(sz & 0x3);
  5019. + while (__sz--)
  5020. + *(__dest++) = *(__src++);
  5021. +}
  5022. +static inline void copy_shorts(void *dest, const void *src, size_t sz)
  5023. +{
  5024. + u16 *__dest = dest;
  5025. + const u16 *__src = src;
  5026. + size_t __sz = sz >> 1;
  5027. + BUG_ON((unsigned long)dest & 0x1);
  5028. + BUG_ON((unsigned long)src & 0x1);
  5029. + BUG_ON(sz & 0x1);
  5030. + while (__sz--)
  5031. + *(__dest++) = *(__src++);
  5032. +}
  5033. +static inline void copy_bytes(void *dest, const void *src, size_t sz)
  5034. +{
  5035. + u8 *__dest = dest;
  5036. + const u8 *__src = src;
  5037. + while (sz--)
  5038. + *(__dest++) = *(__src++);
  5039. +}
  5040. +#else
  5041. +#define copy_words memcpy
  5042. +#define copy_shorts memcpy
  5043. +#define copy_bytes memcpy
  5044. +#endif
  5045. +
  5046. +/************/
  5047. +/* RB-trees */
  5048. +/************/
  5049. +
   5050. +/* We encapsulate RB-trees so that it's easier to use non-linux forms in
  5051. + * non-linux systems. This also encapsulates the extra plumbing that linux code
  5052. + * usually provides when using RB-trees. This encapsulation assumes that the
  5053. + * data type held by the tree is u32. */
  5054. +
  5055. +struct dpa_rbtree {
  5056. + struct rb_root root;
  5057. +};
  5058. +#define DPA_RBTREE { .root = RB_ROOT }
  5059. +
  5060. +static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
  5061. +{
  5062. + tree->root = RB_ROOT;
  5063. +}
  5064. +
  5065. +#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
  5066. +static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
  5067. +{ \
  5068. + struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
  5069. + while (*p) { \
  5070. + u32 item; \
  5071. + parent = *p; \
  5072. + item = rb_entry(parent, type, node_field)->val_field; \
  5073. + if (obj->val_field < item) \
  5074. + p = &parent->rb_left; \
  5075. + else if (obj->val_field > item) \
  5076. + p = &parent->rb_right; \
  5077. + else \
  5078. + return -EBUSY; \
  5079. + } \
  5080. + rb_link_node(&obj->node_field, parent, p); \
  5081. + rb_insert_color(&obj->node_field, &tree->root); \
  5082. + return 0; \
  5083. +} \
  5084. +static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
  5085. +{ \
  5086. + rb_erase(&obj->node_field, &tree->root); \
  5087. +} \
  5088. +static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
  5089. +{ \
  5090. + type *ret; \
  5091. + struct rb_node *p = tree->root.rb_node; \
  5092. + while (p) { \
  5093. + ret = rb_entry(p, type, node_field); \
  5094. + if (val < ret->val_field) \
  5095. + p = p->rb_left; \
  5096. + else if (val > ret->val_field) \
  5097. + p = p->rb_right; \
  5098. + else \
  5099. + return ret; \
  5100. + } \
  5101. + return NULL; \
  5102. +}
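/*
 * A minimal sketch of instantiating the wrapper above; the object type and
 * tree name are hypothetical, keyed by a u32 as the encapsulation requires.
 */
struct example_obj {
	struct rb_node node;
	u32 id;
};
IMPLEMENT_DPA_RBTREE(example_tree, struct example_obj, node, id)
/* This generates example_tree_push(), example_tree_del() and
 * example_tree_find(), each operating on a struct dpa_rbtree. */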
  5103. +
  5104. +/************/
  5105. +/* Bootargs */
  5106. +/************/
  5107. +
   5108. +/* Qman has "qportals=" and Bman has "bportals="; both use the same syntax: a
   5109. + * comma-separated list of items, each item being a cpu index and/or a range of
   5110. + * cpu indices, optionally prefixed by "s" to indicate that the portal associated
   5111. + * with that cpu should be shared. E.g. "qportals=s0-3,8" requests shared portals
   5112. + * on cpus 0..3 and an unshared portal on cpu 8. See bman_driver.c for specifics. */
  5113. +static int __parse_portals_cpu(const char **s, unsigned int *cpu)
  5114. +{
  5115. + *cpu = 0;
  5116. + if (!isdigit(**s))
  5117. + return -EINVAL;
  5118. + while (isdigit(**s))
  5119. + *cpu = *cpu * 10 + (*((*s)++) - '0');
  5120. + return 0;
  5121. +}
  5122. +static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
  5123. + struct cpumask *want_unshared,
  5124. + const char *argname)
  5125. +{
  5126. + const char *s = str;
  5127. + unsigned int shared, cpu1, cpu2, loop;
  5128. +
  5129. +keep_going:
  5130. + if (*s == 's') {
  5131. + shared = 1;
  5132. + s++;
  5133. + } else
  5134. + shared = 0;
  5135. + if (__parse_portals_cpu(&s, &cpu1))
  5136. + goto err;
  5137. + if (*s == '-') {
  5138. + s++;
  5139. + if (__parse_portals_cpu(&s, &cpu2))
  5140. + goto err;
  5141. + if (cpu2 < cpu1)
  5142. + goto err;
  5143. + } else
  5144. + cpu2 = cpu1;
  5145. + for (loop = cpu1; loop <= cpu2; loop++)
  5146. + cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
  5147. + if (*s == ',') {
  5148. + s++;
  5149. + goto keep_going;
  5150. + } else if ((*s == '\0') || isspace(*s))
  5151. + return 0;
  5152. +err:
  5153. + pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
  5154. + (unsigned long)s - (unsigned long)str);
  5155. + return -EINVAL;
  5156. +}
  5157. +
  5158. +/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
  5159. +int usdpaa_get_portal_config(struct file *filp, void *cinh,
  5160. + enum usdpaa_portal_type ptype, unsigned int *irq,
  5161. + void **iir_reg);
  5162. +
  5163. +#endif /* DPA_SYS_H */
  5164. --- /dev/null
  5165. +++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h
  5166. @@ -0,0 +1,95 @@
  5167. +/* Copyright 2016 Freescale Semiconductor, Inc.
  5168. + *
  5169. + * Redistribution and use in source and binary forms, with or without
  5170. + * modification, are permitted provided that the following conditions are met:
  5171. + * * Redistributions of source code must retain the above copyright
  5172. + * notice, this list of conditions and the following disclaimer.
  5173. + * * Redistributions in binary form must reproduce the above copyright
  5174. + * notice, this list of conditions and the following disclaimer in the
  5175. + * documentation and/or other materials provided with the distribution.
  5176. + * * Neither the name of Freescale Semiconductor nor the
  5177. + * names of its contributors may be used to endorse or promote products
  5178. + * derived from this software without specific prior written permission.
  5179. + *
  5180. + *
  5181. + * ALTERNATIVELY, this software may be distributed under the terms of the
  5182. + * GNU General Public License ("GPL") as published by the Free Software
  5183. + * Foundation, either version 2 of that License or (at your option) any
  5184. + * later version.
  5185. + *
  5186. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  5187. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  5188. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  5189. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  5190. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  5191. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  5192. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  5193. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5194. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  5195. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5196. + */
  5197. +
  5198. +#ifndef DPA_SYS_ARM_H
  5199. +#define DPA_SYS_ARM_H
  5200. +
  5201. +#include <asm/cacheflush.h>
  5202. +#include <asm/barrier.h>
  5203. +
  5204. +/* Implementation of ARM specific routines */
  5205. +
  5206. +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
  5207. + * barriers and that dcb*() won't fall victim to compiler or execution
  5208. + * reordering with respect to other code/instructions that manipulate the same
  5209. + * cacheline. */
  5210. +#define hwsync() { asm volatile("dmb st" : : : "memory"); }
  5211. +#define lwsync() { asm volatile("dmb st" : : : "memory"); }
  5212. +#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
  5213. +#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
  5214. +#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
  5215. +#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
  5216. +
  5217. +#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
  5218. +
  5219. +#define dcbf_64(p) \
  5220. + do { \
  5221. + dcbf((u32)p); \
  5222. + } while (0)
  5223. +/* Commonly used combo */
  5224. +#define dcbit_ro(p) \
  5225. + do { \
  5226. + dcbi((u32)p); \
  5227. + dcbt_ro((u32)p); \
  5228. + } while (0)
  5229. +
  5230. +static inline u64 mfatb(void)
  5231. +{
  5232. + return get_cycles();
  5233. +}
  5234. +
  5235. +static inline u32 in_be32(volatile void *addr)
  5236. +{
  5237. + return be32_to_cpu(*((volatile u32 *) addr));
  5238. +}
  5239. +
  5240. +static inline void out_be32(void *addr, u32 val)
  5241. +{
  5242. + *((u32 *) addr) = cpu_to_be32(val);
  5243. +}
  5244. +
  5245. +
  5246. +static inline void set_bits(unsigned long mask, volatile unsigned long *p)
  5247. +{
  5248. + *p |= mask;
  5249. +}
  5250. +static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
  5251. +{
  5252. + *p &= ~mask;
  5253. +}
  5254. +
  5255. +static inline void flush_dcache_range(unsigned long start, unsigned long stop)
  5256. +{
  5257. + __cpuc_flush_dcache_area((void *) start, stop - start);
  5258. +}
  5259. +
  5260. +#define hard_smp_processor_id() raw_smp_processor_id()
  5261. +#endif
  5262. --- /dev/null
  5263. +++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
  5264. @@ -0,0 +1,102 @@
  5265. +/* Copyright 2014 Freescale Semiconductor, Inc.
  5266. + *
  5267. + * Redistribution and use in source and binary forms, with or without
  5268. + * modification, are permitted provided that the following conditions are met:
  5269. + * * Redistributions of source code must retain the above copyright
  5270. + * notice, this list of conditions and the following disclaimer.
  5271. + * * Redistributions in binary form must reproduce the above copyright
  5272. + * notice, this list of conditions and the following disclaimer in the
  5273. + * documentation and/or other materials provided with the distribution.
  5274. + * * Neither the name of Freescale Semiconductor nor the
  5275. + * names of its contributors may be used to endorse or promote products
  5276. + * derived from this software without specific prior written permission.
  5277. + *
  5278. + *
  5279. + * ALTERNATIVELY, this software may be distributed under the terms of the
  5280. + * GNU General Public License ("GPL") as published by the Free Software
  5281. + * Foundation, either version 2 of that License or (at your option) any
  5282. + * later version.
  5283. + *
  5284. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  5285. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  5286. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  5287. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  5288. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  5289. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  5290. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  5291. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5292. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  5293. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5294. + */
  5295. +
  5296. +#ifndef DPA_SYS_ARM64_H
  5297. +#define DPA_SYS_ARM64_H
  5298. +
  5299. +#include <asm/cacheflush.h>
  5300. +#include <asm/barrier.h>
  5301. +
  5302. +/* Implementation of ARM 64 bit specific routines */
  5303. +
  5304. +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
  5305. + * barriers and that dcb*() won't fall victim to compiler or execution
  5306. + * reordering with respect to other code/instructions that manipulate the same
  5307. + * cacheline. */
  5308. +#define hwsync() { asm volatile("dmb st" : : : "memory"); }
  5309. +#define lwsync() { asm volatile("dmb st" : : : "memory"); }
  5310. +#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
  5311. +#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); }
  5312. +#define dcbt_rw(p) { asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); }
  5313. +#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
  5314. +#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
  5315. +
  5316. +#define dcbz_64(p) \
  5317. + do { \
  5318. + dcbz(p); \
  5319. + } while (0)
  5320. +
  5321. +#define dcbf_64(p) \
  5322. + do { \
  5323. + dcbf(p); \
  5324. + } while (0)
  5325. +/* Commonly used combo */
  5326. +#define dcbit_ro(p) \
  5327. + do { \
  5328. + dcbi(p); \
  5329. + dcbt_ro(p); \
  5330. + } while (0)
  5331. +
  5332. +static inline u64 mfatb(void)
  5333. +{
  5334. + return get_cycles();
  5335. +}
  5336. +
  5337. +static inline u32 in_be32(volatile void *addr)
  5338. +{
  5339. + return be32_to_cpu(*((volatile u32 *) addr));
  5340. +}
  5341. +
  5342. +static inline void out_be32(void *addr, u32 val)
  5343. +{
  5344. + *((u32 *) addr) = cpu_to_be32(val);
  5345. +}
  5346. +
  5347. +
  5348. +static inline void set_bits(unsigned long mask, volatile unsigned long *p)
  5349. +{
  5350. + *p |= mask;
  5351. +}
  5352. +static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
  5353. +{
  5354. + *p &= ~mask;
  5355. +}
  5356. +
  5357. +static inline void flush_dcache_range(unsigned long start, unsigned long stop)
  5358. +{
  5359. + __flush_dcache_area((void *) start, stop - start);
  5360. +}
  5361. +
  5362. +#define hard_smp_processor_id() raw_smp_processor_id()
  5363. +
  5364. +
  5365. +
  5366. +#endif
  5367. --- /dev/null
  5368. +++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
  5369. @@ -0,0 +1,70 @@
  5370. +/* Copyright 2014 Freescale Semiconductor, Inc.
  5371. + *
  5372. + * Redistribution and use in source and binary forms, with or without
  5373. + * modification, are permitted provided that the following conditions are met:
  5374. + * * Redistributions of source code must retain the above copyright
  5375. + * notice, this list of conditions and the following disclaimer.
  5376. + * * Redistributions in binary form must reproduce the above copyright
  5377. + * notice, this list of conditions and the following disclaimer in the
  5378. + * documentation and/or other materials provided with the distribution.
  5379. + * * Neither the name of Freescale Semiconductor nor the
  5380. + * names of its contributors may be used to endorse or promote products
  5381. + * derived from this software without specific prior written permission.
  5382. + *
  5383. + *
  5384. + * ALTERNATIVELY, this software may be distributed under the terms of the
  5385. + * GNU General Public License ("GPL") as published by the Free Software
  5386. + * Foundation, either version 2 of that License or (at your option) any
  5387. + * later version.
  5388. + *
  5389. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  5390. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  5391. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  5392. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  5393. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  5394. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  5395. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  5396. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5397. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  5398. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5399. + */
  5400. +
  5401. +#ifndef DPA_SYS_PPC32_H
  5402. +#define DPA_SYS_PPC32_H
  5403. +
  5404. +/* Implementation of PowerPC 32 bit specific routines */
  5405. +
  5406. +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
  5407. + * barriers and that dcb*() won't fall victim to compiler or execution
  5408. + * reordering with respect to other code/instructions that manipulate the same
  5409. + * cacheline. */
  5410. +#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
  5411. +#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
  5412. +#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
  5413. +#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
  5414. +#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
  5415. +#define dcbi(p) dcbf(p)
  5416. +
  5417. +#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
  5418. +#define dcbz_64(p) dcbzl(p)
  5419. +#define dcbf_64(p) dcbf(p)
  5420. +
  5421. +/* Commonly used combo */
  5422. +#define dcbit_ro(p) \
  5423. + do { \
  5424. + dcbi(p); \
  5425. + dcbt_ro(p); \
  5426. + } while (0)
  5427. +
  5428. +static inline u64 mfatb(void)
  5429. +{
  5430. + u32 hi, lo, chk;
  5431. + do {
  5432. + hi = mfspr(SPRN_ATBU);
  5433. + lo = mfspr(SPRN_ATBL);
  5434. + chk = mfspr(SPRN_ATBU);
  5435. + } while (unlikely(hi != chk));
  5436. + return ((u64)hi << 32) | (u64)lo;
  5437. +}
  5438. +
  5439. +#endif
  5440. --- /dev/null
  5441. +++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
  5442. @@ -0,0 +1,79 @@
  5443. +/* Copyright 2014 Freescale Semiconductor, Inc.
  5444. + *
  5445. + * Redistribution and use in source and binary forms, with or without
  5446. + * modification, are permitted provided that the following conditions are met:
  5447. + * * Redistributions of source code must retain the above copyright
  5448. + * notice, this list of conditions and the following disclaimer.
  5449. + * * Redistributions in binary form must reproduce the above copyright
  5450. + * notice, this list of conditions and the following disclaimer in the
  5451. + * documentation and/or other materials provided with the distribution.
  5452. + * * Neither the name of Freescale Semiconductor nor the
  5453. + * names of its contributors may be used to endorse or promote products
  5454. + * derived from this software without specific prior written permission.
  5455. + *
  5456. + *
  5457. + * ALTERNATIVELY, this software may be distributed under the terms of the
  5458. + * GNU General Public License ("GPL") as published by the Free Software
  5459. + * Foundation, either version 2 of that License or (at your option) any
  5460. + * later version.
  5461. + *
  5462. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  5463. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  5464. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  5465. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  5466. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  5467. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  5468. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  5469. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5470. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  5471. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5472. + */
  5473. +
  5474. +#ifndef DPA_SYS_PPC64_H
  5475. +#define DPA_SYS_PPC64_H
  5476. +
  5477. +/* Implementation of PowerPC 64 bit specific routines */
  5478. +
  5479. +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
  5480. + * barriers and that dcb*() won't fall victim to compiler or execution
  5481. + * reordering with respect to other code/instructions that manipulate the same
  5482. + * cacheline. */
  5483. +#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
  5484. +#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
  5485. +#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
  5486. +#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
  5487. +#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
  5488. +#define dcbi(p) dcbf(p)
  5489. +
  5490. +#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
  5491. +#define dcbz_64(p) \
  5492. + do { \
  5493. + dcbz((void*)p + 32); \
  5494. + dcbz(p); \
  5495. + } while (0)
  5496. +#define dcbf_64(p) \
  5497. + do { \
  5498. + dcbf((void*)p + 32); \
  5499. + dcbf(p); \
  5500. + } while (0)
  5501. +/* Commonly used combo */
  5502. +#define dcbit_ro(p) \
  5503. + do { \
  5504. + dcbi(p); \
  5505. + dcbi((void*)p + 32); \
  5506. + dcbt_ro(p); \
  5507. + dcbt_ro((void*)p + 32); \
  5508. + } while (0)
  5509. +
  5510. +static inline u64 mfatb(void)
  5511. +{
  5512. + u32 hi, lo, chk;
  5513. + do {
  5514. + hi = mfspr(SPRN_ATBU);
  5515. + lo = mfspr(SPRN_ATBL);
  5516. + chk = mfspr(SPRN_ATBU);
  5517. + } while (unlikely(hi != chk));
  5518. + return ((u64)hi << 32) | (u64)lo;
  5519. +}
  5520. +
  5521. +#endif
  5522. --- /dev/null
  5523. +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
  5524. @@ -0,0 +1,1982 @@
  5525. +/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
  5526. + * Authors: Andy Fleming <afleming@freescale.com>
  5527. + * Timur Tabi <timur@freescale.com>
  5528. + * Geoff Thorpe <Geoff.Thorpe@freescale.com>
  5529. + *
  5530. + * This file is licensed under the terms of the GNU General Public License
  5531. + * version 2. This program is licensed "as is" without any warranty of any
  5532. + * kind, whether express or implied.
  5533. + */
  5534. +
  5535. +
  5536. +#include <linux/miscdevice.h>
  5537. +#include <linux/fs.h>
  5538. +#include <linux/cdev.h>
  5539. +#include <linux/mm.h>
  5540. +#include <linux/of.h>
  5541. +#include <linux/memblock.h>
  5542. +#include <linux/slab.h>
  5543. +#include <linux/mman.h>
  5544. +#include <linux/of_reserved_mem.h>
  5545. +
  5546. +#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
  5547. +#include <mm/mmu_decl.h>
  5548. +#endif
  5549. +
  5550. +#include "dpa_sys.h"
  5551. +#include <linux/fsl_usdpaa.h>
  5552. +#include "bman_low.h"
  5553. +#include "qman_low.h"
  5554. +
  5555. +/* Physical address range of the memory reservation, exported for mm/mem.c */
  5556. +static u64 phys_start;
  5557. +static u64 phys_size;
  5558. +static u64 arg_phys_size;
  5559. +
  5560. +/* PFN versions of the above */
  5561. +static unsigned long pfn_start;
  5562. +static unsigned long pfn_size;
  5563. +
  5564. +/* Memory reservations are manipulated under this spinlock (which is why 'refs'
  5565. + * isn't atomic_t). */
  5566. +static DEFINE_SPINLOCK(mem_lock);
  5567. +
  5568. +/* The range of TLB1 indices */
  5569. +static unsigned int first_tlb;
  5570. +static unsigned int num_tlb = 1;
  5571. +static unsigned int current_tlb; /* loops around for fault handling */
  5572. +
  5573. +/* Memory reservation is represented as a list of 'mem_fragment's, some of which
  5574. + * may be mapped. Unmapped fragments are always merged where possible. */
  5575. +static LIST_HEAD(mem_list);
  5576. +
  5577. +struct mem_mapping;
  5578. +
  5579. +/* Memory fragments are in 'mem_list'. */
  5580. +struct mem_fragment {
  5581. + u64 base;
  5582. + u64 len;
  5583. + unsigned long pfn_base; /* PFN version of 'base' */
  5584. + unsigned long pfn_len; /* PFN version of 'len' */
  5585. + unsigned int refs; /* zero if unmapped */
   5586. + u64 root_len; /* Size of the original fragment */
   5587. + unsigned long root_pfn; /* PFN of the original fragment */
  5588. + struct list_head list;
  5589. + /* if mapped, flags+name captured at creation time */
  5590. + u32 flags;
  5591. + char name[USDPAA_DMA_NAME_MAX];
  5592. + u64 map_len;
  5593. + /* support multi-process locks per-memory-fragment. */
  5594. + int has_locking;
  5595. + wait_queue_head_t wq;
  5596. + struct mem_mapping *owner;
  5597. +};
  5598. +
  5599. +/* Mappings of memory fragments in 'struct ctx'. These are created from
  5600. + * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
  5601. + * mmap(). */
  5602. +struct mem_mapping {
  5603. + struct mem_fragment *root_frag;
  5604. + u32 frag_count;
  5605. + u64 total_size;
  5606. + struct list_head list;
  5607. + int refs;
  5608. + void *virt_addr;
  5609. +};
  5610. +
  5611. +struct portal_mapping {
  5612. + struct usdpaa_ioctl_portal_map user;
  5613. + union {
  5614. + struct qm_portal_config *qportal;
  5615. + struct bm_portal_config *bportal;
  5616. + };
  5617. + /* Declare space for the portals in case the process
   5618. + exits unexpectedly and needs to be cleaned up by the kernel */
  5619. + union {
  5620. + struct qm_portal qman_portal_low;
  5621. + struct bm_portal bman_portal_low;
  5622. + };
  5623. + struct list_head list;
  5624. + struct resource *phys;
  5625. + struct iommu_domain *iommu_domain;
  5626. +};
  5627. +
  5628. +/* Track the DPAA resources the process is using */
  5629. +struct active_resource {
  5630. + struct list_head list;
  5631. + u32 id;
  5632. + u32 num;
  5633. + unsigned int refcount;
  5634. +};
  5635. +
  5636. +/* Per-FD state (which should also be per-process but we don't enforce that) */
  5637. +struct ctx {
  5638. + /* Lock to protect the context */
  5639. + spinlock_t lock;
  5640. + /* Allocated resources get put here for accounting */
  5641. + struct list_head resources[usdpaa_id_max];
  5642. + /* list of DMA maps */
  5643. + struct list_head maps;
  5644. + /* list of portal maps */
  5645. + struct list_head portals;
  5646. +};
  5647. +
  5648. +/* Different resource classes */
  5649. +static const struct alloc_backend {
  5650. + enum usdpaa_id_type id_type;
  5651. + int (*alloc)(u32 *, u32, u32, int);
  5652. + void (*release)(u32 base, unsigned int count);
  5653. + int (*reserve)(u32 base, unsigned int count);
  5654. + const char *acronym;
  5655. +} alloc_backends[] = {
  5656. + {
  5657. + .id_type = usdpaa_id_fqid,
  5658. + .alloc = qman_alloc_fqid_range,
  5659. + .release = qman_release_fqid_range,
  5660. + .reserve = qman_reserve_fqid_range,
  5661. + .acronym = "FQID"
  5662. + },
  5663. + {
  5664. + .id_type = usdpaa_id_bpid,
  5665. + .alloc = bman_alloc_bpid_range,
  5666. + .release = bman_release_bpid_range,
  5667. + .reserve = bman_reserve_bpid_range,
  5668. + .acronym = "BPID"
  5669. + },
  5670. + {
  5671. + .id_type = usdpaa_id_qpool,
  5672. + .alloc = qman_alloc_pool_range,
  5673. + .release = qman_release_pool_range,
  5674. + .reserve = qman_reserve_pool_range,
  5675. + .acronym = "QPOOL"
  5676. + },
  5677. + {
  5678. + .id_type = usdpaa_id_cgrid,
  5679. + .alloc = qman_alloc_cgrid_range,
  5680. + .release = qman_release_cgrid_range,
  5681. + .acronym = "CGRID"
  5682. + },
  5683. + {
  5684. + .id_type = usdpaa_id_ceetm0_lfqid,
  5685. + .alloc = qman_alloc_ceetm0_lfqid_range,
  5686. + .release = qman_release_ceetm0_lfqid_range,
  5687. + .acronym = "CEETM0_LFQID"
  5688. + },
  5689. + {
  5690. + .id_type = usdpaa_id_ceetm0_channelid,
  5691. + .alloc = qman_alloc_ceetm0_channel_range,
  5692. + .release = qman_release_ceetm0_channel_range,
  5693. + .acronym = "CEETM0_LFQID"
  5694. + },
  5695. + {
  5696. + .id_type = usdpaa_id_ceetm1_lfqid,
  5697. + .alloc = qman_alloc_ceetm1_lfqid_range,
  5698. + .release = qman_release_ceetm1_lfqid_range,
  5699. + .acronym = "CEETM1_LFQID"
  5700. + },
  5701. + {
  5702. + .id_type = usdpaa_id_ceetm1_channelid,
  5703. + .alloc = qman_alloc_ceetm1_channel_range,
  5704. + .release = qman_release_ceetm1_channel_range,
  5705. + .acronym = "CEETM1_LFQID"
  5706. + },
  5707. + {
  5708. + /* This terminates the array */
  5709. + .id_type = usdpaa_id_max
  5710. + }
  5711. +};
  5712. +
   5713. +/* Determines the largest acceptable page size for a given size.
   5714. + The sizes are limited to those that TLB1 accepts */
  5715. +static u32 largest_page_size(u32 size)
  5716. +{
  5717. + int shift = 30; /* Start at 1G size */
  5718. + if (size < 4096)
  5719. + return 0;
  5720. + do {
  5721. + if (size >= (1<<shift))
  5722. + return 1<<shift;
  5723. + shift -= 2;
   5724. + } while (shift >= 12); /* Down to 4K */
  5725. + return 0;
  5726. +}
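/*
 * E.g. largest_page_size(0x500000) tries 1G, 256M, 64M and 16M before
 * settling on 4M (0x400000), the biggest TLB1-capable size not exceeding
 * the 5MB input.
 */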
  5727. +
  5728. +/* Determine if value is power of 4 */
  5729. +static inline bool is_power_of_4(u64 x)
  5730. +{
  5731. + if (x == 0 || ((x & (x - 1)) != 0))
  5732. + return false;
  5733. + return !!(x & 0x5555555555555555ull);
  5734. +}
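/*
 * E.g. 1, 4, 16 and 64 are powers of 4; 8 is a power of 2 but its only set
 * bit is in an odd position, so the 0x5555... mask check rejects it.
 */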
  5735. +
  5736. +/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
  5737. + * splits the fragment into 4 and returns the upper-most. (The caller can loop
  5738. + * until it has a suitable fragment size.) */
  5739. +static struct mem_fragment *split_frag(struct mem_fragment *frag)
  5740. +{
  5741. + struct mem_fragment *x[3];
  5742. +
  5743. + x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
  5744. + x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
  5745. + x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
  5746. + if (!x[0] || !x[1] || !x[2]) {
  5747. + kfree(x[0]);
  5748. + kfree(x[1]);
  5749. + kfree(x[2]);
  5750. + return NULL;
  5751. + }
  5752. + BUG_ON(frag->refs);
  5753. + frag->len >>= 2;
  5754. + frag->pfn_len >>= 2;
  5755. + x[0]->base = frag->base + frag->len;
  5756. + x[1]->base = x[0]->base + frag->len;
  5757. + x[2]->base = x[1]->base + frag->len;
  5758. + x[0]->len = x[1]->len = x[2]->len = frag->len;
  5759. + x[0]->pfn_base = frag->pfn_base + frag->pfn_len;
  5760. + x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len;
  5761. + x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
  5762. + x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
  5763. + x[0]->refs = x[1]->refs = x[2]->refs = 0;
  5764. + x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
  5765. + x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn;
  5766. + x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0;
  5767. + list_add_tail(&x[0]->list, &frag->list);
  5768. + list_add_tail(&x[1]->list, &x[0]->list);
  5769. + list_add_tail(&x[2]->list, &x[1]->list);
  5770. + return x[2];
  5771. +}
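/*
 * E.g. splitting an unused 64MB fragment yields four 16MB pieces: the
 * original descriptor shrinks to the lowest quarter and the uppermost
 * quarter (x[2]) is returned, so the caller can keep splitting until the
 * piece matches the mapping size it needs.
 */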
  5772. +
  5773. +static __maybe_unused void dump_frags(void)
  5774. +{
  5775. + struct mem_fragment *frag;
  5776. + int i = 0;
  5777. + list_for_each_entry(frag, &mem_list, list) {
  5778. + pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n",
  5779. + i, frag->base, frag->pfn_base,
  5780. + frag->len, frag->root_len, frag->root_pfn,
  5781. + frag->refs, frag->name);
  5782. + ++i;
  5783. + }
  5784. +}
  5785. +
  5786. +/* Walk the list of fragments and adjoin neighbouring segments if possible */
  5787. +static void compress_frags(void)
  5788. +{
  5789. + /* Walk the fragment list and combine fragments */
  5790. + struct mem_fragment *frag, *nxtfrag;
  5791. + u64 len = 0;
  5792. +
  5793. + int i, numfrags;
  5794. +
  5795. +
  5796. + frag = list_entry(mem_list.next, struct mem_fragment, list);
  5797. +
  5798. + while (&frag->list != &mem_list) {
   5799. + /* Must combine consecutive fragments with the
   5800. + same root_pfn such that their total length is a power of 4 */
  5801. + if (frag->refs != 0) {
  5802. + frag = list_entry(frag->list.next,
  5803. + struct mem_fragment, list);
  5804. + continue; /* Not this window */
  5805. + }
  5806. + len = frag->len;
  5807. + numfrags = 0;
  5808. + nxtfrag = list_entry(frag->list.next,
  5809. + struct mem_fragment, list);
  5810. + while (true) {
  5811. + if (&nxtfrag->list == &mem_list) {
  5812. + numfrags = 0;
  5813. + break; /* End of list */
  5814. + }
  5815. + if (nxtfrag->refs) {
  5816. + numfrags = 0;
  5817. + break; /* In use still */
  5818. + }
  5819. + if (nxtfrag->root_pfn != frag->root_pfn) {
  5820. + numfrags = 0;
  5821. + break; /* Crosses root fragment boundary */
  5822. + }
  5823. + len += nxtfrag->len;
  5824. + numfrags++;
  5825. + if (is_power_of_4(len)) {
  5826. + /* These fragments can be combined */
  5827. + break;
  5828. + }
  5829. + nxtfrag = list_entry(nxtfrag->list.next,
  5830. + struct mem_fragment, list);
  5831. + }
  5832. + if (numfrags == 0) {
  5833. + frag = list_entry(frag->list.next,
  5834. + struct mem_fragment, list);
  5835. + continue; /* try the next window */
  5836. + }
  5837. + for (i = 0; i < numfrags; i++) {
  5838. + struct mem_fragment *todel =
  5839. + list_entry(nxtfrag->list.prev,
  5840. + struct mem_fragment, list);
  5841. + nxtfrag->len += todel->len;
  5842. + nxtfrag->pfn_len += todel->pfn_len;
  5843. + list_del(&todel->list);
  5844. + }
   5845. + /* Re-evaluate the list, things may merge now */
  5846. + frag = list_entry(mem_list.next, struct mem_fragment, list);
  5847. + }
  5848. +}
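/*
 * E.g. once the four 16MB pieces from a split are all unmapped again
 * (refs == 0) and share the same root_pfn, their lengths sum to the
 * power-of-4 value 64MB, so the loop above folds them back into a single
 * 64MB fragment.
 */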
  5849. +
  5850. +/* Hook from arch/powerpc/mm/mem.c */
  5851. +int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size)
  5852. +{
  5853. + struct mem_fragment *frag;
  5854. + int idx = -1;
  5855. + if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size)))
  5856. + return -1;
  5857. + /* It's in-range, we need to find the fragment */
  5858. + spin_lock(&mem_lock);
  5859. + list_for_each_entry(frag, &mem_list, list) {
  5860. + if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base +
  5861. + frag->pfn_len))) {
  5862. + *phys_addr = frag->base;
  5863. + *size = frag->len;
  5864. + idx = current_tlb++;
  5865. + if (current_tlb >= (first_tlb + num_tlb))
  5866. + current_tlb = first_tlb;
  5867. + break;
  5868. + }
  5869. + }
  5870. + spin_unlock(&mem_lock);
  5871. + return idx;
  5872. +}
  5873. +
  5874. +static int usdpaa_open(struct inode *inode, struct file *filp)
  5875. +{
  5876. + const struct alloc_backend *backend = &alloc_backends[0];
  5877. + struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL);
  5878. + if (!ctx)
  5879. + return -ENOMEM;
  5880. + filp->private_data = ctx;
  5881. +
  5882. + while (backend->id_type != usdpaa_id_max) {
  5883. + INIT_LIST_HEAD(&ctx->resources[backend->id_type]);
  5884. + backend++;
  5885. + }
  5886. +
  5887. + INIT_LIST_HEAD(&ctx->maps);
  5888. + INIT_LIST_HEAD(&ctx->portals);
  5889. + spin_lock_init(&ctx->lock);
  5890. +
  5891. + //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
  5892. +
  5893. + return 0;
  5894. +}
  5895. +
  5896. +#define DQRR_MAXFILL 15
  5897. +
  5898. +/* Reset a QMan portal to its default state */
  5899. +static int init_qm_portal(struct qm_portal_config *config,
  5900. + struct qm_portal *portal)
  5901. +{
  5902. + const struct qm_dqrr_entry *dqrr = NULL;
  5903. + int i;
  5904. +
  5905. + portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
  5906. + portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
  5907. +
  5908. + /* Make sure interrupts are inhibited */
  5909. + qm_out(IIR, 1);
  5910. +
  5911. + /* Initialize the DQRR. This will stop any dequeue
  5912. + commands that are in progress */
  5913. + if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb,
  5914. + qm_dqrr_cdc, DQRR_MAXFILL)) {
  5915. + pr_err("qm_dqrr_init() failed when trying to"
  5916. + " recover portal, portal will be leaked\n");
  5917. + return 1;
  5918. + }
  5919. +
  5920. + /* Discard any entries on the DQRR */
  5921. + /* If we consume the ring twice something is wrong */
  5922. + for (i = 0; i < DQRR_MAXFILL * 2; i++) {
  5923. + qm_dqrr_pvb_update(portal);
  5924. + dqrr = qm_dqrr_current(portal);
  5925. + if (!dqrr)
  5926. + break;
  5927. + qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0);
  5928. + qm_dqrr_pvb_update(portal);
  5929. + qm_dqrr_next(portal);
  5930. + }
  5931. + /* Initialize the EQCR */
  5932. + if (qm_eqcr_init(portal, qm_eqcr_pvb,
  5933. + qm_eqcr_get_ci_stashing(portal), 1)) {
  5934. + pr_err("Qman EQCR initialisation failed\n");
  5935. + return 1;
  5936. + }
  5937. + /* initialize the MR */
  5938. + if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) {
  5939. + pr_err("Qman MR initialisation failed\n");
  5940. + return 1;
  5941. + }
  5942. + qm_mr_pvb_update(portal);
  5943. + while (qm_mr_current(portal)) {
  5944. + qm_mr_next(portal);
  5945. + qm_mr_cci_consume_to_current(portal);
  5946. + qm_mr_pvb_update(portal);
  5947. + }
  5948. +
  5949. + if (qm_mc_init(portal)) {
  5950. + pr_err("Qman MC initialisation failed\n");
  5951. + return 1;
  5952. + }
  5953. + return 0;
  5954. +}
  5955. +
  5956. +static int init_bm_portal(struct bm_portal_config *config,
  5957. + struct bm_portal *portal)
  5958. +{
  5959. + portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
  5960. + portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
  5961. +
  5962. + if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) {
  5963. + pr_err("Bman RCR initialisation failed\n");
  5964. + return 1;
  5965. + }
  5966. + if (bm_mc_init(portal)) {
  5967. + pr_err("Bman MC initialisation failed\n");
  5968. + return 1;
  5969. + }
  5970. + return 0;
  5971. +}
  5972. +
  5973. +/* Function that will scan all FQs in the system. For each FQ that is not
  5974. + OOS it will call the check_channel helper to determine if the FQ should
  5975. + be torn down. If the check_channel helper returns true the FQ will be
  5976. + transitioned to the OOS state */
  5977. +static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
  5978. + bool (*check_channel)(void*, u32))
  5979. +{
  5980. + u32 fq_id = 0;
  5981. + while (1) {
  5982. + struct qm_mc_command *mcc;
  5983. + struct qm_mc_result *mcr;
  5984. + u8 state;
  5985. + u32 channel;
  5986. +
  5987. + /* Determine the channel for the FQID */
  5988. + mcc = qm_mc_start(portal);
  5989. + mcc->queryfq.fqid = fq_id;
  5990. + qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
  5991. + while (!(mcr = qm_mc_result(portal)))
  5992. + cpu_relax();
  5993. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
  5994. + == QM_MCR_VERB_QUERYFQ);
  5995. + if (mcr->result != QM_MCR_RESULT_OK)
  5996. + break; /* End of valid FQIDs */
  5997. +
  5998. + channel = mcr->queryfq.fqd.dest.channel;
  5999. + /* Determine the state of the FQID */
  6000. + mcc = qm_mc_start(portal);
  6001. + mcc->queryfq_np.fqid = fq_id;
  6002. + qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
  6003. + while (!(mcr = qm_mc_result(portal)))
  6004. + cpu_relax();
  6005. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
  6006. + == QM_MCR_VERB_QUERYFQ_NP);
  6007. + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
  6008. + if (state == QM_MCR_NP_STATE_OOS)
  6009. + /* Already OOS, no need to do any more checks */
  6010. + goto next;
  6011. +
  6012. + if (check_channel(ctx, channel))
  6013. + qm_shutdown_fq(&portal, 1, fq_id);
  6014. + next:
  6015. + ++fq_id;
  6016. + }
  6017. + return 0;
  6018. +}
  6019. +
  6020. +static bool check_channel_device(void *_ctx, u32 channel)
  6021. +{
  6022. + struct ctx *ctx = _ctx;
  6023. + struct portal_mapping *portal, *tmpportal;
  6024. + struct active_resource *res;
  6025. +
  6026. + /* See if the FQ is destined for one of the portals we're cleaning up */
  6027. + list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
  6028. + if (portal->user.type == usdpaa_portal_qman) {
  6029. + if (portal->qportal->public_cfg.channel == channel) {
  6030. + /* This FQ's destination is a portal
  6031. + we're cleaning up; send a retire */
  6032. + return true;
  6033. + }
  6034. + }
  6035. + }
  6036. +
  6037. + /* Check the pool channels that will be released as well */
  6038. + list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
  6039. + if ((channel >= res->id) &&
  6040. + (channel <= (res->id + res->num - 1)))
  6041. + return true;
  6042. + }
  6043. + return false;
  6044. +}
  6045. +
  6046. +static bool check_portal_channel(void *ctx, u32 channel)
  6047. +{
  6048. + u32 portal_channel = *(u32 *)ctx;
  6049. + if (portal_channel == channel) {
  6050. + /* This FQ's destination is a portal
  6051. + we're cleaning up; send a retire */
  6052. + return true;
  6053. + }
  6054. + return false;
  6055. +}
  6056. +
  6060. +static int usdpaa_release(struct inode *inode, struct file *filp)
  6061. +{
  6062. + struct ctx *ctx = filp->private_data;
  6063. + struct mem_mapping *map, *tmpmap;
  6064. + struct portal_mapping *portal, *tmpportal;
  6065. + const struct alloc_backend *backend = &alloc_backends[0];
  6066. + struct active_resource *res;
  6067. + struct qm_portal *qm_cleanup_portal = NULL;
  6068. + struct bm_portal *bm_cleanup_portal = NULL;
  6069. + struct qm_portal_config *qm_alloced_portal = NULL;
  6070. + struct bm_portal_config *bm_alloced_portal = NULL;
  6071. +
  6072. + struct qm_portal *portal_array[qman_portal_max];
  6073. + int portal_count = 0;
  6074. +
  6075. + /* Ensure the release operation cannot be migrated to another
  6076. + CPU as CPU specific variables may be needed during cleanup */
  6077. +#ifdef CONFIG_PREEMPT_RT_FULL
  6078. + migrate_disable();
  6079. +#endif
  6080. + /* The following logic is used to recover resources that were not
  6081. + correctly released by the process that is closing the FD.
  6082. + Step 1: synchronize the HW with the qm_portal/bm_portal structures
  6083. + in the kernel
  6084. + */
  6085. +
  6086. + list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
  6087. + /* Try to recover any portals that weren't shut down */
  6088. + if (portal->user.type == usdpaa_portal_qman) {
  6089. + portal_array[portal_count] = &portal->qman_portal_low;
  6090. + ++portal_count;
  6091. + init_qm_portal(portal->qportal,
  6092. + &portal->qman_portal_low);
  6093. + if (!qm_cleanup_portal) {
  6094. + qm_cleanup_portal = &portal->qman_portal_low;
  6095. + } else {
  6096. + /* Clean FQs on the dedicated channel */
  6097. + u32 chan = portal->qportal->public_cfg.channel;
  6098. + qm_check_and_destroy_fqs(
  6099. + &portal->qman_portal_low, &chan,
  6100. + check_portal_channel);
  6101. + }
  6102. + } else {
  6103. + /* BMAN */
  6104. + init_bm_portal(portal->bportal,
  6105. + &portal->bman_portal_low);
  6106. + if (!bm_cleanup_portal)
  6107. + bm_cleanup_portal = &portal->bman_portal_low;
  6108. + }
  6109. + }
  6110. + /* If no portal was found, allocate one for cleanup */
  6111. + if (!qm_cleanup_portal) {
  6112. + qm_alloced_portal = qm_get_unused_portal();
  6113. + if (!qm_alloced_portal) {
  6114. + pr_crit("No QMan portal avalaible for cleanup\n");
  6115. +#ifdef CONFIG_PREEMPT_RT_FULL
  6116. + migrate_enable();
  6117. +#endif
  6118. + return -1;
  6119. + }
  6120. + qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
  6121. + GFP_KERNEL);
  6122. + if (!qm_cleanup_portal) {
  6123. +#ifdef CONFIG_PREEMPT_RT_FULL
  6124. + migrate_enable();
  6125. +#endif
  6126. + return -ENOMEM;
  6127. + }
  6128. + init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
  6129. + portal_array[portal_count] = qm_cleanup_portal;
  6130. + ++portal_count;
  6131. + }
  6132. + if (!bm_cleanup_portal) {
  6133. + bm_alloced_portal = bm_get_unused_portal();
  6134. + if (!bm_alloced_portal) {
  6135. + pr_crit("No BMan portal avalaible for cleanup\n");
  6136. +#ifdef CONFIG_PREEMPT_RT_FULL
  6137. + migrate_enable();
  6138. +#endif
  6139. + return -1;
  6140. + }
  6141. + bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
  6142. + GFP_KERNEL);
  6143. + if (!bm_cleanup_portal) {
  6144. +#ifdef CONFIG_PREEMPT_RT_FULL
  6145. + migrate_enable();
  6146. +#endif
  6147. + return -ENOMEM;
  6148. + }
  6149. + init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
  6150. + }
  6151. +
  6152. + /* OOS the FQs associated with this process */
  6153. + qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
  6154. +
  6155. + while (backend->id_type != usdpaa_id_max) {
  6156. + int leaks = 0;
  6157. + list_for_each_entry(res, &ctx->resources[backend->id_type],
  6158. + list) {
  6159. + if (backend->id_type == usdpaa_id_fqid) {
  6160. + int i = 0;
  6161. + for (; i < res->num; i++) {
  6162. + /* Clean FQs with the cleanup portal */
  6163. + qm_shutdown_fq(portal_array,
  6164. + portal_count,
  6165. + res->id + i);
  6166. + }
  6167. + }
  6168. + leaks += res->num;
  6169. + backend->release(res->id, res->num);
  6170. + }
  6171. + if (leaks)
  6172. + pr_crit("USDPAA process leaking %d %s%s\n", leaks,
  6173. + backend->acronym, (leaks > 1) ? "s" : "");
  6174. + backend++;
  6175. + }
  6176. + /* Release any DMA regions */
  6177. + spin_lock(&mem_lock);
  6178. + list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) {
  6179. + struct mem_fragment *current_frag = map->root_frag;
  6180. + int i;
  6181. + if (map->root_frag->has_locking &&
  6182. + (map->root_frag->owner == map)) {
  6183. + map->root_frag->owner = NULL;
  6184. + wake_up(&map->root_frag->wq);
  6185. + }
  6186. + /* Drop this map's reference on each fragment; free ones are merged below */
  6187. + for (i = 0; i < map->frag_count; i++) {
  6188. + --current_frag->refs;
  6189. + current_frag = list_entry(current_frag->list.prev,
  6190. + struct mem_fragment, list);
  6191. + }
  6192. +
  6193. + compress_frags();
  6194. + list_del(&map->list);
  6195. + kfree(map);
  6196. + }
  6197. + spin_unlock(&mem_lock);
  6198. +
  6199. + /* Return portals */
  6200. + list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
  6201. + if (portal->user.type == usdpaa_portal_qman) {
  6202. + /* Give the portal back to the allocator */
  6203. + init_qm_portal(portal->qportal,
  6204. + &portal->qman_portal_low);
  6205. + qm_put_unused_portal(portal->qportal);
  6206. + } else {
  6207. + init_bm_portal(portal->bportal,
  6208. + &portal->bman_portal_low);
  6209. + bm_put_unused_portal(portal->bportal);
  6210. + }
  6211. + list_del(&portal->list);
  6212. + kfree(portal);
  6213. + }
  6214. + if (qm_alloced_portal) {
  6215. + qm_put_unused_portal(qm_alloced_portal);
  6216. + kfree(qm_cleanup_portal);
  6217. + }
  6218. + if (bm_alloced_portal) {
  6219. + bm_put_unused_portal(bm_alloced_portal);
  6220. + kfree(bm_cleanup_portal);
  6221. + }
  6222. +
  6223. + kfree(ctx);
  6224. +#ifdef CONFIG_PREEMPT_RT_FULL
  6225. + migrate_enable();
  6226. +#endif
  6227. + return 0;
  6228. +}
  6229. +
  6230. +static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
  6231. + int *match, unsigned long *pfn)
  6232. +{
  6233. + struct mem_mapping *map;
  6234. +
  6235. + list_for_each_entry(map, &ctx->maps, list) {
  6236. + int i;
  6237. + struct mem_fragment *frag = map->root_frag;
  6238. +
  6239. + for (i = 0; i < map->frag_count; i++) {
  6240. + if (frag->pfn_base == vma->vm_pgoff) {
  6241. + *match = 1;
  6242. + *pfn = frag->pfn_base;
  6243. + return 0;
  6244. + }
  6245. + frag = list_entry(frag->list.next, struct mem_fragment,
  6246. + list);
  6247. + }
  6248. + }
  6249. + *match = 0;
  6250. + return 0;
  6251. +}
  6252. +
  6253. +static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma,
  6254. + int *match, unsigned long *pfn)
  6255. +{
  6256. + *pfn = res->start >> PAGE_SHIFT;
  6257. + if (*pfn == vma->vm_pgoff) {
  6258. + *match = 1;
  6259. + if ((vma->vm_end - vma->vm_start) != resource_size(res))
  6260. + return -EINVAL;
  6261. + } else
  6262. + *match = 0;
  6263. + return 0;
  6264. +}
  6265. +
  6266. +static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma,
  6267. + int *match, unsigned long *pfn)
  6268. +{
  6269. + struct portal_mapping *portal;
  6270. + int ret;
  6271. +
  6272. + list_for_each_entry(portal, &ctx->portals, list) {
  6273. + ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma,
  6274. + match, pfn);
  6275. + if (*match) {
  6276. + vma->vm_page_prot =
  6277. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  6278. + pgprot_cached_ns(vma->vm_page_prot);
  6279. +#else
  6280. + pgprot_cached_noncoherent(vma->vm_page_prot);
  6281. +#endif
  6282. + return ret;
  6283. + }
  6284. + ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma,
  6285. + match, pfn);
  6286. + if (*match) {
  6287. + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  6288. + return ret;
  6289. + }
  6290. + }
  6291. + *match = 0;
  6292. + return 0;
  6293. +}
  6294. +
  6295. +static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma)
  6296. +{
  6297. + struct ctx *ctx = filp->private_data;
  6298. + unsigned long pfn = 0;
  6299. + int match, ret;
  6300. +
  6301. + spin_lock(&mem_lock);
  6302. + ret = check_mmap_dma(ctx, vma, &match, &pfn);
  6303. + if (!match)
  6304. + ret = check_mmap_portal(ctx, vma, &match, &pfn);
  6305. + spin_unlock(&mem_lock);
  6306. + if (!match)
  6307. + return -EINVAL;
  6308. + if (!ret)
  6309. + ret = remap_pfn_range(vma, vma->vm_start, pfn,
  6310. + vma->vm_end - vma->vm_start,
  6311. + vma->vm_page_prot);
  6312. + return ret;
  6313. +}
  6314. +
  6315. +/* Return the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz'
  6316. + * must be a power of 2, but both 'addr' and 'sz' can be expressions. */
  6317. +#define USDPAA_MEM_ROUNDUP(addr, sz) \
  6318. + ({ \
  6319. + unsigned long foo_align = (sz) - 1; \
  6320. + ((addr) + foo_align) & ~foo_align; \
  6321. + })
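A quick illustration of the macro (not part of the patch, values are hypothetical): with a 16 MiB alignment it adds sz-1 and clears the low bits, so

	USDPAA_MEM_ROUNDUP(0x02100000UL, 0x01000000UL)	/* == 0x03000000 */
	USDPAA_MEM_ROUNDUP(0x03000000UL, 0x01000000UL)	/* == 0x03000000, already aligned */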
  6322. +/* Searching for a size-aligned virtual address range starting from 'addr' */
  6323. +static unsigned long usdpaa_get_unmapped_area(struct file *file,
  6324. + unsigned long addr,
  6325. + unsigned long len,
  6326. + unsigned long pgoff,
  6327. + unsigned long flags)
  6328. +{
  6329. + struct vm_area_struct *vma;
  6330. +
  6331. + if (len % PAGE_SIZE)
  6332. + return -EINVAL;
  6333. + if (!len)
  6334. + return -EINVAL;
  6335. +
  6336. + /* Need to align the address to the largest page size of the mapping
  6337. + * because the MMU requires the virtual address to have the same
  6338. + * alignment as the physical address */
  6339. + addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len));
  6340. + vma = find_vma(current->mm, addr);
  6341. + /* Keep searching until we reach the end of currently-used virtual
  6342. + * address-space or we find a big enough gap. */
  6343. + while (vma) {
  6344. + if ((addr + len) < vma->vm_start)
  6345. + return addr;
  6346. +
  6347. + addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len));
  6348. + vma = vma->vm_next;
  6349. + }
  6350. + if ((TASK_SIZE - len) < addr)
  6351. + return -ENOMEM;
  6352. + return addr;
  6353. +}
  6354. +
  6355. +static long ioctl_id_alloc(struct ctx *ctx, void __user *arg)
  6356. +{
  6357. + struct usdpaa_ioctl_id_alloc i;
  6358. + const struct alloc_backend *backend;
  6359. + struct active_resource *res;
  6360. + int ret = copy_from_user(&i, arg, sizeof(i));
  6361. + if (ret)
  6362. + return ret;
  6363. + if ((i.id_type >= usdpaa_id_max) || !i.num)
  6364. + return -EINVAL;
  6365. + backend = &alloc_backends[i.id_type];
  6366. + /* Allocate the required resource type */
  6367. + ret = backend->alloc(&i.base, i.num, i.align, i.partial);
  6368. + if (ret < 0)
  6369. + return ret;
  6370. + i.num = ret;
  6371. + /* Copy the result to user-space */
  6372. + ret = copy_to_user(arg, &i, sizeof(i));
  6373. + if (ret) {
  6374. + backend->release(i.base, i.num);
  6375. + return ret;
  6376. + }
  6377. + /* Assign the allocated range to the FD accounting */
  6378. + res = kmalloc(sizeof(*res), GFP_KERNEL);
  6379. + if (!res) {
  6380. + backend->release(i.base, i.num);
  6381. + return -ENOMEM;
  6382. + }
  6383. + spin_lock(&ctx->lock);
  6384. + res->id = i.base;
  6385. + res->num = i.num;
  6386. + res->refcount = 1;
  6387. + list_add(&res->list, &ctx->resources[i.id_type]);
  6388. + spin_unlock(&ctx->lock);
  6389. + return 0;
  6390. +}
  6391. +
  6392. +static long ioctl_id_release(struct ctx *ctx, void __user *arg)
  6393. +{
  6394. + struct usdpaa_ioctl_id_release i;
  6395. + const struct alloc_backend *backend;
  6396. + struct active_resource *tmp, *pos;
  6397. +
  6398. + int ret = copy_from_user(&i, arg, sizeof(i));
  6399. + if (ret)
  6400. + return ret;
  6401. + if ((i.id_type >= usdpaa_id_max) || !i.num)
  6402. + return -EINVAL;
  6403. + backend = &alloc_backends[i.id_type];
  6404. + /* Pull the range out of the FD accounting - the range is valid iff this
  6405. + * succeeds. */
  6406. + spin_lock(&ctx->lock);
  6407. + list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
  6408. + if (pos->id == i.base && pos->num == i.num) {
  6409. + pos->refcount--;
  6410. + if (pos->refcount) {
  6411. + spin_unlock(&ctx->lock);
  6412. + return 0; /* Still being used */
  6413. + }
  6414. + list_del(&pos->list);
  6415. + kfree(pos);
  6416. + spin_unlock(&ctx->lock);
  6417. + goto found;
  6418. + }
  6419. + }
  6420. + /* Failed to find the resource */
  6421. + spin_unlock(&ctx->lock);
  6422. + pr_err("Couldn't find resource type %d base 0x%x num %d\n",
  6423. + i.id_type, i.base, i.num);
  6424. + return -EINVAL;
  6425. +found:
  6426. + /* Release the resource to the backend */
  6427. + backend->release(i.base, i.num);
  6428. + return 0;
  6429. +}
  6430. +
  6431. +static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
  6432. +{
  6433. + struct usdpaa_ioctl_id_reserve i;
  6434. + const struct alloc_backend *backend;
  6435. + struct active_resource *tmp, *pos;
  6436. +
  6437. + int ret = copy_from_user(&i, arg, sizeof(i));
  6438. + if (ret)
  6439. + return ret;
  6440. + if ((i.id_type >= usdpaa_id_max) || !i.num)
  6441. + return -EINVAL;
  6442. + backend = &alloc_backends[i.id_type];
  6443. + if (!backend->reserve)
  6444. + return -EINVAL;
  6445. + /* If the range is already associated with this FD, just take another
  6446. + * reference. */
  6447. + spin_lock(&ctx->lock);
  6448. + list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
  6449. + if (pos->id == i.base && pos->num == i.num) {
  6450. + pos->refcount++;
  6451. + spin_unlock(&ctx->lock);
  6452. + return 0;
  6453. + }
  6454. + }
  6455. +
  6456. + /* Failed to find the resource */
  6457. + spin_unlock(&ctx->lock);
  6458. +
  6459. + /* Reserve the resource in the backend */
  6460. + ret = backend->reserve(i.base, i.num);
  6461. + if (ret)
  6462. + return ret;
  6463. + /* Assign the reserved range to the FD accounting */
  6464. + pos = kmalloc(sizeof(*pos), GFP_KERNEL);
  6465. + if (!pos) {
  6466. + backend->release(i.base, i.num);
  6467. + return -ENOMEM;
  6468. + }
  6469. + spin_lock(&ctx->lock);
  6470. + pos->id = i.base;
  6471. + pos->num = i.num;
  6472. + pos->refcount = 1;
  6473. + list_add(&pos->list, &ctx->resources[i.id_type]);
  6474. + spin_unlock(&ctx->lock);
  6475. + return 0;
  6476. +}
  6477. +
  6478. +static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
  6479. + struct usdpaa_ioctl_dma_map *i)
  6480. +{
  6481. + struct mem_fragment *frag, *start_frag, *next_frag;
  6482. + struct mem_mapping *map, *tmp;
  6483. + int ret = 0;
  6484. + u32 largest_page, so_far = 0;
  6485. + int frag_count = 0;
  6486. + unsigned long next_addr = PAGE_SIZE, populate;
  6487. +
  6488. + /* error checking to ensure values copied from user space are valid */
  6489. + if (i->len % PAGE_SIZE)
  6490. + return -EINVAL;
  6491. +
  6492. + map = kmalloc(sizeof(*map), GFP_KERNEL);
  6493. + if (!map)
  6494. + return -ENOMEM;
  6495. +
  6496. + spin_lock(&mem_lock);
  6497. + if (i->flags & USDPAA_DMA_FLAG_SHARE) {
  6498. + list_for_each_entry(frag, &mem_list, list) {
  6499. + if (frag->refs && (frag->flags &
  6500. + USDPAA_DMA_FLAG_SHARE) &&
  6501. + !strncmp(i->name, frag->name,
  6502. + USDPAA_DMA_NAME_MAX)) {
  6503. + /* Matching entry */
  6504. + if ((i->flags & USDPAA_DMA_FLAG_CREATE) &&
  6505. + !(i->flags & USDPAA_DMA_FLAG_LAZY)) {
  6506. + ret = -EBUSY;
  6507. + goto out;
  6508. + }
  6509. +
  6510. + /* Check to ensure size matches record */
  6511. + if (i->len != frag->map_len && i->len) {
  6512. + pr_err("ioctl_dma_map() Size requested does not match %s and is none zero. This usage will be disallowed in future release\n",
  6513. + frag->name);
  6514. + }
  6515. +
  6516. + /* Check if this has already been mapped
  6517. + to this process */
  6518. + list_for_each_entry(tmp, &ctx->maps, list)
  6519. + if (tmp->root_frag == frag) {
  6520. + /* Already mapped, just need to
  6521. + inc ref count */
  6522. + tmp->refs++;
  6523. + kfree(map);
  6524. + i->did_create = 0;
  6525. + i->len = tmp->total_size;
  6526. + i->phys_addr = frag->base;
  6527. + i->ptr = tmp->virt_addr;
  6528. + spin_unlock(&mem_lock);
  6529. + return 0;
  6530. + }
  6531. + /* Matching entry - just need to map */
  6532. + i->has_locking = frag->has_locking;
  6533. + i->did_create = 0;
  6534. + i->len = frag->map_len;
  6535. + start_frag = frag;
  6536. + goto do_map;
  6537. + }
  6538. + }
  6539. + /* No matching entry */
  6540. + if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
  6541. + pr_err("ioctl_dma_map() No matching entry\n");
  6542. + ret = -ENOMEM;
  6543. + goto out;
  6544. + }
  6545. + }
  6546. + /* New fragment required, size must be provided. */
  6547. + if (!i->len) {
  6548. + ret = -EINVAL;
  6549. + goto out;
  6550. + }
  6551. +
  6552. + /* Find one or more contiguous fragments that satisfy the total length,
  6553. + trying to minimize the number of fragments;
  6554. + compute the largest page size that the allocation could use */
  6555. + largest_page = largest_page_size(i->len);
  6556. + start_frag = NULL;
  6557. + while (largest_page &&
  6558. + largest_page <= largest_page_size(phys_size) &&
  6559. + start_frag == NULL) {
  6560. + /* Search the list for a frag of that size */
  6561. + list_for_each_entry(frag, &mem_list, list) {
  6562. + if (!frag->refs && (frag->len == largest_page)) {
  6563. + /* See if the next few fragments are free
  6564. + and can accommodate the size */
  6565. + u32 found_size = largest_page;
  6566. + next_frag = list_entry(frag->list.prev,
  6567. + struct mem_fragment,
  6568. + list);
  6569. + /* If the fragment is too small, check
  6570. + if the neighbours can support it */
  6571. + while (found_size < i->len) {
  6572. + if (&mem_list == &next_frag->list)
  6573. + break; /* End of list */
  6574. + if (next_frag->refs != 0 ||
  6575. + next_frag->len == 0)
  6576. + break; /* not enough space */
  6577. + found_size += next_frag->len;
  6578. + next_frag = list_entry(
  6579. + next_frag->list.prev,
  6580. + struct mem_fragment,
  6581. + list);
  6582. + }
  6583. + if (found_size >= i->len) {
  6584. + /* Success! There is enough contiguous
  6585. + free space */
  6586. + start_frag = frag;
  6587. + break;
  6588. + }
  6589. + }
  6590. + } /* next frag loop */
  6591. + /* Couldn't satisfy the request at this
  6592. + fragment size, try a larger one */
  6593. + largest_page <<= 2;
  6594. + }
  6595. + if (start_frag == NULL) {
  6596. + /* Couldn't find proper amount of space */
  6597. + ret = -ENOMEM;
  6598. + goto out;
  6599. + }
  6600. + i->did_create = 1;
  6601. +do_map:
  6602. + /* Verify there is sufficient space to do the mapping */
  6603. + down_write(&current->mm->mmap_sem);
  6604. + next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
  6605. + up_write(&current->mm->mmap_sem);
  6606. +
  6607. + if (next_addr & ~PAGE_MASK) {
  6608. + ret = -ENOMEM;
  6609. + goto out;
  6610. + }
  6611. +
  6612. + /* We may need to divide the final fragment to accommodate the mapping */
  6613. + next_frag = start_frag;
  6614. + while (so_far != i->len) {
  6615. + BUG_ON(next_frag->len == 0);
  6616. + while ((next_frag->len + so_far) > i->len) {
  6617. + /* Split frag until they match */
  6618. + split_frag(next_frag);
  6619. + }
  6620. + so_far += next_frag->len;
  6621. + next_frag->refs++;
  6622. + ++frag_count;
  6623. + next_frag = list_entry(next_frag->list.prev,
  6624. + struct mem_fragment, list);
  6625. + }
  6626. + if (i->did_create) {
  6627. + size_t name_len = 0;
  6628. + start_frag->flags = i->flags;
  6629. + strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
  6630. + name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
  6631. + if (name_len >= USDPAA_DMA_NAME_MAX) {
  6632. + ret = -EFAULT;
  6633. + goto out;
  6634. + }
  6635. + start_frag->map_len = i->len;
  6636. + start_frag->has_locking = i->has_locking;
  6637. + init_waitqueue_head(&start_frag->wq);
  6638. + start_frag->owner = NULL;
  6639. + }
  6640. +
  6641. + /* Setup the map entry */
  6642. + map->root_frag = start_frag;
  6643. + map->total_size = i->len;
  6644. + map->frag_count = frag_count;
  6645. + map->refs = 1;
  6646. + list_add(&map->list, &ctx->maps);
  6647. + i->phys_addr = start_frag->base;
  6648. +out:
  6649. + spin_unlock(&mem_lock);
  6650. +
  6651. + if (!ret) {
  6652. + unsigned long longret;
  6653. + down_write(&current->mm->mmap_sem);
  6654. + longret = do_mmap_pgoff(fp, next_addr, map->total_size,
  6655. + PROT_READ |
  6656. + (i->flags &
  6657. + USDPAA_DMA_FLAG_RDONLY ? 0
  6658. + : PROT_WRITE),
  6659. + MAP_SHARED,
  6660. + start_frag->pfn_base,
  6661. + &populate);
  6662. + up_write(&current->mm->mmap_sem);
  6663. + if (longret & ~PAGE_MASK) {
  6664. + ret = (int)longret;
  6665. + } else {
  6666. + i->ptr = (void *)longret;
  6667. + map->virt_addr = i->ptr;
  6668. + }
  6669. + } else
  6670. + kfree(map);
  6671. + return ret;
  6672. +}
  6673. +
  6674. +static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
  6675. +{
  6676. + struct mem_mapping *map;
  6677. + struct vm_area_struct *vma;
  6678. + int ret, i;
  6679. + struct mem_fragment *current_frag;
  6680. + size_t sz;
  6681. + unsigned long base;
  6682. + unsigned long vaddr;
  6683. +
  6684. + down_write(&current->mm->mmap_sem);
  6685. + vma = find_vma(current->mm, (unsigned long)arg);
  6686. + if (!vma || (vma->vm_start > (unsigned long)arg)) {
  6687. + up_write(&current->mm->mmap_sem);
  6688. + return -EFAULT;
  6689. + }
  6690. + spin_lock(&mem_lock);
  6691. + list_for_each_entry(map, &ctx->maps, list) {
  6692. + if (map->root_frag->pfn_base == vma->vm_pgoff) {
  6693. + /* Drop the map lock if we hold it */
  6694. + if (map->root_frag->has_locking &&
  6695. + (map->root_frag->owner == map)) {
  6696. + map->root_frag->owner = NULL;
  6697. + wake_up(&map->root_frag->wq);
  6698. + }
  6699. + goto map_match;
  6700. + }
  6701. + }
  6702. + /* Failed to find a matching mapping for this process */
  6703. + ret = -EFAULT;
  6704. + spin_unlock(&mem_lock);
  6705. + goto out;
  6706. +map_match:
  6707. + map->refs--;
  6708. + if (map->refs != 0) {
  6709. + /* Another call to dma_map is still referencing this */
  6710. + ret = 0;
  6711. + spin_unlock(&mem_lock);
  6712. + goto out;
  6713. + }
  6714. +
  6715. + current_frag = map->root_frag;
  6716. + vaddr = (unsigned long) map->virt_addr;
  6717. + for (i = 0; i < map->frag_count; i++) {
  6718. + DPA_ASSERT(current_frag->refs > 0);
  6719. + --current_frag->refs;
  6720. +#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
  6721. + /*
  6722. + * Make sure we invalidate the TLB entry for
  6723. + * this fragment, otherwise a remap of a different
  6724. + * page to this vaddr would give access to an
  6725. + * incorrect piece of memory
  6726. + */
  6727. + cleartlbcam(vaddr, mfspr(SPRN_PID));
  6728. +#endif
  6729. + vaddr += current_frag->len;
  6730. + current_frag = list_entry(current_frag->list.prev,
  6731. + struct mem_fragment, list);
  6732. + }
  6733. + map->root_frag->name[0] = 0;
  6734. + list_del(&map->list);
  6735. + compress_frags();
  6736. + spin_unlock(&mem_lock);
  6737. +
  6738. + base = vma->vm_start;
  6739. + sz = vma->vm_end - vma->vm_start;
  6740. + do_munmap(current->mm, base, sz);
  6741. + ret = 0;
  6742. + out:
  6743. + up_write(&current->mm->mmap_sem);
  6744. + return ret;
  6745. +}
  6746. +
  6747. +static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
  6748. +{
  6749. + struct mem_fragment *frag;
  6750. + struct usdpaa_ioctl_dma_used result;
  6751. +
  6752. + result.free_bytes = 0;
  6753. + result.total_bytes = phys_size;
  6754. +
  6755. + list_for_each_entry(frag, &mem_list, list) {
  6756. + if (frag->refs == 0)
  6757. + result.free_bytes += frag->len;
  6758. + }
  6759. +
  6760. + return copy_to_user(arg, &result, sizeof(result));
 +}
  6761. +
  6762. +static int test_lock(struct mem_mapping *map)
  6763. +{
  6764. + int ret = 0;
  6765. + spin_lock(&mem_lock);
  6766. + if (!map->root_frag->owner) {
  6767. + map->root_frag->owner = map;
  6768. + ret = 1;
  6769. + }
  6770. + spin_unlock(&mem_lock);
  6771. + return ret;
  6772. +}
  6773. +
  6774. +static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
  6775. +{
  6776. + struct mem_mapping *map;
  6777. + struct vm_area_struct *vma;
  6778. +
  6779. + down_read(&current->mm->mmap_sem);
  6780. + vma = find_vma(current->mm, (unsigned long)arg);
  6781. + if (!vma || (vma->vm_start > (unsigned long)arg)) {
  6782. + up_read(&current->mm->mmap_sem);
  6783. + return -EFAULT;
  6784. + }
  6785. + spin_lock(&mem_lock);
  6786. + list_for_each_entry(map, &ctx->maps, list) {
  6787. + if (map->root_frag->pfn_base == vma->vm_pgoff)
  6788. + goto map_match;
  6789. + }
  6790. + map = NULL;
  6791. +map_match:
  6792. + spin_unlock(&mem_lock);
  6793. + up_read(&current->mm->mmap_sem);
  6794. +
  6795. + if (!map)
  6796. + return -EFAULT;
  6797. + if (!map->root_frag->has_locking)
  6798. + return -ENODEV;
  6799. + return wait_event_interruptible(map->root_frag->wq, test_lock(map));
  6800. +}
  6801. +
  6802. +static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
  6803. +{
  6804. + struct mem_mapping *map;
  6805. + struct vm_area_struct *vma;
  6806. + int ret;
  6807. +
  6808. + down_read(&current->mm->mmap_sem);
  6809. + vma = find_vma(current->mm, (unsigned long)arg);
  6810. + if (!vma || (vma->vm_start > (unsigned long)arg))
  6811. + ret = -EFAULT;
  6812. + else {
  6813. + spin_lock(&mem_lock);
  6814. + list_for_each_entry(map, &ctx->maps, list) {
  6815. + if (map->root_frag->pfn_base == vma->vm_pgoff) {
  6816. + if (!map->root_frag->has_locking)
  6817. + ret = -ENODEV;
  6818. + else if (map->root_frag->owner == map) {
  6819. + map->root_frag->owner = NULL;
  6820. + wake_up(&map->root_frag->wq);
  6821. + ret = 0;
  6822. + } else
  6823. + ret = -EBUSY;
  6824. + goto map_match;
  6825. + }
  6826. + }
  6827. + ret = -EINVAL;
  6828. +map_match:
  6829. + spin_unlock(&mem_lock);
  6830. + }
  6831. + up_read(&current->mm->mmap_sem);
  6832. + return ret;
  6833. +}
  6834. +
  6835. +static int portal_mmap(struct file *fp, struct resource *res, void **ptr)
  6836. +{
  6837. + unsigned long longret = 0, populate;
  6838. + resource_size_t len;
  6839. +
  6840. + len = resource_size(res);
  6841. + if (len != (unsigned long)len)
  6842. + return -EINVAL;
  6843. + down_write(&current->mm->mmap_sem);
  6844. + longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len,
  6845. + PROT_READ | PROT_WRITE, MAP_SHARED,
  6846. + res->start >> PAGE_SHIFT, &populate);
  6847. + up_write(&current->mm->mmap_sem);
  6848. +
  6849. + if (longret & ~PAGE_MASK)
  6850. + return (int)longret;
  6851. +
  6852. + *ptr = (void *) longret;
  6853. + return 0;
  6854. +}
  6855. +
  6856. +static void portal_munmap(struct resource *res, void *ptr)
  6857. +{
  6858. + down_write(&current->mm->mmap_sem);
  6859. + do_munmap(current->mm, (unsigned long)ptr, resource_size(res));
  6860. + up_write(&current->mm->mmap_sem);
  6861. +}
  6862. +
  6863. +static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
  6864. + struct usdpaa_ioctl_portal_map *arg)
  6865. +{
  6866. + struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
  6867. + int ret;
  6868. +
  6869. + if (!mapping)
  6870. + return -ENOMEM;
  6871. +
  6872. + mapping->user = *arg;
  6873. + mapping->iommu_domain = NULL;
  6874. +
  6875. + if (mapping->user.type == usdpaa_portal_qman) {
  6876. + mapping->qportal =
  6877. + qm_get_unused_portal_idx(mapping->user.index);
  6878. + if (!mapping->qportal) {
  6879. + ret = -ENODEV;
  6880. + goto err_get_portal;
  6881. + }
  6882. + mapping->phys = &mapping->qportal->addr_phys[0];
  6883. + mapping->user.channel = mapping->qportal->public_cfg.channel;
  6884. + mapping->user.pools = mapping->qportal->public_cfg.pools;
  6885. + mapping->user.index = mapping->qportal->public_cfg.index;
  6886. + } else if (mapping->user.type == usdpaa_portal_bman) {
  6887. + mapping->bportal =
  6888. + bm_get_unused_portal_idx(mapping->user.index);
  6889. + if (!mapping->bportal) {
  6890. + ret = -ENODEV;
  6891. + goto err_get_portal;
  6892. + }
  6893. + mapping->phys = &mapping->bportal->addr_phys[0];
  6894. + mapping->user.index = mapping->bportal->public_cfg.index;
  6895. + } else {
  6896. + ret = -EINVAL;
  6897. + goto err_copy_from_user;
  6898. + }
  6899. + /* Need to put pcfg in ctx's list before the mmaps because the mmap
  6900. + * handlers look it up. */
  6901. + spin_lock(&mem_lock);
  6902. + list_add(&mapping->list, &ctx->portals);
  6903. + spin_unlock(&mem_lock);
  6904. + ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE],
  6905. + &mapping->user.addr.cena);
  6906. + if (ret)
  6907. + goto err_mmap_cena;
  6908. + ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI],
  6909. + &mapping->user.addr.cinh);
  6910. + if (ret)
  6911. + goto err_mmap_cinh;
  6912. + *arg = mapping->user;
  6913. + return ret;
  6914. +
  6915. +err_mmap_cinh:
  6916. + portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
  6917. +err_mmap_cena:
  6918. + if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal)
  6919. + qm_put_unused_portal(mapping->qportal);
  6920. + else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal)
  6921. + bm_put_unused_portal(mapping->bportal);
  6922. + spin_lock(&mem_lock);
  6923. + list_del(&mapping->list);
  6924. + spin_unlock(&mem_lock);
  6925. +err_get_portal:
  6926. +err_copy_from_user:
  6927. + kfree(mapping);
  6928. + return ret;
  6929. +}
  6930. +
  6931. +static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
  6932. +{
  6933. + struct portal_mapping *mapping;
  6934. + struct vm_area_struct *vma;
  6935. + unsigned long pfn;
  6936. + u32 channel;
  6937. +
  6938. + /* Get the PFN corresponding to one of the virt addresses */
  6939. + down_read(&current->mm->mmap_sem);
  6940. + vma = find_vma(current->mm, (unsigned long)i->cinh);
  6941. + if (!vma || (vma->vm_start > (unsigned long)i->cinh)) {
  6942. + up_read(&current->mm->mmap_sem);
  6943. + return -EFAULT;
  6944. + }
  6945. + pfn = vma->vm_pgoff;
  6946. + up_read(&current->mm->mmap_sem);
  6947. +
  6948. + /* Find the corresponding portal */
  6949. + spin_lock(&mem_lock);
  6950. + list_for_each_entry(mapping, &ctx->portals, list) {
  6951. + if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT))
  6952. + goto found;
  6953. + }
  6954. + mapping = NULL;
  6955. +found:
  6956. + if (mapping)
  6957. + list_del(&mapping->list);
  6958. + spin_unlock(&mem_lock);
  6959. + if (!mapping)
  6960. + return -ENODEV;
  6961. + portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh);
  6962. + portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
  6963. + if (mapping->user.type == usdpaa_portal_qman) {
  6964. + init_qm_portal(mapping->qportal,
  6965. + &mapping->qman_portal_low);
  6966. +
  6967. + /* Tear down any FQs this portal is referencing */
  6968. + channel = mapping->qportal->public_cfg.channel;
  6969. + qm_check_and_destroy_fqs(&mapping->qman_portal_low,
  6970. + &channel,
  6971. + check_portal_channel);
  6972. + qm_put_unused_portal(mapping->qportal);
  6973. + } else if (mapping->user.type == usdpaa_portal_bman) {
  6974. + init_bm_portal(mapping->bportal,
  6975. + &mapping->bman_portal_low);
  6976. + bm_put_unused_portal(mapping->bportal);
  6977. + }
  6978. + kfree(mapping);
  6979. + return 0;
  6980. +}
  6981. +
  6982. +static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest,
  6983. + uint32_t cpu, uint32_t cache, uint32_t window)
  6984. +{
  6985. +#ifdef CONFIG_FSL_PAMU
  6986. + int ret;
  6987. + int window_count = 1;
  6988. + struct iommu_domain_geometry geom_attr;
  6989. + struct pamu_stash_attribute stash_attr;
  6990. +
  6991. + pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
  6992. + if (!pcfg->iommu_domain) {
  6993. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
  6994. + __func__);
  6995. + goto _no_iommu;
  6996. + }
  6997. + geom_attr.aperture_start = 0;
  6998. + geom_attr.aperture_end =
  6999. + ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
  7000. + geom_attr.force_aperture = true;
  7001. + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
  7002. + &geom_attr);
  7003. + if (ret < 0) {
  7004. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  7005. + __func__, ret);
  7006. + goto _iommu_domain_free;
  7007. + }
  7008. + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
  7009. + &window_count);
  7010. + if (ret < 0) {
  7011. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  7012. + __func__, ret);
  7013. + goto _iommu_domain_free;
  7014. + }
  7015. + stash_attr.cpu = cpu;
  7016. + stash_attr.cache = cache;
  7017. + /* set stash information for the window */
  7018. + stash_attr.window = 0;
  7019. +
  7020. + ret = iommu_domain_set_attr(pcfg->iommu_domain,
  7021. + DOMAIN_ATTR_FSL_PAMU_STASH,
  7022. + &stash_attr);
  7023. + if (ret < 0) {
  7024. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  7025. + __func__, ret);
  7026. + goto _iommu_domain_free;
  7027. + }
  7028. + ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
  7029. + IOMMU_READ | IOMMU_WRITE);
  7030. + if (ret < 0) {
  7031. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
  7032. + __func__, ret);
  7033. + goto _iommu_domain_free;
  7034. + }
  7035. + ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
  7036. + if (ret < 0) {
  7037. + pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
  7038. + __func__, ret);
  7039. + goto _iommu_domain_free;
  7040. + }
  7041. + ret = iommu_domain_set_attr(pcfg->iommu_domain,
  7042. + DOMAIN_ATTR_FSL_PAMU_ENABLE,
  7043. + &window_count);
  7044. + if (ret < 0) {
  7045. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  7046. + __func__, ret);
  7047. + goto _iommu_detach_device;
  7048. + }
  7049. +_no_iommu:
  7050. +#endif
  7051. +
  7052. +#ifdef CONFIG_FSL_QMAN_CONFIG
  7053. + if (qman_set_sdest(pcfg->public_cfg.channel, sdest))
  7054. +#endif
  7055. + pr_warn("Failed to set QMan portal's stash request queue\n");
  7056. +
  7057. + return;
  7058. +
  7059. +#ifdef CONFIG_FSL_PAMU
  7060. +_iommu_detach_device:
  7061. + iommu_detach_device(pcfg->iommu_domain, NULL);
  7062. +_iommu_domain_free:
  7063. + iommu_domain_free(pcfg->iommu_domain);
  7064. +#endif
  7065. +}
  7066. +
  7067. +static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx,
  7068. + struct usdpaa_ioctl_raw_portal *arg)
  7069. +{
  7070. + struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
  7071. + int ret;
  7072. +
  7073. + if (!mapping)
  7074. + return -ENOMEM;
  7075. +
  7076. + mapping->user.type = arg->type;
  7077. + mapping->iommu_domain = NULL;
  7078. + if (arg->type == usdpaa_portal_qman) {
  7079. + mapping->qportal = qm_get_unused_portal_idx(arg->index);
  7080. + if (!mapping->qportal) {
  7081. + ret = -ENODEV;
  7082. + goto err;
  7083. + }
  7084. + mapping->phys = &mapping->qportal->addr_phys[0];
  7085. + arg->index = mapping->qportal->public_cfg.index;
  7086. + arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start;
  7087. + arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start;
  7088. + if (arg->enable_stash) {
  7089. + /* Setup the PAMU with the supplied parameters */
  7090. + portal_config_pamu(mapping->qportal, arg->sdest,
  7091. + arg->cpu, arg->cache, arg->window);
  7092. + }
  7093. + } else if (mapping->user.type == usdpaa_portal_bman) {
  7094. + mapping->bportal =
  7095. + bm_get_unused_portal_idx(arg->index);
  7096. + if (!mapping->bportal) {
  7097. + ret = -ENODEV;
  7098. + goto err;
  7099. + }
  7100. + mapping->phys = &mapping->bportal->addr_phys[0];
  7101. + arg->index = mapping->bportal->public_cfg.index;
  7102. + arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start;
  7103. + arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start;
  7104. + } else {
  7105. + ret = -EINVAL;
  7106. + goto err;
  7107. + }
  7108. + /* Need to put pcfg in ctx's list before the mmaps because the mmap
  7109. + * handlers look it up. */
  7110. + spin_lock(&mem_lock);
  7111. + list_add(&mapping->list, &ctx->portals);
  7112. + spin_unlock(&mem_lock);
  7113. + return 0;
  7114. +err:
  7115. + kfree(mapping);
  7116. + return ret;
  7117. +}
  7118. +
  7119. +static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx,
  7120. + struct usdpaa_ioctl_raw_portal *arg)
  7121. +{
  7122. + struct portal_mapping *mapping;
  7123. + u32 channel;
  7124. +
  7125. + /* Find the corresponding portal */
  7126. + spin_lock(&mem_lock);
  7127. + list_for_each_entry(mapping, &ctx->portals, list) {
  7128. + if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh)
  7129. + goto found;
  7130. + }
  7131. + mapping = NULL;
  7132. +found:
  7133. + if (mapping)
  7134. + list_del(&mapping->list);
  7135. + spin_unlock(&mem_lock);
  7136. + if (!mapping)
  7137. + return -ENODEV;
  7138. + if (mapping->user.type == usdpaa_portal_qman) {
  7139. + init_qm_portal(mapping->qportal,
  7140. + &mapping->qman_portal_low);
  7141. +
  7142. + /* Tear down any FQs this portal is referencing */
  7143. + channel = mapping->qportal->public_cfg.channel;
  7144. + qm_check_and_destroy_fqs(&mapping->qman_portal_low,
  7145. + &channel,
  7146. + check_portal_channel);
  7147. + qm_put_unused_portal(mapping->qportal);
  7148. + } else if (mapping->user.type == usdpaa_portal_bman) {
  7149. + init_bm_portal(mapping->bportal,
  7150. + &mapping->bman_portal_low);
  7151. + bm_put_unused_portal(mapping->bportal);
  7152. + }
  7153. + kfree(mapping);
  7154. + return 0;
  7155. +}
  7156. +
  7157. +static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
  7158. +{
  7159. + struct ctx *ctx = fp->private_data;
  7160. + void __user *a = (void __user *)arg;
  7161. + switch (cmd) {
  7162. + case USDPAA_IOCTL_ID_ALLOC:
  7163. + return ioctl_id_alloc(ctx, a);
  7164. + case USDPAA_IOCTL_ID_RELEASE:
  7165. + return ioctl_id_release(ctx, a);
  7166. + case USDPAA_IOCTL_ID_RESERVE:
  7167. + return ioctl_id_reserve(ctx, a);
  7168. + case USDPAA_IOCTL_DMA_MAP:
  7169. + {
  7170. + struct usdpaa_ioctl_dma_map input;
  7171. + int ret;
  7172. + if (copy_from_user(&input, a, sizeof(input)))
  7173. + return -EFAULT;
  7174. + ret = ioctl_dma_map(fp, ctx, &input);
  7175. + if (copy_to_user(a, &input, sizeof(input)))
  7176. + return -EFAULT;
  7177. + return ret;
  7178. + }
  7179. + case USDPAA_IOCTL_DMA_UNMAP:
  7180. + return ioctl_dma_unmap(ctx, a);
  7181. + case USDPAA_IOCTL_DMA_LOCK:
  7182. + return ioctl_dma_lock(ctx, a);
  7183. + case USDPAA_IOCTL_DMA_UNLOCK:
  7184. + return ioctl_dma_unlock(ctx, a);
  7185. + case USDPAA_IOCTL_PORTAL_MAP:
  7186. + {
  7187. + struct usdpaa_ioctl_portal_map input;
  7188. + int ret;
  7189. + if (copy_from_user(&input, a, sizeof(input)))
  7190. + return -EFAULT;
  7191. + ret = ioctl_portal_map(fp, ctx, &input);
  7192. + if (copy_to_user(a, &input, sizeof(input)))
  7193. + return -EFAULT;
  7194. + return ret;
  7195. + }
  7196. + case USDPAA_IOCTL_PORTAL_UNMAP:
  7197. + {
  7198. + struct usdpaa_portal_map input;
  7199. + if (copy_from_user(&input, a, sizeof(input)))
  7200. + return -EFAULT;
  7201. + return ioctl_portal_unmap(ctx, &input);
  7202. + }
  7203. + case USDPAA_IOCTL_DMA_USED:
  7204. + return ioctl_dma_stats(ctx, a);
  7205. + case USDPAA_IOCTL_ALLOC_RAW_PORTAL:
  7206. + {
  7207. + struct usdpaa_ioctl_raw_portal input;
  7208. + int ret;
  7209. + if (copy_from_user(&input, a, sizeof(input)))
  7210. + return -EFAULT;
  7211. + ret = ioctl_allocate_raw_portal(fp, ctx, &input);
  7212. + if (copy_to_user(a, &input, sizeof(input)))
  7213. + return -EFAULT;
  7214. + return ret;
  7215. + }
  7216. + case USDPAA_IOCTL_FREE_RAW_PORTAL:
  7217. + {
  7218. + struct usdpaa_ioctl_raw_portal input;
  7219. + if (copy_from_user(&input, a, sizeof(input)))
  7220. + return -EFAULT;
  7221. + return ioctl_free_raw_portal(fp, ctx, &input);
  7222. + }
  7223. + }
  7224. + return -EINVAL;
  7225. +}
  7226. +
  7227. +static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
  7228. + unsigned long arg)
  7229. +{
  7230. +#ifdef CONFIG_COMPAT
  7231. + struct ctx *ctx = fp->private_data;
  7232. + void __user *a = (void __user *)arg;
  7233. +#endif
  7234. + switch (cmd) {
  7235. +#ifdef CONFIG_COMPAT
  7236. + case USDPAA_IOCTL_DMA_MAP_COMPAT:
  7237. + {
  7238. + int ret;
  7239. + struct usdpaa_ioctl_dma_map_compat input;
  7240. + struct usdpaa_ioctl_dma_map converted;
  7241. +
  7242. + if (copy_from_user(&input, a, sizeof(input)))
  7243. + return -EFAULT;
  7244. +
  7245. + converted.ptr = compat_ptr(input.ptr);
  7246. + converted.phys_addr = input.phys_addr;
  7247. + converted.len = input.len;
  7248. + converted.flags = input.flags;
  7249. + strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX);
  7250. + converted.has_locking = input.has_locking;
  7251. + converted.did_create = input.did_create;
  7252. +
  7253. + ret = ioctl_dma_map(fp, ctx, &converted);
  7254. + input.ptr = ptr_to_compat(converted.ptr);
  7255. + input.phys_addr = converted.phys_addr;
  7256. + input.len = converted.len;
  7257. + input.flags = converted.flags;
  7258. + strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX);
  7259. + input.has_locking = converted.has_locking;
  7260. + input.did_create = converted.did_create;
  7261. + if (copy_to_user(a, &input, sizeof(input)))
  7262. + return -EFAULT;
  7263. + return ret;
  7264. + }
  7265. + case USDPAA_IOCTL_PORTAL_MAP_COMPAT:
  7266. + {
  7267. + int ret;
  7268. + struct compat_usdpaa_ioctl_portal_map input;
  7269. + struct usdpaa_ioctl_portal_map converted;
  7270. + if (copy_from_user(&input, a, sizeof(input)))
  7271. + return -EFAULT;
  7272. + converted.type = input.type;
  7273. + converted.index = input.index;
  7274. + ret = ioctl_portal_map(fp, ctx, &converted);
  7275. + input.addr.cinh = ptr_to_compat(converted.addr.cinh);
  7276. + input.addr.cena = ptr_to_compat(converted.addr.cena);
  7277. + input.channel = converted.channel;
  7278. + input.pools = converted.pools;
  7279. + input.index = converted.index;
  7280. + if (copy_to_user(a, &input, sizeof(input)))
  7281. + return -EFAULT;
  7282. + return ret;
  7283. + }
  7284. + case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT:
  7285. + {
  7286. + struct usdpaa_portal_map_compat input;
  7287. + struct usdpaa_portal_map converted;
  7288. +
  7289. + if (copy_from_user(&input, a, sizeof(input)))
  7290. + return -EFAULT;
  7291. + converted.cinh = compat_ptr(input.cinh);
  7292. + converted.cena = compat_ptr(input.cena);
  7293. + return ioctl_portal_unmap(ctx, &converted);
  7294. + }
  7295. + case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT:
  7296. + {
  7297. + int ret;
  7298. + struct usdpaa_ioctl_raw_portal converted;
  7299. + struct compat_ioctl_raw_portal input;
  7300. + if (copy_from_user(&input, a, sizeof(input)))
  7301. + return -EFAULT;
  7302. + converted.type = input.type;
  7303. + converted.index = input.index;
  7304. + converted.enable_stash = input.enable_stash;
  7305. + converted.cpu = input.cpu;
  7306. + converted.cache = input.cache;
  7307. + converted.window = input.window;
  7308. + converted.sdest = input.sdest;
  7309. + ret = ioctl_allocate_raw_portal(fp, ctx, &converted);
  7310. +
  7311. + input.cinh = converted.cinh;
  7312. + input.cena = converted.cena;
  7313. + input.index = converted.index;
  7314. +
  7315. + if (copy_to_user(a, &input, sizeof(input)))
  7316. + return -EFAULT;
  7317. + return ret;
  7318. + }
  7319. + case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT:
  7320. + {
  7321. + struct usdpaa_ioctl_raw_portal converted;
  7322. + struct compat_ioctl_raw_portal input;
  7323. + if (copy_from_user(&input, a, sizeof(input)))
  7324. + return -EFAULT;
  7325. + converted.type = input.type;
  7326. + converted.index = input.index;
  7327. + converted.cinh = input.cinh;
  7328. + converted.cena = input.cena;
  7329. + return ioctl_free_raw_portal(fp, ctx, &converted);
  7330. + }
  7331. +#endif
  7332. + default:
  7333. + return usdpaa_ioctl(fp, cmd, arg);
  7334. + }
  7335. + return -EINVAL;
  7336. +}
  7337. +
  7338. +int usdpaa_get_portal_config(struct file *filp, void *cinh,
  7339. + enum usdpaa_portal_type ptype, unsigned int *irq,
  7340. + void **iir_reg)
  7341. +{
  7342. + /* Walk the list of portals for filp and return the config
  7343. + for the portal that matches the hint */
  7344. + struct ctx *context;
  7345. + struct portal_mapping *portal;
  7346. +
  7347. + /* First sanitize the filp */
  7348. + if (filp->f_op->open != usdpaa_open)
  7349. + return -ENODEV;
  7350. + context = filp->private_data;
  7351. + spin_lock(&context->lock);
  7352. + list_for_each_entry(portal, &context->portals, list) {
  7353. + if (portal->user.type == ptype &&
  7354. + portal->user.addr.cinh == cinh) {
  7355. + if (ptype == usdpaa_portal_qman) {
  7356. + *irq = portal->qportal->public_cfg.irq;
  7357. + *iir_reg = portal->qportal->addr_virt[1] +
  7358. + QM_REG_IIR;
  7359. + } else {
  7360. + *irq = portal->bportal->public_cfg.irq;
  7361. + *iir_reg = portal->bportal->addr_virt[1] +
  7362. + BM_REG_IIR;
  7363. + }
  7364. + spin_unlock(&context->lock);
  7365. + return 0;
  7366. + }
  7367. + }
  7368. + spin_unlock(&context->lock);
  7369. + return -EINVAL;
  7370. +}
  7371. +
  7372. +static const struct file_operations usdpaa_fops = {
  7373. + .open = usdpaa_open,
  7374. + .release = usdpaa_release,
  7375. + .mmap = usdpaa_mmap,
  7376. + .get_unmapped_area = usdpaa_get_unmapped_area,
  7377. + .unlocked_ioctl = usdpaa_ioctl,
  7378. + .compat_ioctl = usdpaa_ioctl_compat
  7379. +};
  7380. +
  7381. +static struct miscdevice usdpaa_miscdev = {
  7382. + .name = "fsl-usdpaa",
  7383. + .fops = &usdpaa_fops,
  7384. + .minor = MISC_DYNAMIC_MINOR,
  7385. +};
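For orientation, a minimal user-space sketch of how this misc device is driven (illustration only, not part of the patch; it assumes the UAPI header exports the names used by the ioctl handlers above, and the values passed are hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fsl_usdpaa.h>

	int main(void)
	{
		/* Ask the driver for 8 frame-queue IDs, 8-aligned, no partial allocation */
		struct usdpaa_ioctl_id_alloc req = {
			.id_type = usdpaa_id_fqid,
			.num = 8,
			.align = 8,
			.partial = 0,
		};
		int fd = open("/dev/fsl-usdpaa", O_RDWR);

		if (fd < 0 || ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &req) < 0) {
			perror("fsl-usdpaa");
			return 1;
		}
		printf("allocated %u FQIDs starting at %u\n", req.num, req.base);
		/* IDs not released via USDPAA_IOCTL_ID_RELEASE are reclaimed by
		 * usdpaa_release() when the file descriptor is closed */
		return 0;
	}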
  7386. +
  7387. +/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
  7388. + * indicate how much memory (if any) to allocate during early boot. If the
  7389. + * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
  7390. + * number of TLB1 entries to reserve (default is 1). If there are more mappings
  7391. + * than there are TLB1 entries, fault-handling will occur. */
  7392. +
  7393. +static __init int usdpaa_mem(char *arg)
  7394. +{
  7395. + pr_warn("uspdaa_mem argument is depracated\n");
  7396. + arg_phys_size = memparse(arg, &arg);
  7397. + num_tlb = 1;
  7398. + if (*arg == ',') {
  7399. + unsigned long ul;
  7400. + int err = kstrtoul(arg + 1, 0, &ul);
  7401. + if (err < 0) {
  7402. + num_tlb = 1;
  7403. + pr_warn("ERROR, usdpaa_mem arg is invalid\n");
  7404. + } else
  7405. + num_tlb = (unsigned int)ul;
  7406. + }
  7407. + return 0;
  7408. +}
  7409. +early_param("usdpaa_mem", usdpaa_mem);
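As an illustration of the (deprecated) boot argument parsed above, with hypothetical values:

	usdpaa_mem=256M,4	(256 MiB region, 4 TLB1 entries)

The 'fsl,usdpaa-mem' reserved-memory node handled below is the preferred way to size the region.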
  7410. +
  7411. +static int usdpaa_mem_init(struct reserved_mem *rmem)
  7412. +{
  7413. + phys_start = rmem->base;
  7414. + phys_size = rmem->size;
  7415. +
  7416. + WARN_ON(!(phys_start && phys_size));
  7417. +
  7418. + return 0;
  7419. +}
  7420. +RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
  7421. +
  7422. +__init int fsl_usdpaa_init_early(void)
  7423. +{
  7424. + if (!phys_size || !phys_start) {
  7425. + pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
  7426. + return 0;
  7427. + }
  7428. + if (phys_size % PAGE_SIZE) {
  7429. + pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
  7430. + phys_size = 0;
  7431. + return 0;
  7432. + }
  7433. + if (arg_phys_size && phys_size != arg_phys_size) {
  7434. + pr_err("'usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
  7435. + arg_phys_size, phys_size);
  7436. + phys_size = 0;
  7437. + return 0;
  7438. + }
  7439. + pfn_start = phys_start >> PAGE_SHIFT;
  7440. + pfn_size = phys_size >> PAGE_SHIFT;
  7441. +#ifdef CONFIG_PPC
  7442. + first_tlb = current_tlb = tlbcam_index;
  7443. + tlbcam_index += num_tlb;
  7444. +#endif
  7445. + pr_info("USDPAA region at %llx:%llx(%lx:%lx), %d TLB1 entries)\n",
  7446. + phys_start, phys_size, pfn_start, pfn_size, num_tlb);
  7447. + return 0;
  7448. +}
  7449. +subsys_initcall(fsl_usdpaa_init_early);
  7450. +
  7452. +static int __init usdpaa_init(void)
  7453. +{
  7454. + struct mem_fragment *frag;
  7455. + int ret;
  7456. + u64 tmp_size = phys_size;
  7457. + u64 tmp_start = phys_start;
  7458. + u64 tmp_pfn_size = pfn_size;
  7459. + u64 tmp_pfn_start = pfn_start;
  7460. +
  7461. + pr_info("Freescale USDPAA process driver\n");
  7462. + if (!phys_start) {
  7463. + pr_warn("fsl-usdpaa: no region found\n");
  7464. + return 0;
  7465. + }
  7466. +
  7467. + while (tmp_size != 0) {
  7468. + u32 frag_size = largest_page_size(tmp_size);
  7469. + frag = kmalloc(sizeof(*frag), GFP_KERNEL);
  7470. + if (!frag) {
  7471. + pr_err("Failed to setup USDPAA memory accounting\n");
  7472. + return -ENOMEM;
  7473. + }
  7474. + frag->base = tmp_start;
  7475. + frag->len = frag->root_len = frag_size;
  7476. + frag->root_pfn = tmp_pfn_start;
  7477. + frag->pfn_base = tmp_pfn_start;
  7478. + frag->pfn_len = frag_size / PAGE_SIZE;
  7479. + frag->refs = 0;
  7480. + init_waitqueue_head(&frag->wq);
  7481. + frag->owner = NULL;
  7482. + list_add(&frag->list, &mem_list);
  7483. +
  7484. + /* Adjust for this frag */
  7485. + tmp_start += frag_size;
  7486. + tmp_size -= frag_size;
  7487. + tmp_pfn_start += frag_size / PAGE_SIZE;
  7488. + tmp_pfn_size -= frag_size / PAGE_SIZE;
  7489. + }
  7490. + ret = misc_register(&usdpaa_miscdev);
  7491. + if (ret)
  7492. + pr_err("fsl-usdpaa: failed to register misc device\n");
  7493. + return ret;
  7494. +}
  7495. +
  7496. +static void __exit usdpaa_exit(void)
  7497. +{
  7498. + misc_deregister(&usdpaa_miscdev);
  7499. +}
  7500. +
  7501. +module_init(usdpaa_init);
  7502. +module_exit(usdpaa_exit);
  7503. +
  7504. +MODULE_LICENSE("GPL");
  7505. +MODULE_AUTHOR("Freescale Semiconductor");
  7506. +MODULE_DESCRIPTION("Freescale USDPAA process driver");
  7507. --- /dev/null
  7508. +++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
  7509. @@ -0,0 +1,289 @@
  7510. +/* Copyright (c) 2013 Freescale Semiconductor, Inc.
  7511. + * All rights reserved.
  7512. + *
  7513. + * Redistribution and use in source and binary forms, with or without
  7514. + * modification, are permitted provided that the following conditions are met:
  7515. + * * Redistributions of source code must retain the above copyright
  7516. + * notice, this list of conditions and the following disclaimer.
  7517. + * * Redistributions in binary form must reproduce the above copyright
  7518. + * notice, this list of conditions and the following disclaimer in the
  7519. + * documentation and/or other materials provided with the distribution.
  7520. + * * Neither the name of Freescale Semiconductor nor the
  7521. + * names of its contributors may be used to endorse or promote products
  7522. + * derived from this software without specific prior written permission.
  7523. + *
  7524. + *
  7525. + * ALTERNATIVELY, this software may be distributed under the terms of the
  7526. + * GNU General Public License ("GPL") as published by the Free Software
  7527. + * Foundation, either version 2 of that License or (at your option) any
  7528. + * later version.
  7529. + *
  7530. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  7531. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  7532. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  7533. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  7534. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  7535. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  7536. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  7537. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7538. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  7539. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7540. + */
  7541. +
  7542. +/* Define a device that allows USDPAA processes to open a file
  7543. + descriptor and specify which IRQ they want to monitor using an ioctl().
  7544. + When an IRQ is received, the device becomes readable so that a process
  7545. + can use read() or select() type calls to wait for IRQs */
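As a rough sketch of the user-space side this comment describes (assuming the UAPI definitions added elsewhere in this patch in linux/fsl_usdpaa.h; the portal-type constant and the helper name are hypothetical, and the device node name comes from the miscdevice registered below):

    /* Hypothetical user-space usage sketch, not part of the patch. */
    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fsl_usdpaa.h>

    /* usdpaa_fd: an already-open /dev/fsl-usdpaa portal fd
     * portal_cinh: that portal's cache-inhibited register mapping */
    static int wait_for_portal_irq(int usdpaa_fd, void *portal_cinh)
    {
        struct usdpaa_ioctl_irq_map map = {
            .type = usdpaa_portal_qman, /* assumed portal-type enum value */
            .fd = usdpaa_fd,
            .portal_cinh = portal_cinh,
        };
        uint32_t count;
        int irq_fd = open("/dev/fsl-usdpaa-irq", O_RDONLY);

        if (irq_fd < 0)
            return -1;
        /* Bind this fd to the portal's interrupt */
        if (ioctl(irq_fd, USDPAA_IOCTL_PORTAL_IRQ_MAP, &map) < 0) {
            close(irq_fd);
            return -1;
        }
        /* Blocks until the kernel handler increments irq_count */
        if (read(irq_fd, &count, sizeof(count)) != sizeof(count)) {
            close(irq_fd);
            return -1;
        }
        printf("portal IRQ fired, cumulative count %u\n", count);
        close(irq_fd);
        return 0;
    }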
  7546. +
  7547. +#include <linux/miscdevice.h>
  7548. +#include <linux/fs.h>
  7549. +#include <linux/cdev.h>
  7550. +#include <linux/slab.h>
  7551. +#include <linux/interrupt.h>
  7552. +#include <linux/poll.h>
  7553. +#include <linux/uaccess.h>
  7554. +#include <linux/fsl_usdpaa.h>
  7555. +#include <linux/module.h>
  7556. +#include <linux/fdtable.h>
  7557. +#include <linux/file.h>
  7558. +
  7559. +#include "qman_low.h"
  7560. +#include "bman_low.h"
  7561. +
  7562. +struct usdpaa_irq_ctx {
  7563. + int irq_set; /* Set to true once the irq is set via ioctl */
  7564. + unsigned int irq_num;
  7565. + u32 last_irq_count; /* Last value returned from read */
  7566. + u32 irq_count; /* Number of irqs since last read */
  7567. + wait_queue_head_t wait_queue; /* Waiting processes */
  7568. + spinlock_t lock;
  7569. + void *inhibit_addr; /* inhibit register address */
  7570. + struct file *usdpaa_filp;
  7571. + char irq_name[128];
  7572. +};
  7573. +
  7574. +static int usdpaa_irq_open(struct inode *inode, struct file *filp)
  7575. +{
  7576. + struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
  7577. + if (!ctx)
  7578. + return -ENOMEM;
  7579. + ctx->irq_set = 0;
  7580. + ctx->irq_count = 0;
  7581. + ctx->last_irq_count = 0;
  7582. + init_waitqueue_head(&ctx->wait_queue);
  7583. + spin_lock_init(&ctx->lock);
  7584. + filp->private_data = ctx;
  7585. + return 0;
  7586. +}
  7587. +
  7588. +static int usdpaa_irq_release(struct inode *inode, struct file *filp)
  7589. +{
  7590. + struct usdpaa_irq_ctx *ctx = filp->private_data;
  7591. + if (ctx->irq_set) {
  7592. + /* Inhibit the IRQ */
  7593. + out_be32(ctx->inhibit_addr, 0x1);
  7594. + irq_set_affinity_hint(ctx->irq_num, NULL);
  7595. + free_irq(ctx->irq_num, ctx);
  7596. + ctx->irq_set = 0;
  7597. + fput(ctx->usdpaa_filp);
  7598. + }
  7599. + kfree(filp->private_data);
  7600. + return 0;
  7601. +}
  7602. +
  7603. +static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
  7604. +{
  7605. + unsigned long flags;
  7606. + struct usdpaa_irq_ctx *ctx = _ctx;
  7607. + spin_lock_irqsave(&ctx->lock, flags);
  7608. + ++ctx->irq_count;
  7609. + spin_unlock_irqrestore(&ctx->lock, flags);
  7610. + wake_up_all(&ctx->wait_queue);
  7611. + /* Set the inhibit register. The IRQ will be re-enabled (uninhibited)
  7612. + once the USDPAA code has handled it */
  7613. + out_be32(ctx->inhibit_addr, 0x1);
  7614. + pr_info("Inhibit at %p count %d", ctx->inhibit_addr, ctx->irq_count);
  7615. + return IRQ_HANDLED;
  7616. +}
  7617. +
  7618. +static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
  7619. +{
  7620. + struct usdpaa_irq_ctx *ctx = fp->private_data;
  7621. + int ret;
  7622. +
  7623. + if (ctx->irq_set) {
  7624. + pr_debug("Setting USDPAA IRQ when it was already set!\n");
  7625. + return -EBUSY;
  7626. + }
  7627. +
  7628. + ctx->usdpaa_filp = fget(irq_map->fd);
  7629. + if (!ctx->usdpaa_filp) {
  7630. + pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
  7631. + return -EINVAL;
  7632. + }
  7633. +
  7634. + ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
  7635. + irq_map->type, &ctx->irq_num,
  7636. + &ctx->inhibit_addr);
  7637. + if (ret) {
  7638. + pr_debug("USDPAA IRQ couldn't identify portal\n");
  7639. + fput(ctx->usdpaa_filp);
  7640. + return ret;
  7641. + }
  7642. +
  7643. + ctx->irq_set = 1;
  7644. +
  7645. + snprintf(ctx->irq_name, sizeof(ctx->irq_name),
  7646. + "usdpaa_irq %d", ctx->irq_num);
  7647. +
  7648. + ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
  7649. + ctx->irq_name, ctx);
  7650. + if (ret) {
  7651. + pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
  7652. + ctx->irq_num, ret);
  7653. + ctx->irq_set = 0;
  7654. + fput(ctx->usdpaa_filp);
  7655. + return ret;
  7656. + }
  7657. + ret = irq_set_affinity(ctx->irq_num, tsk_cpus_allowed(current));
  7658. + if (ret)
  7659. + pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
  7660. +
  7661. + ret = irq_set_affinity_hint(ctx->irq_num, tsk_cpus_allowed(current));
  7662. + if (ret)
  7663. + pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
  7664. +
  7665. + return 0;
  7666. +}
  7667. +
  7668. +static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
  7669. + unsigned long arg)
  7670. +{
  7671. + int ret;
  7672. + struct usdpaa_ioctl_irq_map irq_map;
  7673. +
  7674. + if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
  7675. + pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
  7676. + return -EINVAL;
  7677. + }
  7678. +
  7679. + ret = copy_from_user(&irq_map, (void __user *)arg,
  7680. + sizeof(irq_map));
  7681. + if (ret)
  7682. + return -EFAULT;
  7683. + return map_irq(fp, &irq_map);
  7684. +}
  7685. +
  7686. +static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
  7687. + size_t count, loff_t *offp)
  7688. +{
  7689. + struct usdpaa_irq_ctx *ctx = filp->private_data;
  7690. + int ret;
  7691. +
  7692. + if (!ctx->irq_set) {
  7693. + pr_debug("Reading USDPAA IRQ before it was set\n");
  7694. + return -EINVAL;
  7695. + }
  7696. +
  7697. + if (count < sizeof(ctx->irq_count)) {
  7698. + pr_debug("USDPAA IRQ Read too small\n");
  7699. + return -EINVAL;
  7700. + }
  7701. + if (ctx->irq_count == ctx->last_irq_count) {
  7702. + if (filp->f_flags & O_NONBLOCK)
  7703. + return -EAGAIN;
  7704. +
  7705. + ret = wait_event_interruptible(ctx->wait_queue,
  7706. + ctx->irq_count != ctx->last_irq_count);
  7707. + if (ret == -ERESTARTSYS)
  7708. + return ret;
  7709. + }
  7710. +
  7711. + ctx->last_irq_count = ctx->irq_count;
  7712. +
  7713. + if (copy_to_user(buff, &ctx->last_irq_count,
  7714. + sizeof(ctx->last_irq_count)))
  7715. + return -EFAULT;
  7716. + return sizeof(ctx->irq_count);
  7717. +}
  7718. +
  7719. +static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
  7720. +{
  7721. + struct usdpaa_irq_ctx *ctx = filp->private_data;
  7722. + unsigned int ret = 0;
  7723. + unsigned long flags;
  7724. +
  7725. + if (!ctx->irq_set)
  7726. + return POLLHUP;
  7727. +
  7728. + poll_wait(filp, &ctx->wait_queue, wait);
  7729. +
  7730. + spin_lock_irqsave(&ctx->lock, flags);
  7731. + if (ctx->irq_count != ctx->last_irq_count)
  7732. + ret |= POLLIN | POLLRDNORM;
  7733. + spin_unlock_irqrestore(&ctx->lock, flags);
  7734. + return ret;
  7735. +}
  7736. +
  7737. +static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
  7738. + unsigned long arg)
  7739. +{
  7740. +#ifdef CONFIG_COMPAT
  7741. + void __user *a = (void __user *)arg;
  7742. +#endif
  7743. + switch (cmd) {
  7744. +#ifdef CONFIG_COMPAT
  7745. + case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
  7746. + {
  7747. + struct compat_ioctl_irq_map input;
  7748. + struct usdpaa_ioctl_irq_map converted;
  7749. + if (copy_from_user(&input, a, sizeof(input)))
  7750. + return -EFAULT;
  7751. + converted.type = input.type;
  7752. + converted.fd = input.fd;
  7753. + converted.portal_cinh = compat_ptr(input.portal_cinh);
  7754. + return map_irq(fp, &converted);
  7755. + }
  7756. +#endif
  7757. + default:
  7758. + return usdpaa_irq_ioctl(fp, cmd, arg);
  7759. + }
  7760. +}
  7761. +
  7762. +static const struct file_operations usdpaa_irq_fops = {
  7763. + .open = usdpaa_irq_open,
  7764. + .release = usdpaa_irq_release,
  7765. + .unlocked_ioctl = usdpaa_irq_ioctl,
  7766. + .compat_ioctl = usdpaa_irq_ioctl_compat,
  7767. + .read = usdpaa_irq_read,
  7768. + .poll = usdpaa_irq_poll
  7769. +};
  7770. +
  7771. +static struct miscdevice usdpaa_miscdev = {
  7772. + .name = "fsl-usdpaa-irq",
  7773. + .fops = &usdpaa_irq_fops,
  7774. + .minor = MISC_DYNAMIC_MINOR,
  7775. +};
  7776. +
  7777. +static int __init usdpaa_irq_init(void)
  7778. +{
  7779. + int ret;
  7780. +
  7781. + pr_info("Freescale USDPAA process IRQ driver\n");
  7782. + ret = misc_register(&usdpaa_miscdev);
  7783. + if (ret)
  7784. + pr_err("fsl-usdpaa-irq: failed to register misc device\n");
  7785. + return ret;
  7786. +}
  7787. +
  7788. +static void __exit usdpaa_irq_exit(void)
  7789. +{
  7790. + misc_deregister(&usdpaa_miscdev);
  7791. +}
  7792. +
  7793. +module_init(usdpaa_irq_init);
  7794. +module_exit(usdpaa_irq_exit);
  7795. +
  7796. +MODULE_LICENSE("GPL");
  7797. +MODULE_AUTHOR("Freescale Semiconductor");
  7798. +MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver");
  7799. --- /dev/null
  7800. +++ b/drivers/staging/fsl_qbman/qbman_driver.c
  7801. @@ -0,0 +1,88 @@
  7802. +/* Copyright 2013 Freescale Semiconductor, Inc.
  7803. + *
  7804. + * Redistribution and use in source and binary forms, with or without
  7805. + * modification, are permitted provided that the following conditions are met:
  7806. + * * Redistributions of source code must retain the above copyright
  7807. + * notice, this list of conditions and the following disclaimer.
  7808. + * * Redistributions in binary form must reproduce the above copyright
  7809. + * notice, this list of conditions and the following disclaimer in the
  7810. + * documentation and/or other materials provided with the distribution.
  7811. + * * Neither the name of Freescale Semiconductor nor the
  7812. + * names of its contributors may be used to endorse or promote products
  7813. + * derived from this software without specific prior written permission.
  7814. + *
  7815. + *
  7816. + * ALTERNATIVELY, this software may be distributed under the terms of the
  7817. + * GNU General Public License ("GPL") as published by the Free Software
  7818. + * Foundation, either version 2 of that License or (at your option) any
  7819. + * later version.
  7820. + *
  7821. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  7822. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  7823. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  7824. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  7825. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  7826. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  7827. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  7828. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7829. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  7830. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7831. + */
  7832. +
  7833. +#include <linux/time.h>
  7834. +#include "qman_private.h"
  7835. +#include "bman_private.h"
  7836. +__init void qman_init_early(void);
  7837. +__init void bman_init_early(void);
  7838. +
  7839. +static __init int qbman_init(void)
  7840. +{
  7841. + struct device_node *dn;
  7842. + u32 is_portal_available;
  7843. +
  7844. + bman_init();
  7845. + qman_init();
  7846. +
  7847. + is_portal_available = 0;
  7848. + for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
  7849. + if (of_device_is_available(dn))
  7850. + is_portal_available = 1;
  7853. + }
  7854. +
  7855. + if (!qman_have_ccsr() && is_portal_available) {
  7856. + struct qman_fq fq = {
  7857. + .fqid = 1
  7858. + };
  7859. + struct qm_mcr_queryfq_np np;
  7860. + int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
  7861. + struct timespec nowts, diffts, startts = current_kernel_time();
  7862. + /* Loop until querying the given FQID succeeds or we time out */
  7863. + while (1) {
  7864. + err = qman_query_fq_np(&fq, &np);
  7865. + if (!err) {
  7866. + /* success, control-plane has configured QMan */
  7867. + break;
  7868. + } else if (err != -ERANGE) {
  7869. + pr_err("QMan: I/O error, continuing anyway\n");
  7870. + break;
  7871. + }
  7872. + nowts = current_kernel_time();
  7873. + diffts = timespec_sub(nowts, startts);
  7874. + if (diffts.tv_sec > 0) {
  7875. + if (!retry--) {
  7876. + pr_err("QMan: time out, control-plane"
  7877. + " dead?\n");
  7878. + break;
  7879. + }
  7880. + pr_warn("QMan: polling for the control-plane"
  7881. + " (%d)\n", retry);
  7882. + }
  7883. + }
  7884. + }
  7885. + bman_resource_init();
  7886. + qman_resource_init();
  7887. + return 0;
  7888. +}
  7889. +subsys_initcall(qbman_init);
  7890. --- /dev/null
  7891. +++ b/drivers/staging/fsl_qbman/qman_config.c
  7892. @@ -0,0 +1,1199 @@
  7893. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  7894. + *
  7895. + * Redistribution and use in source and binary forms, with or without
  7896. + * modification, are permitted provided that the following conditions are met:
  7897. + * * Redistributions of source code must retain the above copyright
  7898. + * notice, this list of conditions and the following disclaimer.
  7899. + * * Redistributions in binary form must reproduce the above copyright
  7900. + * notice, this list of conditions and the following disclaimer in the
  7901. + * documentation and/or other materials provided with the distribution.
  7902. + * * Neither the name of Freescale Semiconductor nor the
  7903. + * names of its contributors may be used to endorse or promote products
  7904. + * derived from this software without specific prior written permission.
  7905. + *
  7906. + *
  7907. + * ALTERNATIVELY, this software may be distributed under the terms of the
  7908. + * GNU General Public License ("GPL") as published by the Free Software
  7909. + * Foundation, either version 2 of that License or (at your option) any
  7910. + * later version.
  7911. + *
  7912. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  7913. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  7914. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  7915. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  7916. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  7917. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  7918. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  7919. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7920. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  7921. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7922. + */
  7923. +
  7924. +#include <asm/cacheflush.h>
  7925. +#include "qman_private.h"
  7926. +#include <linux/highmem.h>
  7927. +#include <linux/of_reserved_mem.h>
  7928. +
  7929. +/* Last updated for v00.800 of the BG */
  7930. +
  7931. +/* Register offsets */
  7932. +#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
  7933. +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
  7934. +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
  7935. +#define REG_DD_CFG 0x0200
  7936. +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
  7937. +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
  7938. +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
  7939. +#define REG_PFDR_FPC 0x0400
  7940. +#define REG_PFDR_FP_HEAD 0x0404
  7941. +#define REG_PFDR_FP_TAIL 0x0408
  7942. +#define REG_PFDR_FP_LWIT 0x0410
  7943. +#define REG_PFDR_CFG 0x0414
  7944. +#define REG_SFDR_CFG 0x0500
  7945. +#define REG_SFDR_IN_USE 0x0504
  7946. +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
  7947. +#define REG_WQ_DEF_ENC_WQID 0x0630
  7948. +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
  7949. +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
  7950. +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
  7951. +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
  7952. +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
  7953. +#define REG_CM_CFG 0x0800
  7954. +#define REG_ECSR 0x0a00
  7955. +#define REG_ECIR 0x0a04
  7956. +#define REG_EADR 0x0a08
  7957. +#define REG_ECIR2 0x0a0c
  7958. +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
  7959. +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
  7960. +#define REG_MCR 0x0b00
  7961. +#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
  7962. +#define REG_MISC_CFG 0x0be0
  7963. +#define REG_HID_CFG 0x0bf0
  7964. +#define REG_IDLE_STAT 0x0bf4
  7965. +#define REG_IP_REV_1 0x0bf8
  7966. +#define REG_IP_REV_2 0x0bfc
  7967. +#define REG_FQD_BARE 0x0c00
  7968. +#define REG_PFDR_BARE 0x0c20
  7969. +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
  7970. +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
  7971. +#define REG_QCSP_BARE 0x0c80
  7972. +#define REG_QCSP_BAR 0x0c84
  7973. +#define REG_CI_SCHED_CFG 0x0d00
  7974. +#define REG_SRCIDR 0x0d04
  7975. +#define REG_LIODNR 0x0d08
  7976. +#define REG_CI_RLM_AVG 0x0d14
  7977. +#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
  7978. +#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
  7979. +#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
  7980. +#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
  7981. +#define REG_CEETM_CFG_IDX 0x900
  7982. +#define REG_CEETM_CFG_PRES 0x904
  7983. +#define REG_CEETM_XSFDR_IN_USE 0x908
  7984. +
  7985. +/* Assists for QMAN_MCR */
  7986. +#define MCR_INIT_PFDR 0x01000000
  7987. +#define MCR_get_rslt(v) (u8)((v) >> 24)
  7988. +#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
  7989. +#define MCR_rslt_ok(r) ((r) == 0xf0)
  7990. +#define MCR_rslt_eaccess(r) ((r) == 0xf8)
  7991. +#define MCR_rslt_inval(r) ((r) == 0xff)
  7992. +
  7993. +struct qman;
  7994. +
  7995. +/* Follows WQ_CS_CFG0-5 */
  7996. +enum qm_wq_class {
  7997. + qm_wq_portal = 0,
  7998. + qm_wq_pool = 1,
  7999. + qm_wq_fman0 = 2,
  8000. + qm_wq_fman1 = 3,
  8001. + qm_wq_caam = 4,
  8002. + qm_wq_pme = 5,
  8003. + qm_wq_first = qm_wq_portal,
  8004. + qm_wq_last = qm_wq_pme
  8005. +};
  8006. +
  8007. +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
  8008. +enum qm_memory {
  8009. + qm_memory_fqd,
  8010. + qm_memory_pfdr
  8011. +};
  8012. +
  8013. +/* Used by all error interrupt registers except 'inhibit' */
  8014. +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
  8015. +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
  8016. +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
  8017. +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
  8018. +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
  8019. +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
  8020. +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
  8021. +#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
  8022. +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
  8023. +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
  8024. +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
  8025. +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
  8026. +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
  8027. +#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
  8028. +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
  8029. +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
  8030. +#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
  8031. +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
  8032. +
  8033. +/* QMAN_ECIR valid error bit */
  8034. +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
  8035. + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
  8036. + QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
  8037. +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
  8038. + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
  8039. + QM_EIRQ_IFSI)
  8040. +
  8041. +union qman_ecir {
  8042. + u32 ecir_raw;
  8043. + struct {
  8044. + u32 __reserved:2;
  8045. + u32 portal_type:1;
  8046. + u32 portal_num:5;
  8047. + u32 fqid:24;
  8048. + } __packed info;
  8049. +};
  8050. +
  8051. +union qman_ecir2 {
  8052. + u32 ecir2_raw;
  8053. + struct {
  8054. + u32 portal_type:1;
  8055. + u32 __reserved:21;
  8056. + u32 portal_num:10;
  8057. + } __packed info;
  8058. +};
  8059. +
  8060. +union qman_eadr {
  8061. + u32 eadr_raw;
  8062. + struct {
  8063. + u32 __reserved1:4;
  8064. + u32 memid:4;
  8065. + u32 __reserved2:12;
  8066. + u32 eadr:12;
  8067. + } __packed info;
  8068. + struct {
  8069. + u32 __reserved1:3;
  8070. + u32 memid:5;
  8071. + u32 __reserved:8;
  8072. + u32 eadr:16;
  8073. + } __packed info_rev3;
  8074. +};
  8075. +
  8076. +struct qman_hwerr_txt {
  8077. + u32 mask;
  8078. + const char *txt;
  8079. +};
  8080. +
  8081. +#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
  8082. +
  8083. +static const struct qman_hwerr_txt qman_hwerr_txts[] = {
  8084. + QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
  8085. + QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
  8086. + QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
  8087. + QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
  8088. + QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
  8089. + QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
  8090. + QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
  8091. + QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
  8092. + QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
  8093. + QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
  8094. + QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
  8095. + QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
  8096. + QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
  8097. + QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
  8098. + QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
  8099. + QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
  8100. + QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
  8101. + QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
  8102. +};
  8103. +#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
  8104. +
  8105. +struct qman_error_info_mdata {
  8106. + u16 addr_mask;
  8107. + u16 bits;
  8108. + const char *txt;
  8109. +};
  8110. +
  8111. +#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
  8112. +static const struct qman_error_info_mdata error_mdata[] = {
  8113. + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
  8114. + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
  8115. + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
  8116. + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
  8117. + QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
  8118. + QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
  8119. + QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
  8120. + QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
  8121. + QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
  8122. + QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
  8123. + QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"),
  8124. + QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"),
  8125. + QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"),
  8126. + QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"),
  8127. + QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"),
  8128. + QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"),
  8129. + QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"),
  8130. + QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"),
  8131. +};
  8132. +#define QMAN_ERR_MDATA_COUNT \
  8133. + (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
  8134. +
  8135. +/* TODO: make this configurable via Kconfig */
  8136. +#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
  8137. +
  8138. +/**
  8139. + * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
  8140. + * @v: for accessors that write values, this is the 32-bit value
  8141. + *
  8142. + * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
  8143. + * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
  8144. + * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
  8145. + * "write the enable register" rather than "enable the write register"!
  8146. + */
  8147. +#define qm_err_isr_status_read(qm) \
  8148. + __qm_err_isr_read(qm, qm_isr_status)
  8149. +#define qm_err_isr_status_clear(qm, m) \
  8150. + __qm_err_isr_write(qm, qm_isr_status, m)
  8151. +#define qm_err_isr_enable_read(qm) \
  8152. + __qm_err_isr_read(qm, qm_isr_enable)
  8153. +#define qm_err_isr_enable_write(qm, v) \
  8154. + __qm_err_isr_write(qm, qm_isr_enable, v)
  8155. +#define qm_err_isr_disable_read(qm) \
  8156. + __qm_err_isr_read(qm, qm_isr_disable)
  8157. +#define qm_err_isr_disable_write(qm, v) \
  8158. + __qm_err_isr_write(qm, qm_isr_disable, v)
  8159. +#define qm_err_isr_inhibit(qm) \
  8160. + __qm_err_isr_write(qm, qm_isr_inhibit, 1)
  8161. +#define qm_err_isr_uninhibit(qm) \
  8162. + __qm_err_isr_write(qm, qm_isr_inhibit, 0)
  8163. +
  8164. +/*
  8165. + * TODO: unimplemented registers
  8166. + *
  8167. + * Keeping a list here of Qman registers I have not yet covered;
  8168. + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
  8169. + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
  8170. + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
  8171. + */
  8172. +
  8173. +/* Encapsulate "struct qman *" as a cast of the register space address. */
  8174. +
  8175. +static struct qman *qm_create(void *regs)
  8176. +{
  8177. + return (struct qman *)regs;
  8178. +}
  8179. +
  8180. +static inline u32 __qm_in(struct qman *qm, u32 offset)
  8181. +{
  8182. + return in_be32((void *)qm + offset);
  8183. +}
  8184. +static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
  8185. +{
  8186. + out_be32((void *)qm + offset, val);
  8187. +}
  8188. +#define qm_in(reg) __qm_in(qm, REG_##reg)
  8189. +#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
  8190. +
  8191. +static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
  8192. +{
  8193. + return __qm_in(qm, REG_ERR_ISR + (n << 2));
  8194. +}
  8195. +
  8196. +static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
  8197. +{
  8198. + __qm_out(qm, REG_ERR_ISR + (n << 2), val);
  8199. +}
  8200. +
  8201. +static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
  8202. + int ed, u8 sernd)
  8203. +{
  8204. + DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
  8205. + (portal == qm_dc_portal_fman1));
  8206. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
  8207. + qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
  8208. + else
  8209. + qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
  8210. +}
  8211. +
  8212. +static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
  8213. + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
  8214. + u8 csw6, u8 csw7)
  8215. +{
  8216. + qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
  8217. + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
  8218. + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
  8219. + ((csw6 & 0x7) << 4) | (csw7 & 0x7));
  8220. +}
  8221. +
  8222. +static void qm_set_hid(struct qman *qm)
  8223. +{
  8224. + qm_out(HID_CFG, 0);
  8225. +}
  8226. +
  8227. +static void qm_set_corenet_initiator(struct qman *qm)
  8228. +{
  8229. + qm_out(CI_SCHED_CFG,
  8230. + 0x80000000 | /* write srcciv enable */
  8231. + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
  8232. + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
  8233. + (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
  8234. + CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W);
  8235. +}
  8236. +
  8237. +static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor,
  8238. + u8 *cfg)
  8239. +{
  8240. + u32 v = qm_in(IP_REV_1);
  8241. + u32 v2 = qm_in(IP_REV_2);
  8242. + *id = (v >> 16);
  8243. + *major = (v >> 8) & 0xff;
  8244. + *minor = v & 0xff;
  8245. + *cfg = v2 & 0xff;
  8246. +}
  8247. +
  8248. +static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
  8249. + int enable, int prio, int stash, u32 size)
  8250. +{
  8251. + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
  8252. + u32 exp = ilog2(size);
  8253. + /* choke if size isn't within range */
  8254. + DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
  8255. + is_power_of_2(size));
  8256. + /* choke if 'ba' has lower-alignment than 'size' */
  8257. + DPA_ASSERT(!(ba & (size - 1)));
  8258. + __qm_out(qm, offset, upper_32_bits(ba));
  8259. + __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
  8260. + __qm_out(qm, offset + REG_offset_AR,
  8261. + (enable ? 0x80000000 : 0) |
  8262. + (prio ? 0x40000000 : 0) |
  8263. + (stash ? 0x20000000 : 0) |
  8264. + (exp - 1));
  8265. +}
  8266. +
  8267. +static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
  8268. +{
  8269. + qm_out(PFDR_FP_LWIT, th & 0xffffff);
  8270. + qm_out(PFDR_CFG, k);
  8271. +}
  8272. +
  8273. +static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
  8274. +{
  8275. + qm_out(SFDR_CFG, th & 0x3ff);
  8276. +}
  8277. +
  8278. +static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
  8279. +{
  8280. + u8 rslt = MCR_get_rslt(qm_in(MCR));
  8281. +
  8282. + DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
  8283. + /* Make sure the command interface is 'idle' */
  8284. + if (!MCR_rslt_idle(rslt))
  8285. + panic("QMAN_MCR isn't idle");
  8286. +
  8287. + /* Write the MCR command params then the verb */
  8288. + qm_out(MCP(0), pfdr_start);
  8289. + /* TODO: remove this - it's a workaround for a model bug that is
  8290. + * corrected in more recent versions. We use the workaround until
  8291. + * everyone has upgraded. */
  8292. + qm_out(MCP(1), (pfdr_start + num - 16));
  8293. + lwsync();
  8294. + qm_out(MCR, MCR_INIT_PFDR);
  8295. + /* Poll for the result */
  8296. + do {
  8297. + rslt = MCR_get_rslt(qm_in(MCR));
  8298. + } while (!MCR_rslt_idle(rslt));
  8299. + if (MCR_rslt_ok(rslt))
  8300. + return 0;
  8301. + if (MCR_rslt_eaccess(rslt))
  8302. + return -EACCES;
  8303. + if (MCR_rslt_inval(rslt))
  8304. + return -EINVAL;
  8305. + pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
  8306. + return -ENOSYS;
  8307. +}
  8308. +
  8309. +/*****************/
  8310. +/* Config driver */
  8311. +/*****************/
  8312. +
  8313. +#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ)
  8314. +#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ)
  8315. +
  8316. +/* We support only one of these */
  8317. +static struct qman *qm;
  8318. +static struct device_node *qm_node;
  8319. +
  8320. +/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
  8321. + * during qman_init_ccsr(). */
  8322. +static dma_addr_t fqd_a, pfdr_a;
  8323. +static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
  8324. +
  8325. +static int qman_fqd(struct reserved_mem *rmem)
  8326. +{
  8327. + fqd_a = rmem->base;
  8328. + fqd_sz = rmem->size;
  8329. +
  8330. + WARN_ON(!(fqd_a && fqd_sz));
  8331. +
  8332. + return 0;
  8333. +}
  8334. +RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
  8335. +
  8336. +static int qman_pfdr(struct reserved_mem *rmem)
  8337. +{
  8338. + pfdr_a = rmem->base;
  8339. + pfdr_sz = rmem->size;
  8340. +
  8341. + WARN_ON(!(pfdr_a && pfdr_sz));
  8342. +
  8343. + return 0;
  8344. +}
  8345. +RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
  8346. +
  8347. +size_t get_qman_fqd_size(void)
  8348. +{
  8349. + return fqd_sz;
  8350. +}
  8351. +
  8352. +/* The FQD/PFDR regions are reserved via the 'fsl,qman-fqd'/'fsl,qman-pfdr'
  8353. + * reserved-memory handlers above. This helper optionally zeroes the region
  8354. + * and flushes it from the data cache so that QMan-originated transactions
  8355. + * for this memory region can be marked non-coherent.
  8356. + */
  8357. +static __init int parse_mem_property(struct device_node *node, const char *name,
  8358. + dma_addr_t *addr, size_t *sz, int zero)
  8359. +{
  8360. + int ret;
  8361. +
  8362. + /* If using a "zero-pma", don't try to zero it, even if you asked */
  8363. + if (zero && of_find_property(node, "zero-pma", &ret)) {
  8364. + pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
  8365. + zero = 0;
  8366. + }
  8367. +
  8368. + if (zero) {
  8369. + /* map as cacheable, non-guarded */
  8370. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  8371. + void __iomem *tmpp = ioremap_cache(*addr, *sz);
  8372. +#else
  8373. + void __iomem *tmpp = ioremap(*addr, *sz);
  8374. +#endif
  8375. +
  8376. + if (!tmpp)
  8377. + return -ENOMEM;
  8378. + memset_io(tmpp, 0, *sz);
  8379. + flush_dcache_range((unsigned long)tmpp,
  8380. + (unsigned long)tmpp + *sz);
  8381. + iounmap(tmpp);
  8382. + }
  8383. +
  8384. + return 0;
  8385. +}
  8386. +
  8387. +/* TODO:
  8388. + * - there is obviously no handling of errors,
  8389. + * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
  8390. + * both memory resources to zero.
  8391. + */
  8392. +static int __init fsl_qman_init(struct device_node *node)
  8393. +{
  8394. + struct resource res;
  8395. + resource_size_t len;
  8396. + u32 __iomem *regs;
  8397. + const char *s;
  8398. + int ret, standby = 0;
  8399. + u16 id;
  8400. + u8 major, minor, cfg;
  8401. + ret = of_address_to_resource(node, 0, &res);
  8402. + if (ret) {
  8403. + pr_err("Can't get %s property '%s'\n", node->full_name, "reg");
  8404. + return ret;
  8405. + }
  8406. + s = of_get_property(node, "fsl,hv-claimable", &ret);
  8407. + if (s && !strcmp(s, "standby"))
  8408. + standby = 1;
  8409. + if (!standby) {
  8410. + ret = parse_mem_property(node, "fsl,qman-fqd",
  8411. + &fqd_a, &fqd_sz, 1);
  8412. + pr_info("qman-fqd addr 0x%llx size 0x%zx\n",
  8413. + (unsigned long long)fqd_a, fqd_sz);
  8414. + BUG_ON(ret);
  8415. + ret = parse_mem_property(node, "fsl,qman-pfdr",
  8416. + &pfdr_a, &pfdr_sz, 0);
  8417. + pr_info("qman-pfdr addr 0x%llx size 0x%zx\n",
  8418. + (unsigned long long)pfdr_a, pfdr_sz);
  8419. + BUG_ON(ret);
  8420. + }
  8421. + /* Global configuration */
  8422. + len = resource_size(&res);
  8423. + if (len != (unsigned long)len)
  8424. + return -EINVAL;
  8425. + regs = ioremap(res.start, (unsigned long)len);
  8426. + qm = qm_create(regs);
  8427. + qm_node = node;
  8428. + qm_get_version(qm, &id, &major, &minor, &cfg);
  8429. + pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg);
  8430. + if (!qman_ip_rev) {
  8431. + if ((major == 1) && (minor == 0)) {
  8432. + pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
  8433. + iounmap(regs);
  8434. + return -ENODEV;
  8435. + } else if ((major == 1) && (minor == 1))
  8436. + qman_ip_rev = QMAN_REV11;
  8437. + else if ((major == 1) && (minor == 2))
  8438. + qman_ip_rev = QMAN_REV12;
  8439. + else if ((major == 2) && (minor == 0))
  8440. + qman_ip_rev = QMAN_REV20;
  8441. + else if ((major == 3) && (minor == 0))
  8442. + qman_ip_rev = QMAN_REV30;
  8443. + else if ((major == 3) && (minor == 1))
  8444. + qman_ip_rev = QMAN_REV31;
  8445. + else if ((major == 3) && (minor == 2))
  8446. + qman_ip_rev = QMAN_REV32;
  8447. + else {
  8448. + pr_warn("unknown Qman version, default to rev1.1\n");
  8449. + qman_ip_rev = QMAN_REV11;
  8450. + }
  8451. + qman_ip_cfg = cfg;
  8452. + }
  8453. +
  8454. + if (standby) {
  8455. + pr_info(" -> in standby mode\n");
  8456. + return 0;
  8457. + }
  8458. + return 0;
  8459. +}
  8460. +
  8461. +int qman_have_ccsr(void)
  8462. +{
  8463. + return qm ? 1 : 0;
  8464. +}
  8465. +
  8466. +__init int qman_init_early(void)
  8467. +{
  8468. + struct device_node *dn;
  8469. + int ret;
  8470. +
  8471. + for_each_compatible_node(dn, NULL, "fsl,qman") {
  8472. + if (qm)
  8473. + pr_err("%s: only one 'fsl,qman' allowed\n",
  8474. + dn->full_name);
  8475. + else {
  8476. + if (!of_device_is_available(dn))
  8477. + continue;
  8478. +
  8479. + ret = fsl_qman_init(dn);
  8480. + BUG_ON(ret);
  8481. + }
  8482. + }
  8483. + return 0;
  8484. +}
  8485. +postcore_initcall_sync(qman_init_early);
  8486. +
  8487. +static void log_edata_bits(u32 bit_count)
  8488. +{
  8489. + u32 i, j, mask = 0xffffffff;
  8490. +
  8491. + pr_warn("Qman ErrInt, EDATA:\n");
  8492. + i = bit_count/32;
  8493. + if (bit_count%32) {
  8494. + i++;
  8495. + mask = ~(mask << bit_count%32);
  8496. + }
  8497. + j = 16-i;
  8498. + pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
  8499. + j++;
  8500. + for (; j < 16; j++)
  8501. + pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
  8502. +}
  8503. +
  8504. +static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
  8505. +{
  8506. + union qman_ecir ecir_val;
  8507. + union qman_eadr eadr_val;
  8508. +
  8509. + ecir_val.ecir_raw = qm_in(ECIR);
  8510. + /* Is portal info valid */
  8511. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
  8512. + union qman_ecir2 ecir2_val;
  8513. + ecir2_val.ecir2_raw = qm_in(ECIR2);
  8514. + if (ecsr_val & PORTAL_ECSR_ERR) {
  8515. + pr_warn("Qman ErrInt: %s id %d\n",
  8516. + (ecir2_val.info.portal_type) ?
  8517. + "DCP" : "SWP", ecir2_val.info.portal_num);
  8518. + }
  8519. + if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) {
  8520. + pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
  8521. + ecir_val.info.fqid);
  8522. + }
  8523. + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
  8524. + eadr_val.eadr_raw = qm_in(EADR);
  8525. + pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
  8526. + error_mdata[eadr_val.info_rev3.memid].txt,
  8527. + error_mdata[eadr_val.info_rev3.memid].addr_mask
  8528. + & eadr_val.info_rev3.eadr);
  8529. + log_edata_bits(
  8530. + error_mdata[eadr_val.info_rev3.memid].bits);
  8531. + }
  8532. + } else {
  8533. + if (ecsr_val & PORTAL_ECSR_ERR) {
  8534. + pr_warn("Qman ErrInt: %s id %d\n",
  8535. + (ecir_val.info.portal_type) ?
  8536. + "DCP" : "SWP", ecir_val.info.portal_num);
  8537. + }
  8538. + if (ecsr_val & FQID_ECSR_ERR) {
  8539. + pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
  8540. + ecir_val.info.fqid);
  8541. + }
  8542. + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
  8543. + eadr_val.eadr_raw = qm_in(EADR);
  8544. + pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
  8545. + error_mdata[eadr_val.info.memid].txt,
  8546. + error_mdata[eadr_val.info.memid].addr_mask
  8547. + & eadr_val.info.eadr);
  8548. + log_edata_bits(error_mdata[eadr_val.info.memid].bits);
  8549. + }
  8550. + }
  8551. +}
  8552. +
  8553. +/* Qman interrupt handler */
  8554. +static irqreturn_t qman_isr(int irq, void *ptr)
  8555. +{
  8556. + u32 isr_val, ier_val, ecsr_val, isr_mask, i;
  8557. +
  8558. + ier_val = qm_err_isr_enable_read(qm);
  8559. + isr_val = qm_err_isr_status_read(qm);
  8560. + ecsr_val = qm_in(ECSR);
  8561. + isr_mask = isr_val & ier_val;
  8562. +
  8563. + if (!isr_mask)
  8564. + return IRQ_NONE;
  8565. + for (i = 0; i < QMAN_HWE_COUNT; i++) {
  8566. + if (qman_hwerr_txts[i].mask & isr_mask) {
  8567. + pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt);
  8568. + if (qman_hwerr_txts[i].mask & ecsr_val) {
  8569. + log_additional_error_info(isr_mask, ecsr_val);
  8570. + /* Re-arm error capture registers */
  8571. + qm_out(ECSR, ecsr_val);
  8572. + }
  8573. + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
  8574. + pr_devel("Qman un-enabling error 0x%x\n",
  8575. + qman_hwerr_txts[i].mask);
  8576. + ier_val &= ~qman_hwerr_txts[i].mask;
  8577. + qm_err_isr_enable_write(qm, ier_val);
  8578. + }
  8579. + }
  8580. + }
  8581. + qm_err_isr_status_clear(qm, isr_val);
  8582. + return IRQ_HANDLED;
  8583. +}
  8584. +
  8585. +static int __bind_irq(void)
  8586. +{
  8587. + int ret, err_irq;
  8588. +
  8589. + err_irq = of_irq_to_resource(qm_node, 0, NULL);
  8590. + if (err_irq == 0) {
  8591. + pr_info("Can't get %s property '%s'\n", qm_node->full_name,
  8592. + "interrupts");
  8593. + return -ENODEV;
  8594. + }
  8595. + ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node);
  8596. + if (ret) {
  8597. + pr_err("request_irq() failed %d for '%s'\n", ret,
  8598. + qm_node->full_name);
  8599. + return -ENODEV;
  8600. + }
  8601. + /* Write-to-clear any stale bits, (eg. starvation being asserted prior
  8602. + * to resource allocation during driver init). */
  8603. + qm_err_isr_status_clear(qm, 0xffffffff);
  8604. + /* Enable Error Interrupts */
  8605. + qm_err_isr_enable_write(qm, 0xffffffff);
  8606. + return 0;
  8607. +}
  8608. +
  8609. +int qman_init_ccsr(struct device_node *node)
  8610. +{
  8611. + int ret;
  8612. + if (!qman_have_ccsr())
  8613. + return 0;
  8614. + if (node != qm_node)
  8615. + return -EINVAL;
  8616. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  8617. + /* TEMP for LS1043: should be done in u-boot */
  8618. + qm_out(QCSP_BARE, 0x5);
  8619. + qm_out(QCSP_BAR, 0x0);
  8620. +#endif
  8621. + /* FQD memory */
  8622. + qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
  8623. + /* PFDR memory */
  8624. + qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
  8625. + qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
  8626. + /* thresholds */
  8627. + qm_set_pfdr_threshold(qm, 512, 64);
  8628. + qm_set_sfdr_threshold(qm, 128);
  8629. + /* clear stale PEBI bit from interrupt status register */
  8630. + qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
  8631. + /* corenet initiator settings */
  8632. + qm_set_corenet_initiator(qm);
  8633. + /* HID settings */
  8634. + qm_set_hid(qm);
  8635. + /* Set scheduling weights to defaults */
  8636. + for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
  8637. + qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
  8638. + /* We are not prepared to accept ERNs for hardware enqueues */
  8639. + qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
  8640. + qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
  8641. + /* Initialise Error Interrupt Handler */
  8642. + ret = __bind_irq();
  8643. + if (ret)
  8644. + return ret;
  8645. + return 0;
  8646. +}
  8647. +
  8648. +#define LIO_CFG_LIODN_MASK 0x0fff0000
  8649. +void qman_liodn_fixup(u16 channel)
  8650. +{
  8651. + static int done;
  8652. + static u32 liodn_offset;
  8653. + u32 before, after;
  8654. + int idx = channel - QM_CHANNEL_SWPORTAL0;
  8655. +
  8656. + if (!qman_have_ccsr())
  8657. + return;
  8658. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
  8659. + before = qm_in(REV3_QCSP_LIO_CFG(idx));
  8660. + else
  8661. + before = qm_in(QCSP_LIO_CFG(idx));
  8662. + if (!done) {
  8663. + liodn_offset = before & LIO_CFG_LIODN_MASK;
  8664. + done = 1;
  8665. + return;
  8666. + }
  8667. + after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
  8668. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
  8669. + qm_out(REV3_QCSP_LIO_CFG(idx), after);
  8670. + else
  8671. + qm_out(QCSP_LIO_CFG(idx), after);
  8672. +}
  8673. +
  8674. +#define IO_CFG_SDEST_MASK 0x00ff0000
  8675. +int qman_set_sdest(u16 channel, unsigned int cpu_idx)
  8676. +{
  8677. + int idx = channel - QM_CHANNEL_SWPORTAL0;
  8678. + u32 before, after;
  8679. +
  8680. + if (!qman_have_ccsr())
  8681. + return -ENODEV;
  8682. + if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
  8683. + /* LS1043A - only one L2 cache */
  8684. + cpu_idx = 0;
  8685. + }
  8686. +
  8687. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
  8688. + before = qm_in(REV3_QCSP_IO_CFG(idx));
  8689. + /* Each pair of vCPUs shares the same SRQ (SDEST) */
  8690. + cpu_idx /= 2;
  8691. + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
  8692. + qm_out(REV3_QCSP_IO_CFG(idx), after);
  8693. + } else {
  8694. + before = qm_in(QCSP_IO_CFG(idx));
  8695. + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
  8696. + qm_out(QCSP_IO_CFG(idx), after);
  8697. + }
  8698. + return 0;
  8699. +}
  8700. +
  8701. +#define MISC_CFG_WPM_MASK 0x00000002
  8702. +int qm_set_wpm(int wpm)
  8703. +{
  8704. + u32 before;
  8705. + u32 after;
  8706. +
  8707. + if (!qman_have_ccsr())
  8708. + return -ENODEV;
  8709. +
  8710. + before = qm_in(MISC_CFG);
  8711. + after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
  8712. + qm_out(MISC_CFG, after);
  8713. + return 0;
  8714. +}
  8715. +
  8716. +int qm_get_wpm(int *wpm)
  8717. +{
  8718. + u32 before;
  8719. +
  8720. + if (!qman_have_ccsr())
  8721. + return -ENODEV;
  8722. +
  8723. + before = qm_in(MISC_CFG);
  8724. + *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
  8725. + return 0;
  8726. +}
  8727. +
  8728. +/* The CEETM_CFG_PRES register's PRES field is calculated as:
  8729. + * PRES = (2^22 / credit update reference period) * QMan clock period
  8730. + * = ((2^22 * 10^9) / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
  8731. + */
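A worked instance of this formula, with illustrative values not taken from the patch: assuming CONFIG_QMAN_CEETM_UPDATE_PERIOD = 1000 (ns) and qman_clk = 400000000 (400 MHz),

    PRES = (2^22 * 10^9) / 1000 / 400000000
         = 4194304000000 / 400000000
         ~= 10485   (truncated to a u16 by the cast below)

which fits comfortably in the 16-bit PRES field.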
  8732. +
  8733. +int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
  8734. +{
  8735. + u64 temp;
  8736. + u16 pres;
  8737. +
  8738. + if (!qman_have_ccsr())
  8739. + return -ENODEV;
  8740. +
  8741. + temp = 0x400000 * 100;
  8742. + do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD);
  8743. + temp *= 10000000;
  8744. + do_div(temp, qman_clk);
  8745. + pres = (u16) temp;
  8746. + qm_out(CEETM_CFG_IDX, portal);
  8747. + qm_out(CEETM_CFG_PRES, pres);
  8748. + return 0;
  8749. +}
  8750. +
  8751. +int qman_ceetm_get_prescaler(u16 *pres)
  8752. +{
  8753. + if (!qman_have_ccsr())
  8754. + return -ENODEV;
  8755. + *pres = (u16)qm_in(CEETM_CFG_PRES);
  8756. + return 0;
  8757. +}
  8758. +
  8759. +#define DCP_CFG_CEETME_MASK 0xFFFF0000
  8760. +#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n))
  8761. +int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
  8762. +{
  8763. + u32 dcp_cfg;
  8764. +
  8765. + if (!qman_have_ccsr())
  8766. + return -ENODEV;
  8767. +
  8768. + dcp_cfg = qm_in(DCP_CFG(portal));
  8769. + dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal);
  8770. + qm_out(DCP_CFG(portal), dcp_cfg);
  8771. + return 0;
  8772. +}
  8773. +
  8774. +int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
  8775. +{
  8776. + u32 dcp_cfg;
  8777. +
  8778. + if (!qman_have_ccsr())
  8779. + return -ENODEV;
  8780. + dcp_cfg = qm_in(DCP_CFG(portal));
  8781. + dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal));
  8782. + qm_out(DCP_CFG(portal), dcp_cfg);
  8783. + return 0;
  8784. +}
  8785. +
  8786. +int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num)
  8787. +{
  8788. + if (!qman_have_ccsr())
  8789. + return -ENODEV;
  8790. + *num = qm_in(CEETM_XSFDR_IN_USE);
  8791. + return 0;
  8792. +}
  8793. +EXPORT_SYMBOL(qman_ceetm_get_xsfdr);
  8794. +
  8795. +#ifdef CONFIG_SYSFS
  8796. +
  8797. +#define DRV_NAME "fsl-qman"
  8798. +#define DCP_MAX_ID 3
  8799. +#define DCP_MIN_ID 0
  8800. +
  8801. +static ssize_t show_pfdr_fpc(struct device *dev,
  8802. + struct device_attribute *dev_attr, char *buf)
  8803. +{
  8804. + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
  8805. +};
  8806. +
  8807. +static ssize_t show_dlm_avg(struct device *dev,
  8808. + struct device_attribute *dev_attr, char *buf)
  8809. +{
  8810. + u32 data;
  8811. + int i;
  8812. +
  8813. + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
  8814. + return -EINVAL;
  8815. + if (i < DCP_MIN_ID || i > DCP_MAX_ID)
  8816. + return -EINVAL;
  8817. + data = qm_in(DCP_DLM_AVG(i));
  8818. + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
  8819. + (data & 0x000000ff)*390625);
  8820. +};
  8821. +
  8822. +static ssize_t set_dlm_avg(struct device *dev,
  8823. + struct device_attribute *dev_attr, const char *buf, size_t count)
  8824. +{
  8825. + unsigned long val;
  8826. + int i;
  8827. +
  8828. + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
  8829. + return -EINVAL;
  8830. + if (i < DCP_MIN_ID || i > DCP_MAX_ID)
  8831. + return -EINVAL;
  8832. + if (kstrtoul(buf, 0, &val)) {
  8833. + dev_dbg(dev, "invalid input %s\n", buf);
  8834. + return -EINVAL;
  8835. + }
  8836. + qm_out(DCP_DLM_AVG(i), val);
  8837. + return count;
  8838. +};
  8839. +
  8840. +static ssize_t show_pfdr_cfg(struct device *dev,
  8841. + struct device_attribute *dev_attr, char *buf)
  8842. +{
  8843. + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
  8844. +};
  8845. +
  8846. +static ssize_t set_pfdr_cfg(struct device *dev,
  8847. + struct device_attribute *dev_attr, const char *buf, size_t count)
  8848. +{
  8849. + unsigned long val;
  8850. +
  8851. + if (kstrtoul(buf, 0, &val)) {
  8852. + dev_dbg(dev, "invalid input %s\n", buf);
  8853. + return -EINVAL;
  8854. + }
  8855. + qm_out(PFDR_CFG, val);
  8856. + return count;
  8857. +};
  8858. +
  8859. +static ssize_t show_sfdr_in_use(struct device *dev,
  8860. + struct device_attribute *dev_attr, char *buf)
  8861. +{
  8862. + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
  8863. +};
  8864. +
  8865. +static ssize_t show_idle_stat(struct device *dev,
  8866. + struct device_attribute *dev_attr, char *buf)
  8867. +{
  8868. + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
  8869. +};
  8870. +
  8871. +static ssize_t show_ci_rlm_avg(struct device *dev,
  8872. + struct device_attribute *dev_attr, char *buf)
  8873. +{
  8874. + u32 data = qm_in(CI_RLM_AVG);
  8875. + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
  8876. + (data & 0x000000ff)*390625);
  8877. +};
  8878. +
  8879. +static ssize_t set_ci_rlm_avg(struct device *dev,
  8880. + struct device_attribute *dev_attr, const char *buf, size_t count)
  8881. +{
  8882. + unsigned long val;
  8883. +
  8884. + if (kstrtoul(buf, 0, &val)) {
  8885. + dev_dbg(dev, "invalid input %s\n", buf);
  8886. + return -EINVAL;
  8887. + }
  8888. + qm_out(CI_RLM_AVG, val);
  8889. + return count;
  8890. +};
  8891. +
  8892. +static ssize_t show_err_isr(struct device *dev,
  8893. + struct device_attribute *dev_attr, char *buf)
  8894. +{
  8895. + return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
  8896. +};
  8897. +
  8898. +#define SBEC_MAX_ID 14
  8899. +#define SBEC_MIN_ID 0
  8900. +
  8901. +static ssize_t show_sbec(struct device *dev,
  8902. + struct device_attribute *dev_attr, char *buf)
  8903. +{
  8904. + int i;
  8905. +
  8906. + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
  8907. + return -EINVAL;
  8908. + if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
  8909. + return -EINVAL;
  8910. + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
  8911. +};
  8912. +
  8913. +static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
  8914. +static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
  8915. +static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
  8916. +static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
  8917. + show_ci_rlm_avg, set_ci_rlm_avg);
  8918. +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
  8919. +static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
  8920. +
  8921. +static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
  8922. +static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
  8923. +static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
  8924. +static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
  8925. +
  8926. +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
  8927. +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
  8928. +static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
  8929. +static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
  8930. +static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
  8931. +static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
  8932. +static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
  8933. +static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
  8934. +static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
  8935. +static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
  8936. +static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
  8937. +static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
  8938. +static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
  8939. +static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
  8940. +static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
  8941. +
  8942. +static struct attribute *qman_dev_attributes[] = {
  8943. + &dev_attr_pfdr_fpc.attr,
  8944. + &dev_attr_pfdr_cfg.attr,
  8945. + &dev_attr_idle_stat.attr,
  8946. + &dev_attr_ci_rlm_avg.attr,
  8947. + &dev_attr_err_isr.attr,
  8948. + &dev_attr_dcp0_dlm_avg.attr,
  8949. + &dev_attr_dcp1_dlm_avg.attr,
  8950. + &dev_attr_dcp2_dlm_avg.attr,
  8951. + &dev_attr_dcp3_dlm_avg.attr,
  8952. + /* sfdr_in_use will be added if necessary */
  8953. + NULL
  8954. +};
  8955. +
  8956. +static struct attribute *qman_dev_ecr_attributes[] = {
  8957. + &dev_attr_sbec_0.attr,
  8958. + &dev_attr_sbec_1.attr,
  8959. + &dev_attr_sbec_2.attr,
  8960. + &dev_attr_sbec_3.attr,
  8961. + &dev_attr_sbec_4.attr,
  8962. + &dev_attr_sbec_5.attr,
  8963. + &dev_attr_sbec_6.attr,
  8964. + &dev_attr_sbec_7.attr,
  8965. + &dev_attr_sbec_8.attr,
  8966. + &dev_attr_sbec_9.attr,
  8967. + &dev_attr_sbec_10.attr,
  8968. + &dev_attr_sbec_11.attr,
  8969. + &dev_attr_sbec_12.attr,
  8970. + &dev_attr_sbec_13.attr,
  8971. + &dev_attr_sbec_14.attr,
  8972. + NULL
  8973. +};
  8974. +
  8975. +/* root level */
  8976. +static const struct attribute_group qman_dev_attr_grp = {
  8977. + .name = NULL,
  8978. + .attrs = qman_dev_attributes
  8979. +};
  8980. +static const struct attribute_group qman_dev_ecr_grp = {
  8981. + .name = "error_capture",
  8982. + .attrs = qman_dev_ecr_attributes
  8983. +};
  8984. +
  8985. +static int of_fsl_qman_remove(struct platform_device *ofdev)
  8986. +{
  8987. + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
  8988. + return 0;
  8989. +};
  8990. +
  8991. +static int of_fsl_qman_probe(struct platform_device *ofdev)
  8992. +{
  8993. + int ret;
  8994. +
  8995. + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
  8996. + if (ret)
  8997. + goto done;
  8998. + ret = sysfs_add_file_to_group(&ofdev->dev.kobj,
  8999. + &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
  9000. + if (ret)
  9001. + goto del_group_0;
  9002. + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp);
  9003. + if (ret)
  9004. + goto del_group_0;
  9005. +
  9006. + goto done;
  9007. +
  9008. +del_group_0:
  9009. + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
  9010. +done:
  9011. + if (ret)
  9012. + dev_err(&ofdev->dev,
  9013. + "Cannot create dev attributes ret=%d\n", ret);
  9014. + return ret;
  9015. +};
  9016. +
  9017. +static struct of_device_id of_fsl_qman_ids[] = {
  9018. + {
  9019. + .compatible = "fsl,qman",
  9020. + },
  9021. + {}
  9022. +};
  9023. +MODULE_DEVICE_TABLE(of, of_fsl_qman_ids);
  9024. +
  9025. +#ifdef CONFIG_SUSPEND
  9026. +
  9027. +static u32 saved_isdr;
  9028. +static int qman_pm_suspend_noirq(struct device *dev)
  9029. +{
  9030. + uint32_t idle_state;
  9031. +
  9032. + suspend_unused_qportal();
  9033. + /* save isdr, disable all, clear isr */
  9034. + saved_isdr = qm_err_isr_disable_read(qm);
  9035. + qm_err_isr_disable_write(qm, 0xffffffff);
  9036. + qm_err_isr_status_clear(qm, 0xffffffff);
  9037. + idle_state = qm_in(IDLE_STAT);
  9038. + if (!(idle_state & 0x1)) {
  9039. + pr_err("Qman not idle 0x%x, aborting\n", idle_state);
  9040. + qm_err_isr_disable_write(qm, saved_isdr);
  9041. + resume_unused_qportal();
  9042. + return -EBUSY;
  9043. + }
  9044. +#ifdef CONFIG_PM_DEBUG
  9045. + pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state);
  9046. +#endif
  9047. + return 0;
  9048. +}
  9049. +
  9050. +static int qman_pm_resume_noirq(struct device *dev)
  9051. +{
  9052. + /* restore isdr */
  9053. + qm_err_isr_disable_write(qm, saved_isdr);
  9054. + resume_unused_qportal();
  9055. + return 0;
  9056. +}
  9057. +#else
  9058. +#define qman_pm_suspend_noirq NULL
  9059. +#define qman_pm_resume_noirq NULL
  9060. +#endif
  9061. +
  9062. +static const struct dev_pm_ops qman_pm_ops = {
  9063. + .suspend_noirq = qman_pm_suspend_noirq,
  9064. + .resume_noirq = qman_pm_resume_noirq,
  9065. +};
  9066. +
  9067. +static struct platform_driver of_fsl_qman_driver = {
  9068. + .driver = {
  9069. + .owner = THIS_MODULE,
  9070. + .name = DRV_NAME,
  9071. + .of_match_table = of_fsl_qman_ids,
  9072. + .pm = &qman_pm_ops,
  9073. + },
  9074. + .probe = of_fsl_qman_probe,
  9075. + .remove = of_fsl_qman_remove,
  9076. +};
  9077. +
  9078. +static int qman_ctrl_init(void)
  9079. +{
  9080. + return platform_driver_register(&of_fsl_qman_driver);
  9081. +}
  9082. +
  9083. +static void qman_ctrl_exit(void)
  9084. +{
  9085. + platform_driver_unregister(&of_fsl_qman_driver);
  9086. +}
  9087. +
  9088. +module_init(qman_ctrl_init);
  9089. +module_exit(qman_ctrl_exit);
  9090. +
  9091. +#endif /* CONFIG_SYSFS */
  9092. --- /dev/null
  9093. +++ b/drivers/staging/fsl_qbman/qman_debugfs.c
  9094. @@ -0,0 +1,1594 @@
  9095. +/* Copyright 2010-2011 Freescale Semiconductor, Inc.
  9096. + *
  9097. + * Redistribution and use in source and binary forms, with or without
  9098. + * modification, are permitted provided that the following conditions are met:
  9099. + * * Redistributions of source code must retain the above copyright
  9100. + * notice, this list of conditions and the following disclaimer.
  9101. + * * Redistributions in binary form must reproduce the above copyright
  9102. + * notice, this list of conditions and the following disclaimer in the
  9103. + * documentation and/or other materials provided with the distribution.
  9104. + * * Neither the name of Freescale Semiconductor nor the
  9105. + * names of its contributors may be used to endorse or promote products
  9106. + * derived from this software without specific prior written permission.
  9107. + *
  9108. + *
  9109. + * ALTERNATIVELY, this software may be distributed under the terms of the
  9110. + * GNU General Public License ("GPL") as published by the Free Software
  9111. + * Foundation, either version 2 of that License or (at your option) any
  9112. + * later version.
  9113. + *
  9114. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  9115. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  9116. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  9117. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  9118. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  9119. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  9120. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  9121. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  9122. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  9123. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  9124. + */
  9125. +#include "qman_private.h"
  9126. +
  9127. +#define MAX_FQID (0x00ffffff)
  9128. +#define QM_FQD_BLOCK_SIZE 64
  9129. +#define QM_FQD_AR (0xC10)
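+/* QM_FQD_AR is the CCSR offset of the FQD (frame queue descriptor) attributes
+ * register; its low 6 bits encode the size of the FQD memory window as
+ * 2^(x+1) bytes, which qman_debugfs_module_init() below uses to derive
+ * fqid_max. */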
  9130. +
  9131. +static u32 fqid_max;
  9132. +static u64 qman_ccsr_start;
  9133. +static u64 qman_ccsr_size;
  9134. +
  9135. +static const char * const state_txt[] = {
  9136. + "Out of Service",
  9137. + "Retired",
  9138. + "Tentatively Scheduled",
  9139. + "Truly Scheduled",
  9140. + "Parked",
  9141. + "Active, Active Held or Held Suspended",
  9142. + "Unknown State 6",
  9143. + "Unknown State 7",
  9144. + NULL,
  9145. +};
  9146. +
  9147. +static const u8 fqd_states[] = {
  9148. + QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
  9149. + QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
  9150. + QM_MCR_NP_STATE_ACTIVE};
  9151. +
  9152. +struct mask_to_text {
  9153. + u16 mask;
  9154. + const char *txt;
  9155. +};
  9156. +
  9157. +struct mask_filter_s {
  9158. + u16 mask;
  9159. + u8 filter;
  9160. +};
  9161. +
  9162. +static const struct mask_filter_s mask_filter[] = {
  9163. + {QM_FQCTRL_PREFERINCACHE, 0},
  9164. + {QM_FQCTRL_PREFERINCACHE, 1},
  9165. + {QM_FQCTRL_HOLDACTIVE, 0},
  9166. + {QM_FQCTRL_HOLDACTIVE, 1},
  9167. + {QM_FQCTRL_AVOIDBLOCK, 0},
  9168. + {QM_FQCTRL_AVOIDBLOCK, 1},
  9169. + {QM_FQCTRL_FORCESFDR, 0},
  9170. + {QM_FQCTRL_FORCESFDR, 1},
  9171. + {QM_FQCTRL_CPCSTASH, 0},
  9172. + {QM_FQCTRL_CPCSTASH, 1},
  9173. + {QM_FQCTRL_CTXASTASHING, 0},
  9174. + {QM_FQCTRL_CTXASTASHING, 1},
  9175. + {QM_FQCTRL_ORP, 0},
  9176. + {QM_FQCTRL_ORP, 1},
  9177. + {QM_FQCTRL_TDE, 0},
  9178. + {QM_FQCTRL_TDE, 1},
  9179. + {QM_FQCTRL_CGE, 0},
  9180. + {QM_FQCTRL_CGE, 1}
  9181. +};
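+/*
+ * Each FQ control bit appears twice in mask_filter[]: once with filter = 0
+ * (list FQs with the bit cleared) and once with filter = 1 (list FQs with the
+ * bit set). The per-bit "*_enable"/"*_disable" debugfs files created in
+ * qman_debugfs_module_init() pass an entry of this table as private data.
+ */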
  9182. +
  9183. +static const struct mask_to_text fq_ctrl_text_list[] = {
  9184. + {
  9185. + .mask = QM_FQCTRL_PREFERINCACHE,
  9186. + .txt = "Prefer in cache",
  9187. + },
  9188. + {
  9189. + .mask = QM_FQCTRL_HOLDACTIVE,
  9190. + .txt = "Hold active in portal",
  9191. + },
  9192. + {
  9193. + .mask = QM_FQCTRL_AVOIDBLOCK,
  9194. + .txt = "Avoid Blocking",
  9195. + },
  9196. + {
  9197. + .mask = QM_FQCTRL_FORCESFDR,
  9198. + .txt = "High-priority SFDRs",
  9199. + },
  9200. + {
  9201. + .mask = QM_FQCTRL_CPCSTASH,
  9202. + .txt = "CPC Stash Enable",
  9203. + },
  9204. + {
  9205. + .mask = QM_FQCTRL_CTXASTASHING,
  9206. + .txt = "Context-A stashing",
  9207. + },
  9208. + {
  9209. + .mask = QM_FQCTRL_ORP,
  9210. + .txt = "ORP Enable",
  9211. + },
  9212. + {
  9213. + .mask = QM_FQCTRL_TDE,
  9214. + .txt = "Tail-Drop Enable",
  9215. + },
  9216. + {
  9217. + .mask = QM_FQCTRL_CGE,
  9218. + .txt = "Congestion Group Enable",
  9219. + },
  9220. + {
  9221. + .mask = 0,
  9222. + .txt = NULL,
  9223. + }
  9224. +};
  9225. +
  9226. +static const char *get_fqd_ctrl_text(u16 mask)
  9227. +{
  9228. + int i = 0;
  9229. +
  9230. + while (fq_ctrl_text_list[i].txt != NULL) {
  9231. + if (fq_ctrl_text_list[i].mask == mask)
  9232. + return fq_ctrl_text_list[i].txt;
  9233. + i++;
  9234. + }
  9235. + return NULL;
  9236. +}
  9237. +
  9238. +static const struct mask_to_text stashing_text_list[] = {
  9239. + {
  9240. + .mask = QM_STASHING_EXCL_CTX,
  9241. + .txt = "FQ Ctx Stash"
  9242. + },
  9243. + {
  9244. + .mask = QM_STASHING_EXCL_DATA,
  9245. + .txt = "Frame Data Stash",
  9246. + },
  9247. + {
  9248. + .mask = QM_STASHING_EXCL_ANNOTATION,
  9249. + .txt = "Frame Annotation Stash",
  9250. + },
  9251. + {
  9252. + .mask = 0,
  9253. + .txt = NULL,
  9254. + },
  9255. +};
  9256. +
  9257. +static int user_input_convert(const char __user *user_buf, size_t count,
  9258. + unsigned long *val)
  9259. +{
  9260. + char buf[12];
  9261. +
  9262. + if (count > sizeof(buf) - 1)
  9263. + return -EINVAL;
  9264. + if (copy_from_user(buf, user_buf, count))
  9265. + return -EFAULT;
  9266. + buf[count] = '\0';
  9267. + if (kstrtoul(buf, 0, val))
  9268. + return -EINVAL;
  9269. + return 0;
  9270. +}
  9271. +
  9272. +struct line_buffer_fq {
  9273. + u32 buf[8];
  9274. + u32 buf_cnt;
  9275. + int line_cnt;
  9276. +};
  9277. +
  9278. +static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
  9279. + struct seq_file *file)
  9280. +{
  9281. + line_buf->buf[line_buf->buf_cnt] = fqid;
  9282. + line_buf->buf_cnt++;
  9283. + if (line_buf->buf_cnt == 8) {
  9284. + /* Buffer is full, flush it */
  9285. + if (line_buf->line_cnt != 0)
  9286. + seq_puts(file, ",\n");
  9287. + seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,"
  9288. + "0x%06x,0x%06x,0x%06x",
  9289. + line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
  9290. + line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
  9291. + line_buf->buf[6], line_buf->buf[7]);
  9292. + line_buf->buf_cnt = 0;
  9293. + line_buf->line_cnt++;
  9294. + }
  9295. +}
  9296. +
  9297. +static void flush_line_buffer(struct line_buffer_fq *line_buf,
  9298. + struct seq_file *file)
  9299. +{
  9300. + if (line_buf->buf_cnt) {
  9301. + int y = 0;
  9302. + if (line_buf->line_cnt != 0)
  9303. + seq_puts(file, ",\n");
  9304. + while (y != line_buf->buf_cnt) {
  9305. + if (y+1 == line_buf->buf_cnt)
  9306. + seq_printf(file, "0x%06x", line_buf->buf[y]);
  9307. + else
  9308. + seq_printf(file, "0x%06x,", line_buf->buf[y]);
  9309. + y++;
  9310. + }
  9311. + line_buf->line_cnt++;
  9312. + }
  9313. + if (line_buf->line_cnt)
  9314. + seq_putc(file, '\n');
  9315. +}
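+/*
+ * add_to_line_buffer()/flush_line_buffer() batch FQIDs so that the FQ listing
+ * files below print eight comma-separated hex FQIDs per line instead of one
+ * per line.
+ */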
  9316. +
  9317. +static struct dentry *dfs_root; /* debugfs root directory */
  9318. +
  9319. +/*******************************************************************************
  9320. + * Query Frame Queue Non Programmable Fields
  9321. + ******************************************************************************/
  9322. +struct query_fq_np_fields_data_s {
  9323. + u32 fqid;
  9324. +};
  9325. +static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
  9326. + .fqid = 1,
  9327. +};
  9328. +
  9329. +static int query_fq_np_fields_show(struct seq_file *file, void *offset)
  9330. +{
  9331. + int ret;
  9332. + struct qm_mcr_queryfq_np np;
  9333. + struct qman_fq fq;
  9334. +
  9335. + fq.fqid = query_fq_np_fields_data.fqid;
  9336. + ret = qman_query_fq_np(&fq, &np);
  9337. + if (ret)
  9338. + return ret;
  9339. + /* Print state */
  9340. + seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
  9341. + fq.fqid);
  9342. + seq_printf(file, " force eligible pending: %s\n",
  9343. + (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
  9344. + seq_printf(file, " retirement pending: %s\n",
  9345. + (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
  9346. + seq_printf(file, " state: %s\n",
  9347. + state_txt[np.state & QM_MCR_NP_STATE_MASK]);
  9348. + seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
  9349. + seq_printf(file, " odp_seq: %u\n", np.odp_seq);
  9350. + seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
  9351. + seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
  9352. + seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
  9353. + seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
  9354. + seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
  9355. + seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
  9356. + seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
  9357. + seq_printf(file, " is: ics_surp contains a %s\n",
  9358. + (np.is) ? "deficit" : "surplus");
  9359. + seq_printf(file, " ics_surp: %u\n", np.ics_surp);
  9360. + seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
  9361. + seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
  9362. + seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
  9363. + seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
  9364. + seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
  9365. + seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
  9366. + seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
  9367. + return 0;
  9368. +}
  9369. +
  9370. +static int query_fq_np_fields_open(struct inode *inode,
  9371. + struct file *file)
  9372. +{
  9373. + return single_open(file, query_fq_np_fields_show, NULL);
  9374. +}
  9375. +
  9376. +static ssize_t query_fq_np_fields_write(struct file *f,
  9377. + const char __user *buf, size_t count, loff_t *off)
  9378. +{
  9379. + int ret;
  9380. + unsigned long val;
  9381. +
  9382. + ret = user_input_convert(buf, count, &val);
  9383. + if (ret)
  9384. + return ret;
  9385. + if (val > MAX_FQID)
  9386. + return -EINVAL;
  9387. + query_fq_np_fields_data.fqid = (u32)val;
  9388. + return count;
  9389. +}
  9390. +
  9391. +static const struct file_operations query_fq_np_fields_fops = {
  9392. + .owner = THIS_MODULE,
  9393. + .open = query_fq_np_fields_open,
  9394. + .read = seq_read,
  9395. + .write = query_fq_np_fields_write,
  9396. + .release = single_release,
  9397. +};
  9398. +
  9399. +/*******************************************************************************
  9400. + * Frame Queue Programmable Fields
  9401. + ******************************************************************************/
  9402. +struct query_fq_fields_data_s {
  9403. + u32 fqid;
  9404. +};
  9405. +
  9406. +static struct query_fq_fields_data_s query_fq_fields_data = {
  9407. + .fqid = 1,
  9408. +};
  9409. +
  9410. +static int query_fq_fields_show(struct seq_file *file, void *offset)
  9411. +{
  9412. + int ret;
  9413. + struct qm_fqd fqd;
  9414. + struct qman_fq fq;
  9415. + int i = 0;
  9416. +
  9417. + memset(&fqd, 0, sizeof(struct qm_fqd));
  9418. + fq.fqid = query_fq_fields_data.fqid;
  9419. + ret = qman_query_fq(&fq, &fqd);
  9420. + if (ret)
  9421. + return ret;
  9422. + seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
  9423. + fq.fqid);
  9424. + seq_printf(file, " orprws: %u\n", fqd.orprws);
  9425. + seq_printf(file, " oa: %u\n", fqd.oa);
  9426. + seq_printf(file, " olws: %u\n", fqd.olws);
  9427. +
  9428. + seq_printf(file, " cgid: %u\n", fqd.cgid);
  9429. +
  9430. + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
  9431. + seq_puts(file, " fq_ctrl: None\n");
  9432. + else {
  9433. + i = 0;
  9434. + seq_puts(file, " fq_ctrl:\n");
  9435. + while (fq_ctrl_text_list[i].txt != NULL) {
  9436. + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
  9437. + fq_ctrl_text_list[i].mask)
  9438. + seq_printf(file, " %s\n",
  9439. + fq_ctrl_text_list[i].txt);
  9440. + i++;
  9441. + }
  9442. + }
  9443. + seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
  9444. + seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
  9445. + seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
  9446. + seq_printf(file, " td_mant: %u\n", fqd.td.mant);
  9447. + seq_printf(file, " td_exp: %u\n", fqd.td.exp);
  9448. +
  9449. + seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
  9450. +
  9451. + seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
  9452. + /* Any stashing configured */
  9453. + if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
  9454. + seq_puts(file, " ctx_a_stash_exclusive: None\n");
  9455. + else {
  9456. + seq_puts(file, " ctx_a_stash_exclusive:\n");
  9457. + i = 0;
  9458. + while (stashing_text_list[i].txt != NULL) {
  9459. + if ((fqd.context_a.stashing.exclusive & 0x7) & stashing_text_list[i].mask)
  9460. + seq_printf(file, " %s\n",
  9461. + stashing_text_list[i].txt);
  9462. + i++;
  9463. + }
  9464. + }
  9465. + seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
  9466. + fqd.context_a.stashing.annotation_cl);
  9467. + seq_printf(file, " ctx_a_stash_data_cl: %u\n",
  9468. + fqd.context_a.stashing.data_cl);
  9469. + seq_printf(file, " ctx_a_stash_context_cl: %u\n",
  9470. + fqd.context_a.stashing.context_cl);
  9471. + return 0;
  9472. +}
  9473. +
  9474. +static int query_fq_fields_open(struct inode *inode,
  9475. + struct file *file)
  9476. +{
  9477. + return single_open(file, query_fq_fields_show, NULL);
  9478. +}
  9479. +
  9480. +static ssize_t query_fq_fields_write(struct file *f,
  9481. + const char __user *buf, size_t count, loff_t *off)
  9482. +{
  9483. + int ret;
  9484. + unsigned long val;
  9485. +
  9486. + ret = user_input_convert(buf, count, &val);
  9487. + if (ret)
  9488. + return ret;
  9489. + if (val > MAX_FQID)
  9490. + return -EINVAL;
  9491. + query_fq_fields_data.fqid = (u32)val;
  9492. + return count;
  9493. +}
  9494. +
  9495. +static const struct file_operations query_fq_fields_fops = {
  9496. + .owner = THIS_MODULE,
  9497. + .open = query_fq_fields_open,
  9498. + .read = seq_read,
  9499. + .write = query_fq_fields_write,
  9500. + .release = single_release,
  9501. +};
  9502. +
  9503. +/*******************************************************************************
  9504. + * Query WQ lengths
  9505. + ******************************************************************************/
  9506. +struct query_wq_lengths_data_s {
  9507. + union {
  9508. + u16 channel_wq; /* ignores wq (3 lsbits) */
  9509. + struct {
  9510. + u16 id:13; /* qm_channel */
  9511. + u16 __reserved:3;
  9512. + } __packed channel;
  9513. + };
  9514. +};
  9515. +static struct query_wq_lengths_data_s query_wq_lengths_data;
  9516. +static int query_wq_lengths_show(struct seq_file *file, void *offset)
  9517. +{
  9518. + int ret;
  9519. + struct qm_mcr_querywq wq;
  9520. + int i;
  9521. +
  9522. + memset(&wq, 0, sizeof(struct qm_mcr_querywq));
  9523. + wq.channel.id = query_wq_lengths_data.channel.id;
  9524. + ret = qman_query_wq(0, &wq);
  9525. + if (ret)
  9526. + return ret;
  9527. + seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
  9528. + for (i = 0; i < 8; i++)
  9529. + /* mask out upper 4 bits since they are not part of length */
  9530. + seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
  9531. + return 0;
  9532. +}
  9533. +
  9534. +static int query_wq_lengths_open(struct inode *inode,
  9535. + struct file *file)
  9536. +{
  9537. + return single_open(file, query_wq_lengths_show, NULL);
  9538. +}
  9539. +
  9540. +static ssize_t query_wq_lengths_write(struct file *f,
  9541. + const char __user *buf, size_t count, loff_t *off)
  9542. +{
  9543. + int ret;
  9544. + unsigned long val;
  9545. +
  9546. + ret = user_input_convert(buf, count, &val);
  9547. + if (ret)
  9548. + return ret;
  9549. + if (val > 0xfff8)
  9550. + return -EINVAL;
  9551. + query_wq_lengths_data.channel.id = (u16)val;
  9552. + return count;
  9553. +}
  9554. +
  9555. +static const struct file_operations query_wq_lengths_fops = {
  9556. + .owner = THIS_MODULE,
  9557. + .open = query_wq_lengths_open,
  9558. + .read = seq_read,
  9559. + .write = query_wq_lengths_write,
  9560. + .release = single_release,
  9561. +};
  9562. +
  9563. +/*******************************************************************************
  9564. + * Query CGR
  9565. + ******************************************************************************/
  9566. +struct query_cgr_s {
  9567. + u8 cgid;
  9568. +};
  9569. +static struct query_cgr_s query_cgr_data;
  9570. +
  9571. +static int query_cgr_show(struct seq_file *file, void *offset)
  9572. +{
  9573. + int ret;
  9574. + struct qm_mcr_querycgr cgrd;
  9575. + struct qman_cgr cgr;
  9576. + int i, j;
  9577. + u32 mask;
  9578. +
  9579. + memset(&cgr, 0, sizeof(cgr));
  9580. + memset(&cgrd, 0, sizeof(cgrd));
  9581. + cgr.cgrid = query_cgr_data.cgid;
  9582. + ret = qman_query_cgr(&cgr, &cgrd);
  9583. + if (ret)
  9584. + return ret;
  9585. + seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
  9586. + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9587. + cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
  9588. + cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
  9589. + cgrd.cgr.wr_parm_g.Pn);
  9590. +
  9591. + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9592. + cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
  9593. + cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
  9594. + cgrd.cgr.wr_parm_y.Pn);
  9595. +
  9596. + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9597. + cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
  9598. + cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
  9599. + cgrd.cgr.wr_parm_r.Pn);
  9600. +
  9601. + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
  9602. + cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
  9603. +
  9604. + seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
  9605. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
  9606. + seq_puts(file, " cscn_targ_dcp:\n");
  9607. + mask = 0x80000000;
  9608. + for (i = 0; i < 32; i++) {
  9609. + if (cgrd.cgr.cscn_targ & mask)
  9610. + seq_printf(file, " send CSCN to dcp %u\n",
  9611. + (31 - i));
  9612. + mask >>= 1;
  9613. + }
  9614. +
  9615. + seq_puts(file, " cscn_targ_swp:\n");
  9616. + for (i = 0; i < 4; i++) {
  9617. + mask = 0x80000000;
  9618. + for (j = 0; j < 32; j++) {
  9619. + if (cgrd.cscn_targ_swp[i] & mask)
  9620. + seq_printf(file, " send CSCN to swp"
  9621. + " %u\n", (127 - (i * 32) - j));
  9622. + mask >>= 1;
  9623. + }
  9624. + }
  9625. + } else {
  9626. + seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
  9627. + }
  9628. + seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
  9629. + seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
  9630. +
  9631. + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
  9632. + cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
  9633. +
  9634. + seq_printf(file, " mode: %s\n",
  9635. + (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
  9636. + "frame count" : "byte count");
  9637. + seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
  9638. + seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
  9639. +
  9640. + return 0;
  9641. +}
  9642. +
  9643. +static int query_cgr_open(struct inode *inode, struct file *file)
  9644. +{
  9645. + return single_open(file, query_cgr_show, NULL);
  9646. +}
  9647. +
  9648. +static ssize_t query_cgr_write(struct file *f, const char __user *buf,
  9649. + size_t count, loff_t *off)
  9650. +{
  9651. + int ret;
  9652. + unsigned long val;
  9653. +
  9654. + ret = user_input_convert(buf, count, &val);
  9655. + if (ret)
  9656. + return ret;
  9657. + if (val > 0xff)
  9658. + return -EINVAL;
  9659. + query_cgr_data.cgid = (u8)val;
  9660. + return count;
  9661. +}
  9662. +
  9663. +static const struct file_operations query_cgr_fops = {
  9664. + .owner = THIS_MODULE,
  9665. + .open = query_cgr_open,
  9666. + .read = seq_read,
  9667. + .write = query_cgr_write,
  9668. + .release = single_release,
  9669. +};
  9670. +
  9671. +/*******************************************************************************
  9672. + * Test Write CGR
  9673. + ******************************************************************************/
  9674. +struct test_write_cgr_s {
  9675. + u64 i_bcnt;
  9676. + u8 cgid;
  9677. +};
  9678. +static struct test_write_cgr_s test_write_cgr_data;
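+/*
+ * The cgid and i_bcnt used by the testwrite_cgr file are set via the separate
+ * testwrite_cgr_cgrid and testwrite_cgr_ibcnt debugfs files below; reading
+ * testwrite_cgr then issues the CGR test-write command with those values.
+ */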
  9679. +
  9680. +static int testwrite_cgr_show(struct seq_file *file, void *offset)
  9681. +{
  9682. + int ret;
  9683. + struct qm_mcr_cgrtestwrite result;
  9684. + struct qman_cgr cgr;
  9685. + u64 i_bcnt;
  9686. +
  9687. + memset(&cgr, 0, sizeof(struct qman_cgr));
  9688. + memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
  9689. + cgr.cgrid = test_write_cgr_data.cgid;
  9690. + i_bcnt = test_write_cgr_data.i_bcnt;
  9691. + ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
  9692. + if (ret)
  9693. + return ret;
  9694. + seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
  9695. + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9696. + result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
  9697. + result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
  9698. + result.cgr.wr_parm_g.Pn);
  9699. + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9700. + result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
  9701. + result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
  9702. + result.cgr.wr_parm_y.Pn);
  9703. + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9704. + result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
  9705. + result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
  9706. + result.cgr.wr_parm_r.Pn);
  9707. + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
  9708. + result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
  9709. + seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
  9710. + seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
  9711. + seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
  9712. + seq_printf(file, " cs: %u\n", result.cgr.cs);
  9713. + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
  9714. + result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
  9715. +
  9716. + /* Add Mode for Si 2 */
  9717. + seq_printf(file, " mode: %s\n",
  9718. + (result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
  9719. + "frame count" : "byte count");
  9720. +
  9721. + seq_printf(file, " i_bcnt: %llu\n",
  9722. + qm_mcr_cgrtestwrite_i_get64(&result));
  9723. + seq_printf(file, " a_bcnt: %llu\n",
  9724. + qm_mcr_cgrtestwrite_a_get64(&result));
  9725. + seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
  9726. + seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
  9727. + seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
  9728. + return 0;
  9729. +}
  9730. +
  9731. +static int testwrite_cgr_open(struct inode *inode, struct file *file)
  9732. +{
  9733. + return single_open(file, testwrite_cgr_show, NULL);
  9734. +}
  9735. +
  9736. +static const struct file_operations testwrite_cgr_fops = {
  9737. + .owner = THIS_MODULE,
  9738. + .open = testwrite_cgr_open,
  9739. + .read = seq_read,
  9740. + .release = single_release,
  9741. +};
  9742. +
  9743. +
  9744. +static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
  9745. +{
  9746. + seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
  9747. + return 0;
  9748. +}
  9749. +static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
  9750. +{
  9751. + return single_open(file, testwrite_cgr_ibcnt_show, NULL);
  9752. +}
  9753. +
  9754. +static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
  9755. + size_t count, loff_t *off)
  9756. +{
  9757. + int ret;
  9758. + unsigned long val;
  9759. +
  9760. + ret = user_input_convert(buf, count, &val);
  9761. + if (ret)
  9762. + return ret;
  9763. + test_write_cgr_data.i_bcnt = val;
  9764. + return count;
  9765. +}
  9766. +
  9767. +static const struct file_operations teswrite_cgr_ibcnt_fops = {
  9768. + .owner = THIS_MODULE,
  9769. + .open = testwrite_cgr_ibcnt_open,
  9770. + .read = seq_read,
  9771. + .write = testwrite_cgr_ibcnt_write,
  9772. + .release = single_release,
  9773. +};
  9774. +
  9775. +static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
  9776. +{
  9777. + seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
  9778. + return 0;
  9779. +}
  9780. +static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
  9781. +{
  9782. + return single_open(file, testwrite_cgr_cgrid_show, NULL);
  9783. +}
  9784. +
  9785. +static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
  9786. + size_t count, loff_t *off)
  9787. +{
  9788. + int ret;
  9789. + unsigned long val;
  9790. +
  9791. + ret = user_input_convert(buf, count, &val);
  9792. + if (ret)
  9793. + return ret;
  9794. + if (val > 0xff)
  9795. + return -EINVAL;
  9796. + test_write_cgr_data.cgid = (u8)val;
  9797. + return count;
  9798. +}
  9799. +
  9800. +static const struct file_operations teswrite_cgr_cgrid_fops = {
  9801. + .owner = THIS_MODULE,
  9802. + .open = testwrite_cgr_cgrid_open,
  9803. + .read = seq_read,
  9804. + .write = testwrite_cgr_cgrid_write,
  9805. + .release = single_release,
  9806. +};
  9807. +
  9808. +/*******************************************************************************
  9809. + * Query Congestion State
  9810. + ******************************************************************************/
  9811. +static int query_congestion_show(struct seq_file *file, void *offset)
  9812. +{
  9813. + int ret;
  9814. + struct qm_mcr_querycongestion cs;
  9815. + int i, j, in_cong = 0;
  9816. + u32 mask;
  9817. +
  9818. + memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
  9819. + ret = qman_query_congestion(&cs);
  9820. + if (ret)
  9821. + return ret;
  9822. + seq_puts(file, "Query Congestion Result\n");
  9823. + for (i = 0; i < 8; i++) {
  9824. + mask = 0x80000000;
  9825. + for (j = 0; j < 32; j++) {
  9826. + if (cs.state.__state[i] & mask) {
  9827. + in_cong = 1;
  9828. + seq_printf(file, " cg %u: %s\n", (i*32)+j,
  9829. + "in congestion");
  9830. + }
  9831. + mask >>= 1;
  9832. + }
  9833. + }
  9834. + if (!in_cong)
  9835. + seq_puts(file, " All congestion groups not congested.\n");
  9836. + return 0;
  9837. +}
  9838. +
  9839. +static int query_congestion_open(struct inode *inode, struct file *file)
  9840. +{
  9841. + return single_open(file, query_congestion_show, NULL);
  9842. +}
  9843. +
  9844. +static const struct file_operations query_congestion_fops = {
  9845. + .owner = THIS_MODULE,
  9846. + .open = query_congestion_open,
  9847. + .read = seq_read,
  9848. + .release = single_release,
  9849. +};
  9850. +
  9851. +/*******************************************************************************
  9852. + * Query CCGR
  9853. + ******************************************************************************/
  9854. +struct query_ccgr_s {
  9855. + u32 ccgid;
  9856. +};
  9857. +static struct query_ccgr_s query_ccgr_data;
  9858. +
  9859. +static int query_ccgr_show(struct seq_file *file, void *offset)
  9860. +{
  9861. + int ret;
  9862. + struct qm_mcr_ceetm_ccgr_query ccgr_query;
  9863. + struct qm_mcc_ceetm_ccgr_query query_opts;
  9864. + int i, j;
  9865. + u32 mask;
  9866. +
  9867. + memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query));
  9868. + memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query));
  9869. +
  9870. + if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
  9871. + return -EINVAL;
  9872. +
  9873. + seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid);
  9874. + query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24);
  9875. + query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF;
  9876. + ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query);
  9877. + if (ret)
  9878. + return ret;
  9879. + seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid,
  9880. + query_opts.dcpid);
  9881. + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9882. + ccgr_query.cm_query.wr_parm_g.MA,
  9883. + ccgr_query.cm_query.wr_parm_g.Mn,
  9884. + ccgr_query.cm_query.wr_parm_g.SA,
  9885. + ccgr_query.cm_query.wr_parm_g.Sn,
  9886. + ccgr_query.cm_query.wr_parm_g.Pn);
  9887. +
  9888. + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9889. + ccgr_query.cm_query.wr_parm_y.MA,
  9890. + ccgr_query.cm_query.wr_parm_y.Mn,
  9891. + ccgr_query.cm_query.wr_parm_y.SA,
  9892. + ccgr_query.cm_query.wr_parm_y.Sn,
  9893. + ccgr_query.cm_query.wr_parm_y.Pn);
  9894. +
  9895. + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
  9896. + ccgr_query.cm_query.wr_parm_r.MA,
  9897. + ccgr_query.cm_query.wr_parm_r.Mn,
  9898. + ccgr_query.cm_query.wr_parm_r.SA,
  9899. + ccgr_query.cm_query.wr_parm_r.Sn,
  9900. + ccgr_query.cm_query.wr_parm_r.Pn);
  9901. +
  9902. + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
  9903. + ccgr_query.cm_query.ctl_wr_en_g,
  9904. + ccgr_query.cm_query.ctl_wr_en_y,
  9905. + ccgr_query.cm_query.ctl_wr_en_r);
  9906. +
  9907. + seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en);
  9908. + seq_puts(file, " cscn_targ_dcp:\n");
  9909. + mask = 0x80000000;
  9910. + for (i = 0; i < 32; i++) {
  9911. + if (ccgr_query.cm_query.cscn_targ_dcp & mask)
  9912. + seq_printf(file, " send CSCN to dcp %u\n", (31 - i));
  9913. + mask >>= 1;
  9914. + }
  9915. +
  9916. + seq_puts(file, " cscn_targ_swp:\n");
  9917. + for (i = 0; i < 4; i++) {
  9918. + mask = 0x80000000;
  9919. + for (j = 0; j < 32; j++) {
  9920. + if (ccgr_query.cm_query.cscn_targ_swp[i] & mask)
  9921. + seq_printf(file, " send CSCN to swp"
  9922. + "%u\n", (127 - (i * 32) - j));
  9923. + mask >>= 1;
  9924. + }
  9925. + }
  9926. +
  9927. + seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en);
  9928. +
  9929. + seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n",
  9930. + ccgr_query.cm_query.cs_thres.TA,
  9931. + ccgr_query.cm_query.cs_thres.Tn);
  9932. +
  9933. + seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n",
  9934. + ccgr_query.cm_query.cs_thres_x.TA,
  9935. + ccgr_query.cm_query.cs_thres_x.Tn);
  9936. +
  9937. + seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n",
  9938. + ccgr_query.cm_query.td_thres.TA,
  9939. + ccgr_query.cm_query.td_thres.Tn);
  9940. +
  9941. + seq_printf(file, " mode: %s\n",
  9942. + (ccgr_query.cm_query.ctl_mode &
  9943. + QMAN_CGR_MODE_FRAME) ?
  9944. + "frame count" : "byte count");
  9945. + seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt);
  9946. + seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt);
  9947. +
  9948. + return 0;
  9949. +}
  9950. +
  9951. +static int query_ccgr_open(struct inode *inode, struct file *file)
  9952. +{
  9953. + return single_open(file, query_ccgr_show, NULL);
  9954. +}
  9955. +
  9956. +static ssize_t query_ccgr_write(struct file *f, const char __user *buf,
  9957. + size_t count, loff_t *off)
  9958. +{
  9959. + int ret;
  9960. + unsigned long val;
  9961. +
  9962. + ret = user_input_convert(buf, count, &val);
  9963. + if (ret)
  9964. + return ret;
  9965. + query_ccgr_data.ccgid = val;
  9966. + return count;
  9967. +}
  9968. +
  9969. +static const struct file_operations query_ccgr_fops = {
  9970. + .owner = THIS_MODULE,
  9971. + .open = query_ccgr_open,
  9972. + .read = seq_read,
  9973. + .write = query_ccgr_write,
  9974. + .release = single_release,
  9975. +};
  9976. +/*******************************************************************************
  9977. + * QMan register
  9978. + ******************************************************************************/
  9979. +struct qman_register_s {
  9980. + u32 val;
  9981. +};
  9982. +static struct qman_register_s qman_register_data;
  9983. +
  9984. +static void init_ccsrmempeek(void)
  9985. +{
  9986. + struct device_node *dn;
  9987. + const u32 *regaddr_p;
  9988. +
  9989. + dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
  9990. + if (!dn) {
  9991. + pr_info("No fsl,qman node\n");
  9992. + return;
  9993. + }
  9994. + regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
  9995. + if (!regaddr_p) {
  9996. + of_node_put(dn);
  9997. + return;
  9998. + }
  9999. + qman_ccsr_start = of_translate_address(dn, regaddr_p);
  10000. + of_node_put(dn);
  10001. +}
  10002. +/* This function provides access to QMan ccsr memory map */
  10003. +static int qman_ccsrmempeek(u32 *val, u32 offset)
  10004. +{
  10005. + void __iomem *addr;
  10006. + u64 phys_addr;
  10007. +
  10008. + if (!qman_ccsr_start)
  10009. + return -EINVAL;
  10010. +
  10011. + if (offset > (qman_ccsr_size - sizeof(u32)))
  10012. + return -EINVAL;
  10013. +
  10014. + phys_addr = qman_ccsr_start + offset;
  10015. + addr = ioremap(phys_addr, sizeof(u32));
  10016. + if (!addr) {
  10017. + pr_err("ccsrmempeek, ioremap failed\n");
  10018. + return -EINVAL;
  10019. + }
  10020. + *val = in_be32(addr);
  10021. + iounmap(addr);
  10022. + return 0;
  10023. +}
  10024. +
  10025. +static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
  10026. +{
  10027. + u32 b;
  10028. +
  10029. + qman_ccsrmempeek(&b, qman_register_data.val);
  10030. + seq_printf(file, "QMan register offset = 0x%x\n",
  10031. + qman_register_data.val);
  10032. + seq_printf(file, "value = 0x%08x\n", b);
  10033. +
  10034. + return 0;
  10035. +}
  10036. +
  10037. +static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
  10038. +{
  10039. + return single_open(file, qman_ccsrmempeek_show, NULL);
  10040. +}
  10041. +
  10042. +static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
  10043. + size_t count, loff_t *off)
  10044. +{
  10045. + int ret;
  10046. + unsigned long val;
  10047. +
  10048. + ret = user_input_convert(buf, count, &val);
  10049. + if (ret)
  10050. + return ret;
  10051. + /* multiple of 4 */
  10052. + if (val > (qman_ccsr_size - sizeof(u32))) {
  10053. + pr_info("Input 0x%lx > 0x%llx\n",
  10054. + val, (qman_ccsr_size - sizeof(u32)));
  10055. + return -EINVAL;
  10056. + }
  10057. + if (val & 0x3) {
  10058. + pr_info("Input 0x%lx not multiple of 4\n", val);
  10059. + return -EINVAL;
  10060. + }
  10061. + qman_register_data.val = val;
  10062. + return count;
  10063. +}
  10064. +
  10065. +static const struct file_operations qman_ccsrmempeek_fops = {
  10066. + .owner = THIS_MODULE,
  10067. + .open = qman_ccsrmempeek_open,
  10068. + .read = seq_read,
  10069. + .write = qman_ccsrmempeek_write,
  10070. +};
  10071. +
  10072. +/*******************************************************************************
  10073. + * QMan state
  10074. + ******************************************************************************/
  10075. +static int qman_fqd_state_show(struct seq_file *file, void *offset)
  10076. +{
  10077. + struct qm_mcr_queryfq_np np;
  10078. + struct qman_fq fq;
  10079. + struct line_buffer_fq line_buf;
  10080. + int ret, i;
  10081. + u8 *state = file->private;
  10082. + u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
  10083. +
  10084. + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
  10085. + memset(&line_buf, 0, sizeof(line_buf));
  10086. +
  10087. + seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
  10088. +
  10089. + for (i = 1; i < fqid_max; i++) {
  10090. + fq.fqid = i;
  10091. + ret = qman_query_fq_np(&fq, &np);
  10092. + if (ret)
  10093. + return ret;
  10094. + if (*state == (np.state & QM_MCR_NP_STATE_MASK))
  10095. + add_to_line_buffer(&line_buf, fq.fqid, file);
  10096. + /* Keep a summary count of all states */
  10097. + if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
  10098. + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
  10099. + }
  10100. + flush_line_buffer(&line_buf, file);
  10101. +
  10102. + for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
  10103. + seq_printf(file, "%s count = %u\n", state_txt[i],
  10104. + qm_fq_state_cnt[i]);
  10105. + }
  10106. + return 0;
  10107. +}
  10108. +
  10109. +static int qman_fqd_state_open(struct inode *inode, struct file *file)
  10110. +{
  10111. + return single_open(file, qman_fqd_state_show, inode->i_private);
  10112. +}
  10113. +
  10114. +static const struct file_operations qman_fqd_state_fops = {
  10115. + .owner = THIS_MODULE,
  10116. + .open = qman_fqd_state_open,
  10117. + .read = seq_read,
  10118. +};
  10119. +
  10120. +static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
  10121. +{
  10122. + struct qm_fqd fqd;
  10123. + struct qman_fq fq;
  10124. + u32 fq_en_cnt = 0, fq_di_cnt = 0;
  10125. + int ret, i;
  10126. + struct mask_filter_s *data = file->private;
  10127. + const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
  10128. + struct line_buffer_fq line_buf;
  10129. +
  10130. + memset(&line_buf, 0, sizeof(line_buf));
  10131. + seq_printf(file, "List of fq ids with: %s :%s\n",
  10132. + ctrl_txt, (data->filter) ? "enabled" : "disabled");
  10133. + for (i = 1; i < fqid_max; i++) {
  10134. + fq.fqid = i;
  10135. + memset(&fqd, 0, sizeof(struct qm_fqd));
  10136. + ret = qman_query_fq(&fq, &fqd);
  10137. + if (ret)
  10138. + return ret;
  10139. + if (data->filter) {
  10140. + if (fqd.fq_ctrl & data->mask)
  10141. + add_to_line_buffer(&line_buf, fq.fqid, file);
  10142. + } else {
  10143. + if (!(fqd.fq_ctrl & data->mask))
  10144. + add_to_line_buffer(&line_buf, fq.fqid, file);
  10145. + }
  10146. + if (fqd.fq_ctrl & data->mask)
  10147. + fq_en_cnt++;
  10148. + else
  10149. + fq_di_cnt++;
  10150. + }
  10151. + flush_line_buffer(&line_buf, file);
  10152. +
  10153. + seq_printf(file, "Total FQD with: %s : enabled = %u\n",
  10154. + ctrl_txt, fq_en_cnt);
  10155. + seq_printf(file, "Total FQD with: %s : disabled = %u\n",
  10156. + ctrl_txt, fq_di_cnt);
  10157. + return 0;
  10158. +}
  10159. +
  10160. +/*******************************************************************************
  10161. + * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE
  10162. + ******************************************************************************/
  10163. +static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
  10164. +{
  10165. + return single_open(file, qman_fqd_ctrl_show, inode->i_private);
  10166. +}
  10167. +
  10168. +static const struct file_operations qman_fqd_ctrl_fops = {
  10169. + .owner = THIS_MODULE,
  10170. + .open = qman_fqd_ctrl_open,
  10171. + .read = seq_read,
  10172. +};
  10173. +
  10174. +/*******************************************************************************
  10175. + * QMan ctrl summary
  10176. + ******************************************************************************/
  10177. +/*******************************************************************************
  10178. + * QMan summary state
  10179. + ******************************************************************************/
  10180. +static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
  10181. +{
  10182. + struct qm_mcr_queryfq_np np;
  10183. + struct qman_fq fq;
  10184. + int ret, i;
  10185. + u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
  10186. +
  10187. + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
  10188. +
  10189. + for (i = 1; i < fqid_max; i++) {
  10190. + fq.fqid = i;
  10191. + ret = qman_query_fq_np(&fq, &np);
  10192. + if (ret)
  10193. + return ret;
  10194. + /* Keep a summary count of all states */
  10195. + if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
  10196. + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
  10197. + }
  10198. +
  10199. + for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
  10200. + seq_printf(file, "%s count = %u\n", state_txt[i],
  10201. + qm_fq_state_cnt[i]);
  10202. + }
  10203. + return 0;
  10204. +}
  10205. +
  10206. +static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
  10207. +{
  10208. + struct qm_fqd fqd;
  10209. + struct qman_fq fq;
  10210. + int ret, i, j;
  10211. + u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
  10212. +
  10213. + memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
  10214. +
  10215. + for (i = 1; i < fqid_max; i++) {
  10216. + memset(&fqd, 0, sizeof(struct qm_fqd));
  10217. + fq.fqid = i;
  10218. + ret = qman_query_fq(&fq, &fqd);
  10219. + if (ret)
  10220. + return ret;
  10221. + /* Keep a summary count of all states */
  10222. + for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
  10223. + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
  10224. + mask_filter[j].mask)
  10225. + qm_prog_cnt[j/2]++;
  10226. + }
  10227. + for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
  10228. + seq_printf(file, "%s count = %u\n",
  10229. + get_fqd_ctrl_text(mask_filter[i*2].mask),
  10230. + qm_prog_cnt[i]);
  10231. + }
  10232. + return 0;
  10233. +}
  10234. +
  10235. +static int qman_fqd_summary_show(struct seq_file *file, void *offset)
  10236. +{
  10237. + int ret;
  10238. +
  10239. + /* Display summary of non programmable fields */
  10240. + ret = qman_fqd_non_prog_summary_show(file, offset);
  10241. + if (ret)
  10242. + return ret;
  10243. + seq_puts(file, "-----------------------------------------\n");
  10244. + /* Display programmable fields */
  10245. + ret = qman_fqd_prog_summary_show(file, offset);
  10246. + if (ret)
  10247. + return ret;
  10248. + return 0;
  10249. +}
  10250. +
  10251. +static int qman_fqd_summary_open(struct inode *inode, struct file *file)
  10252. +{
  10253. + return single_open(file, qman_fqd_summary_show, NULL);
  10254. +}
  10255. +
  10256. +static const struct file_operations qman_fqd_summary_fops = {
  10257. + .owner = THIS_MODULE,
  10258. + .open = qman_fqd_summary_open,
  10259. + .read = seq_read,
  10260. +};
  10261. +
  10262. +/*******************************************************************************
  10263. + * QMan destination work queue
  10264. + ******************************************************************************/
  10265. +struct qman_dest_wq_s {
  10266. + u16 wq_id;
  10267. +};
  10268. +static struct qman_dest_wq_s qman_dest_wq_data = {
  10269. + .wq_id = 0,
  10270. +};
  10271. +
  10272. +static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
  10273. +{
  10274. + struct qm_fqd fqd;
  10275. + struct qman_fq fq;
  10276. + int ret, i;
  10277. + u16 *wq, wq_id = qman_dest_wq_data.wq_id;
  10278. + struct line_buffer_fq line_buf;
  10279. +
  10280. + memset(&line_buf, 0, sizeof(line_buf));
  10281. + /* Use vmalloc: the per-WQ count table is large and does not need to
  10282. + * be physically contiguous. */
  10283. + wq = vzalloc(sizeof(u16) * (0xFFFF+1));
  10284. + if (!wq)
  10285. + return -ENOMEM;
  10286. +
  10287. + seq_printf(file, "List of fq ids with destination work queue id"
  10288. + " = 0x%x\n", wq_id);
  10289. +
  10290. + for (i = 1; i < fqid_max; i++) {
  10291. + fq.fqid = i;
  10292. + memset(&fqd, 0, sizeof(struct qm_fqd));
  10293. + ret = qman_query_fq(&fq, &fqd);
  10294. + if (ret) {
  10295. + vfree(wq);
  10296. + return ret;
  10297. + }
  10298. + if (wq_id == fqd.dest_wq)
  10299. + add_to_line_buffer(&line_buf, fq.fqid, file);
  10300. + wq[fqd.dest_wq]++;
  10301. + }
  10302. + flush_line_buffer(&line_buf, file);
  10303. +
  10304. + seq_puts(file, "Summary of all FQD destination work queue values\n");
  10305. + for (i = 0; i < 0xFFFF + 1; i++) {
  10306. + if (wq[i])
  10307. + seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
  10308. + "count = %u\n", i >> 3, i & 0x3, i, wq[i]);
  10309. + }
  10310. + vfree(wq);
  10311. + return 0;
  10312. +}
  10313. +
  10314. +static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
  10315. + size_t count, loff_t *off)
  10316. +{
  10317. + int ret;
  10318. + unsigned long val;
  10319. +
  10320. + ret = user_input_convert(buf, count, &val);
  10321. + if (ret)
  10322. + return ret;
  10323. + if (val > 0xFFFF)
  10324. + return -EINVAL;
  10325. + qman_dest_wq_data.wq_id = val;
  10326. + return count;
  10327. +}
  10328. +
  10329. +static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
  10330. +{
  10331. + return single_open(file, qman_fqd_dest_wq_show, NULL);
  10332. +}
  10333. +
  10334. +static const struct file_operations qman_fqd_dest_wq_fops = {
  10335. + .owner = THIS_MODULE,
  10336. + .open = qman_fqd_dest_wq_open,
  10337. + .read = seq_read,
  10338. + .write = qman_fqd_dest_wq_write,
  10339. +};
  10340. +
  10341. +/*******************************************************************************
  10342. + * QMan Intra-Class Scheduling Credit
  10343. + ******************************************************************************/
  10344. +static int qman_fqd_cred_show(struct seq_file *file, void *offset)
  10345. +{
  10346. + struct qm_fqd fqd;
  10347. + struct qman_fq fq;
  10348. + int ret, i;
  10349. + u32 fq_cnt = 0;
  10350. + struct line_buffer_fq line_buf;
  10351. +
  10352. + memset(&line_buf, 0, sizeof(line_buf));
  10353. + seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
  10354. + "\n");
  10355. +
  10356. + for (i = 1; i < fqid_max; i++) {
  10357. + fq.fqid = i;
  10358. + memset(&fqd, 0, sizeof(struct qm_fqd));
  10359. + ret = qman_query_fq(&fq, &fqd);
  10360. + if (ret)
  10361. + return ret;
  10362. + if (fqd.ics_cred > 0) {
  10363. + add_to_line_buffer(&line_buf, fq.fqid, file);
  10364. + fq_cnt++;
  10365. + }
  10366. + }
  10367. + flush_line_buffer(&line_buf, file);
  10368. +
  10369. + seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
  10370. + return 0;
  10371. +}
  10372. +
  10373. +static int qman_fqd_cred_open(struct inode *inode, struct file *file)
  10374. +{
  10375. + return single_open(file, qman_fqd_cred_show, NULL);
  10376. +}
  10377. +
  10378. +static const struct file_operations qman_fqd_cred_fops = {
  10379. + .owner = THIS_MODULE,
  10380. + .open = qman_fqd_cred_open,
  10381. + .read = seq_read,
  10382. +};
  10383. +
  10384. +/*******************************************************************************
  10385. + * Class Queue Fields
  10386. + ******************************************************************************/
  10387. +struct query_cq_fields_data_s {
  10388. + u32 cqid;
  10389. +};
  10390. +
  10391. +static struct query_cq_fields_data_s query_cq_fields_data = {
  10392. + .cqid = 1,
  10393. +};
  10394. +
  10395. +static int query_cq_fields_show(struct seq_file *file, void *offset)
  10396. +{
  10397. + int ret;
  10398. + struct qm_mcr_ceetm_cq_query query_result;
  10399. + unsigned int cqid;
  10400. + unsigned int portal;
  10401. +
  10402. + if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
  10403. + return -EINVAL;
  10404. +
  10405. + cqid = query_cq_fields_data.cqid & 0x00FFFFFF;
  10406. + portal = query_cq_fields_data.cqid >> 24;
  10407. + if (portal > qm_dc_portal_fman1)
  10408. + return -EINVAL;
  10409. +
  10410. + ret = qman_ceetm_query_cq(cqid, portal, &query_result);
  10411. + if (ret)
  10412. + return ret;
  10413. + seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n",
  10414. + cqid, portal);
  10415. + seq_printf(file, " ccgid: %u\n", query_result.ccgid);
  10416. + seq_printf(file, " state: %u\n", query_result.state);
  10417. + seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr);
  10418. + seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr);
  10419. + seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr);
  10420. + seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr);
  10421. + seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr);
  10422. + seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr);
  10423. + seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr);
  10424. + seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr);
  10425. + seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr);
  10426. + seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr);
  10427. + seq_printf(file, " frame_count: %u\n", query_result.frm_cnt);
  10428. +
  10429. + return 0;
  10430. +}
  10431. +
  10432. +static int query_cq_fields_open(struct inode *inode,
  10433. + struct file *file)
  10434. +{
  10435. + return single_open(file, query_cq_fields_show, NULL);
  10436. +}
  10437. +
  10438. +static ssize_t query_cq_fields_write(struct file *f,
  10439. + const char __user *buf, size_t count, loff_t *off)
  10440. +{
  10441. + int ret;
  10442. + unsigned long val;
  10443. +
  10444. + ret = user_input_convert(buf, count, &val);
  10445. + if (ret)
  10446. + return ret;
  10447. + query_cq_fields_data.cqid = (u32)val;
  10448. + return count;
  10449. +}
  10450. +
  10451. +static const struct file_operations query_cq_fields_fops = {
  10452. + .owner = THIS_MODULE,
  10453. + .open = query_cq_fields_open,
  10454. + .read = seq_read,
  10455. + .write = query_cq_fields_write,
  10456. + .release = single_release,
  10457. +};
  10458. +
  10459. +/*******************************************************************************
  10460. + * READ CEETM_XSFDR_IN_USE
  10461. + ******************************************************************************/
  10462. +struct query_ceetm_xsfdr_data_s {
  10463. + enum qm_dc_portal dcp_portal;
  10464. +};
  10465. +
  10466. +static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data;
  10467. +
  10468. +static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset)
  10469. +{
  10470. + int ret;
  10471. + unsigned int xsfdr_in_use;
  10472. + enum qm_dc_portal portal;
  10473. +
  10474. +
  10475. + if (qman_ip_rev < QMAN_REV31)
  10476. + return -EINVAL;
  10477. +
  10478. + portal = query_ceetm_xsfdr_data.dcp_portal;
  10479. + ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use);
  10480. + if (ret) {
  10481. + seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n",
  10482. + portal);
  10483. + return ret;
  10484. + }
  10485. +
  10486. + seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal,
  10487. + (xsfdr_in_use & 0x1FFF));
  10488. + return 0;
  10489. +}
  10490. +
  10491. +static int query_ceetm_xsfdr_open(struct inode *inode,
  10492. + struct file *file)
  10493. +{
  10494. + return single_open(file, query_ceetm_xsfdr_show, NULL);
  10495. +}
  10496. +
  10497. +static ssize_t query_ceetm_xsfdr_write(struct file *f,
  10498. + const char __user *buf, size_t count, loff_t *off)
  10499. +{
  10500. + int ret;
  10501. + unsigned long val;
  10502. +
  10503. + ret = user_input_convert(buf, count, &val);
  10504. + if (ret)
  10505. + return ret;
  10506. + if (val > qm_dc_portal_fman1)
  10507. + return -EINVAL;
  10508. + query_ceetm_xsfdr_data.dcp_portal = (u32)val;
  10509. + return count;
  10510. +}
  10511. +
  10512. +static const struct file_operations query_ceetm_xsfdr_fops = {
  10513. + .owner = THIS_MODULE,
  10514. + .open = query_ceetm_xsfdr_open,
  10515. + .read = seq_read,
  10516. + .write = query_ceetm_xsfdr_write,
  10517. + .release = single_release,
  10518. +};
  10519. +
  10520. +/* helper macros used in qman_debugfs_module_init */
  10521. +#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
  10522. + do { \
  10523. + d = debugfs_create_file(name, \
  10524. + mode, parent, \
  10525. + data, \
  10526. + fops); \
  10527. + if (d == NULL) { \
  10528. + ret = -ENOMEM; \
  10529. + goto _return; \
  10530. + } \
  10531. + } while (0)
  10532. +
  10533. +/* dfs_root as parent */
  10534. +#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
  10535. + QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
  10536. +
  10537. +/* fqd_root as parent */
  10538. +#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
  10539. + QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
  10540. +
  10541. +/* fqd state */
  10542. +#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
  10543. + QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
  10544. + (void *)&mask_filter[index], &qman_fqd_ctrl_fops)
  10545. +
  10546. +static int __init qman_debugfs_module_init(void)
  10547. +{
  10548. + int ret = 0;
  10549. + struct dentry *d, *fqd_root;
  10550. + u32 reg;
  10551. +
  10552. + fqid_max = 0;
  10553. + init_ccsrmempeek();
  10554. + if (qman_ccsr_start) {
  10555. + if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
  10556. + /* extract the size of the FQD window */
  10557. + reg = reg & 0x3f;
  10558. + /* calculate valid frame queue descriptor range */
  10559. + fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
  10560. + }
  10561. + }
  10562. + dfs_root = debugfs_create_dir("qman", NULL);
  10563. + fqd_root = debugfs_create_dir("fqd", dfs_root);
  10564. + if (dfs_root == NULL || fqd_root == NULL) {
  10565. + ret = -ENOMEM;
  10566. + pr_err("Cannot create qman/fqd debugfs dir\n");
  10567. + goto _return;
  10568. + }
  10569. + if (fqid_max) {
  10570. + QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
  10571. + NULL, &qman_ccsrmempeek_fops);
  10572. + }
  10573. + QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
  10574. + &query_fq_np_fields_data, &query_fq_np_fields_fops);
  10575. +
  10576. + QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
  10577. + &query_fq_fields_data, &query_fq_fields_fops);
  10578. +
  10579. + QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
  10580. + &query_wq_lengths_data, &query_wq_lengths_fops);
  10581. +
  10582. + QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
  10583. + &query_cgr_data, &query_cgr_fops);
  10584. +
  10585. + QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
  10586. + NULL, &query_congestion_fops);
  10587. +
  10588. + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
  10589. + NULL, &testwrite_cgr_fops);
  10590. +
  10591. + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
  10592. + NULL, &teswrite_cgr_cgrid_fops);
  10593. +
  10594. + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
  10595. + NULL, &teswrite_cgr_ibcnt_fops);
  10596. +
  10597. + QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO,
  10598. + &query_ccgr_data, &query_ccgr_fops);
  10599. + /* Create files with fqd_root as parent */
  10600. +
  10601. + QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO,
  10602. + (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
  10603. +
  10604. + QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
  10605. + (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
  10606. + &qman_fqd_state_fops);
  10607. +
  10608. + QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
  10609. + (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
  10610. + &qman_fqd_state_fops);
  10611. +
  10612. + QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
  10613. + (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
  10614. + &qman_fqd_state_fops);
  10615. +
  10616. + QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
  10617. + (void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
  10618. + &qman_fqd_state_fops);
  10619. +
  10620. + QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
  10621. + (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
  10622. + &qman_fqd_state_fops);
  10623. + QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO,
  10624. + &query_cq_fields_data, &query_cq_fields_fops);
  10625. + QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO,
  10626. + &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops);
  10627. +
  10628. +
  10629. + QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
  10630. +
  10631. + QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
  10632. +
  10633. + QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
  10634. +
  10635. + QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
  10636. +
  10637. + QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
  10638. +
  10639. + QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
  10640. +
  10641. + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
  10642. +
  10643. + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
  10644. +
  10645. + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
  10646. +
  10647. + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
  10648. +
  10649. + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
  10650. +
  10651. + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
  10652. +
  10653. + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
  10654. +
  10655. + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
  10656. +
  10657. + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
  10658. +
  10659. + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
  10660. +
  10661. + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
  10662. +
  10663. + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
  10664. +
  10665. + QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
  10666. + NULL, &qman_fqd_summary_fops);
  10667. +
  10668. + QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
  10669. + NULL, &qman_fqd_dest_wq_fops);
  10670. +
  10671. + QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
  10672. + NULL, &qman_fqd_cred_fops);
  10673. +
  10674. + return 0;
  10675. +
  10676. +_return:
  10677. + debugfs_remove_recursive(dfs_root);
  10678. + return ret;
  10679. +}
  10680. +
  10681. +static void __exit qman_debugfs_module_exit(void)
  10682. +{
  10683. + debugfs_remove_recursive(dfs_root);
  10684. +}
  10685. +
  10686. +module_init(qman_debugfs_module_init);
  10687. +module_exit(qman_debugfs_module_exit);
  10688. +MODULE_LICENSE("Dual BSD/GPL");
  10689. --- /dev/null
  10690. +++ b/drivers/staging/fsl_qbman/qman_driver.c
  10691. @@ -0,0 +1,980 @@
  10692. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  10693. + *
  10694. + * Redistribution and use in source and binary forms, with or without
  10695. + * modification, are permitted provided that the following conditions are met:
  10696. + * * Redistributions of source code must retain the above copyright
  10697. + * notice, this list of conditions and the following disclaimer.
  10698. + * * Redistributions in binary form must reproduce the above copyright
  10699. + * notice, this list of conditions and the following disclaimer in the
  10700. + * documentation and/or other materials provided with the distribution.
  10701. + * * Neither the name of Freescale Semiconductor nor the
  10702. + * names of its contributors may be used to endorse or promote products
  10703. + * derived from this software without specific prior written permission.
  10704. + *
  10705. + *
  10706. + * ALTERNATIVELY, this software may be distributed under the terms of the
  10707. + * GNU General Public License ("GPL") as published by the Free Software
  10708. + * Foundation, either version 2 of that License or (at your option) any
  10709. + * later version.
  10710. + *
  10711. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  10712. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  10713. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  10714. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  10715. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  10716. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  10717. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  10718. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  10719. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  10720. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  10721. + */
  10722. +
  10723. +#include "qman_private.h"
  10724. +
  10725. +#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
  10726. +#ifdef CONFIG_HOTPLUG_CPU
  10727. +#include <linux/cpu.h>
  10728. +#endif
  10729. +
  10730. +/* Global variable containing revision id (even on non-control plane systems
  10731. + * where CCSR isn't available) */
  10732. +u16 qman_ip_rev;
  10733. +EXPORT_SYMBOL(qman_ip_rev);
  10734. +u8 qman_ip_cfg;
  10735. +EXPORT_SYMBOL(qman_ip_cfg);
  10736. +u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
  10737. +EXPORT_SYMBOL(qm_channel_pool1);
  10738. +u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
  10739. +EXPORT_SYMBOL(qm_channel_caam);
  10740. +u16 qm_channel_pme = QMAN_CHANNEL_PME;
  10741. +EXPORT_SYMBOL(qm_channel_pme);
  10742. +u16 qm_channel_dce = QMAN_CHANNEL_DCE;
  10743. +EXPORT_SYMBOL(qm_channel_dce);
  10744. +u16 qman_portal_max;
  10745. +EXPORT_SYMBOL(qman_portal_max);
  10746. +
  10747. +u32 qman_clk;
  10748. +struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
  10749. +/* the qman ceetm instances on the given SoC */
  10750. +u8 num_ceetms;
  10751. +
  10752. +/* For these variables, and the portal-initialisation logic, the
  10753. + * comments in bman_driver.c apply here so won't be repeated. */
  10754. +static struct qman_portal *shared_portals[NR_CPUS];
  10755. +static int num_shared_portals;
  10756. +static int shared_portals_idx;
  10757. +static LIST_HEAD(unused_pcfgs);
  10758. +static DEFINE_SPINLOCK(unused_pcfgs_lock);
  10759. +
  10760. +/* A SDQCR mask comprising all the available/visible pool channels */
  10761. +static u32 pools_sdqcr;
  10762. +
  10763. +#define STR_ERR_NOPROP "No '%s' property in node %s\n"
  10764. +#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
  10765. +#define STR_FQID_RANGE "fsl,fqid-range"
  10766. +#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
  10767. +#define STR_CGRID_RANGE "fsl,cgrid-range"
  10768. +
  10769. +/* A "fsl,fqid-range" node; release the given range to the allocator */
  10770. +static __init int fsl_fqid_range_init(struct device_node *node)
  10771. +{
  10772. + int ret;
  10773. + const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
  10774. + if (!range) {
  10775. + pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
  10776. + return -EINVAL;
  10777. + }
  10778. + if (ret != 8) {
  10779. + pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
  10780. + return -EINVAL;
  10781. + }
  10782. + qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10783. + pr_info("Qman: FQID allocator includes range %d:%d\n",
  10784. + be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10785. + return 0;
  10786. +}
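As a minimal sketch of how a range seeded here is consumed later, assuming the allocator entry points are the qman_alloc_fqid()/qman_release_fqid() pair (an assumption; only the seeding side appears in this file, and the example function name is hypothetical):

	/* Hypothetical consumer of an FQID from the seeded range. */
	static int example_use_one_fqid(void)
	{
		u32 fqid;

		if (qman_alloc_fqid(&fqid))	/* assumed API */
			return -ENOMEM;		/* range exhausted */
		/* ... initialise and use a frame queue with 'fqid' ... */
		qman_release_fqid(fqid);	/* assumed API */
		return 0;
	}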
  10787. +
  10788. +/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
  10789. +static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
  10790. +{
  10791. + int ret;
  10792. + const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
  10793. + if (!chanid) {
  10794. + pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
  10795. + return -EINVAL;
  10796. + }
  10797. + if (ret != 8) {
  10798. + pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
  10799. + return -EINVAL;
  10800. + }
  10801. + for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
  10802. + pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
  10803. + return 0;
  10804. +}
  10805. +
  10806. +/* A "fsl,pool-channel-range" node; release the given range to the allocator */
  10807. +static __init int fsl_pool_channel_range_init(struct device_node *node)
  10808. +{
  10809. + int ret;
  10810. + const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
  10811. + if (!chanid) {
  10812. + pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
  10813. + return -EINVAL;
  10814. + }
  10815. + if (ret != 8) {
  10816. + pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
  10817. + return -EINVAL;
  10818. + }
  10819. + qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
  10820. + pr_info("Qman: pool channel allocator includes range %d:%d\n",
  10821. + be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
  10822. + return 0;
  10823. +}
  10824. +
  10825. +/* A "fsl,cgrid-range" node; release the given range to the allocator */
  10826. +static __init int fsl_cgrid_range_init(struct device_node *node)
  10827. +{
  10828. + struct qman_cgr cgr;
  10829. + int ret, errors = 0;
  10830. + const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
  10831. + if (!range) {
  10832. + pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
  10833. + return -EINVAL;
  10834. + }
  10835. + if (ret != 8) {
  10836. + pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
  10837. + return -EINVAL;
  10838. + }
  10839. + qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10840. + pr_info("Qman: CGRID allocator includes range %d:%d\n",
  10841. + be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10842. + for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
  10843. + ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
  10844. + if (ret)
  10845. + errors++;
  10846. + }
  10847. + if (errors)
  10848. + pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
  10849. + errors, (errors > 1) ? "s" : "", be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10850. + return 0;
  10851. +}
  10852. +
  10853. +static __init int fsl_ceetm_init(struct device_node *node)
  10854. +{
  10855. + enum qm_dc_portal dcp_portal;
  10856. + struct qm_ceetm_sp *sp;
  10857. + struct qm_ceetm_lni *lni;
  10858. + int ret, i;
  10859. + const u32 *range;
  10860. +
  10861. + /* Find LFQID range */
  10862. + range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
  10863. + if (!range) {
  10864. + pr_err("No fsl,ceetm-lfqid-range in node %s\n",
  10865. + node->full_name);
  10866. + return -EINVAL;
  10867. + }
  10868. + if (ret != 8) {
  10869. + pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
  10870. + " %s\n", node->full_name);
  10871. + return -EINVAL;
  10872. + }
  10873. +
  10874. + dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
  10875. + if (dcp_portal > qm_dc_portal_fman1) {
  10876. + pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
  10877. + return -EINVAL;
  10878. + }
  10879. +
  10880. + if (dcp_portal == qm_dc_portal_fman0)
  10881. + qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10882. + if (dcp_portal == qm_dc_portal_fman1)
  10883. + qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10884. + pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
  10885. + " 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10886. +
  10887. + qman_ceetms[dcp_portal].idx = dcp_portal;
  10888. + INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
  10889. + INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
  10890. +
  10891. + /* Find Sub-portal range */
  10892. + range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
  10893. + if (!range) {
  10894. + pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
  10895. + return -EINVAL;
  10896. + }
  10897. + if (ret != 8) {
  10898. + pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
  10899. + node->full_name);
  10900. + return -EINVAL;
  10901. + }
  10902. +
  10903. + for (i = 0; i < be32_to_cpu(range[1]); i++) {
  10904. + sp = kzalloc(sizeof(*sp), GFP_KERNEL);
  10905. + if (!sp) {
  10906. + pr_err("Can't alloc memory for sub-portal %d\n",
  10907. + be32_to_cpu(range[0]) + i);
  10908. + return -ENOMEM;
  10909. + }
  10910. + sp->idx = be32_to_cpu(range[0]) + i;
  10911. + sp->dcp_idx = dcp_portal;
  10912. + sp->is_claimed = 0;
  10913. + list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
  10914. + sp++;
  10915. + }
  10916. + pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
  10917. + be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
  10918. + qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
  10919. + qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
  10920. +
  10921. + /* Find LNI range */
  10922. + range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
  10923. + if (!range) {
  10924. + pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
  10925. + return -EINVAL;
  10926. + }
  10927. + if (ret != 8) {
  10928. + pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
  10929. + node->full_name);
  10930. + return -EINVAL;
  10931. + }
  10932. +
  10933. + for (i = 0; i < be32_to_cpu(range[1]); i++) {
  10934. + lni = kzalloc(sizeof(*lni), GFP_KERNEL);
  10935. + if (!lni) {
  10936. + pr_err("Can't alloc memory for LNI %d\n",
  10937. + be32_to_cpu(range[0]) + i);
  10938. + return -ENOMEM;
  10939. + }
  10940. + lni->idx = be32_to_cpu(range[0]) + i;
  10941. + lni->dcp_idx = dcp_portal;
  10942. + lni->is_claimed = 0;
  10943. + INIT_LIST_HEAD(&lni->channels);
  10944. + list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis);
  10945. + lni++;
  10946. + }
  10947. + pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n",
  10948. + be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
  10949. + qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]);
  10950. + qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]);
  10951. +
  10952. + /* Find CEETM channel range */
  10953. + range = of_get_property(node, "fsl,ceetm-channel-range", &ret);
  10954. + if (!range) {
  10955. + pr_err("No fsl,ceetm-channel-range in node %s\n",
  10956. + node->full_name);
  10957. + return -EINVAL;
  10958. + }
  10959. + if (ret != 8) {
  10960. + pr_err("fsl,ceetm-channel-range is not a 2-cell range in node"
  10961. + " %s\n", node->full_name);
  10962. + return -EINVAL;
  10963. + }
  10964. +
  10965. + if (dcp_portal == qm_dc_portal_fman0)
  10966. + qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10967. + if (dcp_portal == qm_dc_portal_fman1)
  10968. + qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10969. + pr_debug("Qman: The channel allocator of CEETM %d includes"
  10970. + " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
  10971. +
  10972. + /* Set CEETM PRES register */
  10973. + ret = qman_ceetm_set_prescaler(dcp_portal);
  10974. + if (ret)
  10975. + return ret;
  10976. + return 0;
  10977. +}
  10978. +
  10979. +static void qman_get_ip_revision(struct device_node *dn)
  10980. +{
  10981. + u16 ip_rev = 0;
  10982. + u8 ip_cfg = QMAN_REV_CFG_0;
  10983. + for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
  10984. + if (!of_device_is_available(dn))
  10985. + continue;
  10986. + if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
  10987. + of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
  10988. + pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
  10989. + BUG_ON(1);
  10990. + } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
  10991. + of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
  10992. + ip_rev = QMAN_REV11;
  10993. + qman_portal_max = 10;
  10994. + } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
  10995. + of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
  10996. + ip_rev = QMAN_REV12;
  10997. + qman_portal_max = 10;
  10998. + } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
  10999. + of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
  11000. + ip_rev = QMAN_REV20;
  11001. + qman_portal_max = 3;
  11002. + } else if (of_device_is_compatible(dn,
  11003. + "fsl,qman-portal-3.0.0")) {
  11004. + ip_rev = QMAN_REV30;
  11005. + qman_portal_max = 50;
  11006. + } else if (of_device_is_compatible(dn,
  11007. + "fsl,qman-portal-3.0.1")) {
  11008. + ip_rev = QMAN_REV30;
  11009. + qman_portal_max = 25;
  11010. + ip_cfg = QMAN_REV_CFG_1;
  11011. + } else if (of_device_is_compatible(dn,
  11012. + "fsl,qman-portal-3.1.0")) {
  11013. + ip_rev = QMAN_REV31;
  11014. + qman_portal_max = 50;
  11015. + } else if (of_device_is_compatible(dn,
  11016. + "fsl,qman-portal-3.1.1")) {
  11017. + ip_rev = QMAN_REV31;
  11018. + qman_portal_max = 25;
  11019. + ip_cfg = QMAN_REV_CFG_1;
  11020. + } else if (of_device_is_compatible(dn,
  11021. + "fsl,qman-portal-3.1.2")) {
  11022. + ip_rev = QMAN_REV31;
  11023. + qman_portal_max = 18;
  11024. + ip_cfg = QMAN_REV_CFG_2;
  11025. + } else if (of_device_is_compatible(dn,
  11026. + "fsl,qman-portal-3.1.3")) {
  11027. + ip_rev = QMAN_REV31;
  11028. + qman_portal_max = 10;
  11029. + ip_cfg = QMAN_REV_CFG_3;
  11030. + } else if (of_device_is_compatible(dn,
  11031. + "fsl,qman-portal-3.2.0")) {
  11032. + ip_rev = QMAN_REV32;
  11033. + qman_portal_max = 10;
  11034. + ip_cfg = QMAN_REV_CFG_3; /* TODO: Verify for ls1043 */
  11035. + } else {
  11036. + pr_warn("unknown QMan version in portal node,"
  11037. + " default to rev1.1\n");
  11038. + ip_rev = QMAN_REV11;
  11039. + qman_portal_max = 10;
  11040. + }
  11041. +
  11042. + if (!qman_ip_rev) {
  11043. + if (ip_rev) {
  11044. + qman_ip_rev = ip_rev;
  11045. + qman_ip_cfg = ip_cfg;
  11046. + } else {
  11047. + pr_warn("unknown Qman version,"
  11048. + " default to rev1.1\n");
  11049. + qman_ip_rev = QMAN_REV11;
  11050. + qman_ip_cfg = QMAN_REV_CFG_0;
  11051. + }
  11052. + } else if (ip_rev && (qman_ip_rev != ip_rev))
  11053. + pr_warn("Revision=0x%04x, but portal '%s' has"
  11054. + " 0x%04x\n",
  11055. + qman_ip_rev, dn->full_name, ip_rev);
  11056. + if (qman_ip_rev == ip_rev)
  11057. + break;
  11058. + }
  11059. +}
  11060. +
  11061. +/* Parse a portal node, perform generic mapping duties and return the config. It
  11062. + * is not known at this stage for what purpose (or even if) the portal will be
  11063. + * used. */
  11064. +static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
  11065. +{
  11066. + struct qm_portal_config *pcfg;
  11067. + const u32 *index_p, *channel_p;
  11068. + u32 index, channel;
  11069. + int irq, ret;
  11070. + resource_size_t len;
  11071. +
  11072. + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
  11073. + if (!pcfg) {
  11074. + pr_err("can't allocate portal config\n");
  11075. + return NULL;
  11076. + }
  11077. +
  11078. + /*
  11079. + * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
  11080. + * 'struct device' in order to get the PAMU stashing setup and the QMan
  11081. + * portal [driver] won't function at all without ring stashing
  11082. + *
  11083. + * Making the QMan portal driver nice and proper is part of the
  11084. + * upstreaming effort
  11085. + */
  11086. + pcfg->dev.bus = &platform_bus_type;
  11087. + pcfg->dev.of_node = node;
  11088. +#ifdef CONFIG_FSL_PAMU
  11089. + pcfg->dev.archdata.iommu_domain = NULL;
  11090. +#endif
  11091. +
  11092. + ret = of_address_to_resource(node, DPA_PORTAL_CE,
  11093. + &pcfg->addr_phys[DPA_PORTAL_CE]);
  11094. + if (ret) {
  11095. + pr_err("Can't get %s property '%s'\n", node->full_name,
  11096. + "reg::CE");
  11097. + goto err;
  11098. + }
  11099. + ret = of_address_to_resource(node, DPA_PORTAL_CI,
  11100. + &pcfg->addr_phys[DPA_PORTAL_CI]);
  11101. + if (ret) {
  11102. + pr_err("Can't get %s property '%s'\n", node->full_name,
  11103. + "reg::CI");
  11104. + goto err;
  11105. + }
  11106. + index_p = of_get_property(node, "cell-index", &ret);
  11107. + if (!index_p || (ret != 4)) {
  11108. + pr_err("Can't get %s property '%s'\n", node->full_name,
  11109. + "cell-index");
  11110. + goto err;
  11111. + }
  11112. + index = be32_to_cpu(*index_p);
  11113. + if (index >= qman_portal_max) {
  11114. + pr_err("QMan portal index %d is beyond max (%d)\n",
  11115. + index, qman_portal_max);
  11116. + goto err;
  11117. + }
  11118. +
  11119. + channel_p = of_get_property(node, "fsl,qman-channel-id", &ret);
  11120. + if (!channel_p || (ret != 4)) {
  11121. + pr_err("Can't get %s property '%s'\n", node->full_name,
  11122. + "fsl,qman-channel-id");
  11123. + goto err;
  11124. + }
  11125. + channel = be32_to_cpu(*channel_p);
  11126. + if (channel != (index + QM_CHANNEL_SWPORTAL0))
  11127. + pr_err("Warning: node %s has mismatched %s and %s\n",
  11128. + node->full_name, "cell-index", "fsl,qman-channel-id");
  11129. + pcfg->public_cfg.channel = channel;
  11130. + pcfg->public_cfg.cpu = -1;
  11131. + irq = irq_of_parse_and_map(node, 0);
  11132. + if (irq == 0) {
  11133. + pr_err("Can't get %s property '%s'\n", node->full_name,
  11134. + "interrupts");
  11135. + goto err;
  11136. + }
  11137. + pcfg->public_cfg.irq = irq;
  11138. + pcfg->public_cfg.index = index;
  11139. +#ifdef CONFIG_FSL_QMAN_CONFIG
  11140. + /* We need the same LIODN offset for all portals */
  11141. + qman_liodn_fixup(pcfg->public_cfg.channel);
  11142. +#endif
  11143. +
  11144. + len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
  11145. + if (len != (unsigned long)len)
  11146. + goto err;
  11147. +
  11148. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  11149. + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
  11150. + pcfg->addr_phys[DPA_PORTAL_CE].start,
  11151. + resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
  11152. +
  11153. + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
  11154. + pcfg->addr_phys[DPA_PORTAL_CI].start,
  11155. + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
  11156. +#else
  11157. + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
  11158. + pcfg->addr_phys[DPA_PORTAL_CE].start,
  11159. + (unsigned long)len,
  11160. + 0);
  11161. + pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
  11162. + pcfg->addr_phys[DPA_PORTAL_CI].start,
  11163. + resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
  11164. + _PAGE_GUARDED | _PAGE_NO_CACHE);
  11165. +#endif
  11166. + return pcfg;
  11167. +err:
  11168. + kfree(pcfg);
  11169. + return NULL;
  11170. +}
  11171. +
  11172. +static struct qm_portal_config *get_pcfg(struct list_head *list)
  11173. +{
  11174. + struct qm_portal_config *pcfg;
  11175. + if (list_empty(list))
  11176. + return NULL;
  11177. + pcfg = list_entry(list->prev, struct qm_portal_config, list);
  11178. + list_del(&pcfg->list);
  11179. + return pcfg;
  11180. +}
  11181. +
  11182. +static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
  11183. +{
  11184. + struct qm_portal_config *pcfg;
  11185. + if (list_empty(list))
  11186. + return NULL;
  11187. + list_for_each_entry(pcfg, list, list) {
  11188. + if (pcfg->public_cfg.index == idx) {
  11189. + list_del(&pcfg->list);
  11190. + return pcfg;
  11191. + }
  11192. + }
  11193. + return NULL;
  11194. +}
  11195. +
  11196. +static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
  11197. +{
  11198. +#ifdef CONFIG_FSL_PAMU
  11199. + int ret;
  11200. + int window_count = 1;
  11201. + struct iommu_domain_geometry geom_attr;
  11202. + struct pamu_stash_attribute stash_attr;
  11203. +
  11204. + pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
  11205. + if (!pcfg->iommu_domain) {
  11206. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
  11207. + __func__);
  11208. + goto _no_iommu;
  11209. + }
  11210. + geom_attr.aperture_start = 0;
  11211. + geom_attr.aperture_end =
  11212. + ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
  11213. + geom_attr.force_aperture = true;
  11214. + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
  11215. + &geom_attr);
  11216. + if (ret < 0) {
  11217. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  11218. + __func__, ret);
  11219. + goto _iommu_domain_free;
  11220. + }
  11221. + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
  11222. + &window_count);
  11223. + if (ret < 0) {
  11224. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  11225. + __func__, ret);
  11226. + goto _iommu_domain_free;
  11227. + }
  11228. + stash_attr.cpu = cpu;
  11229. + stash_attr.cache = PAMU_ATTR_CACHE_L1;
  11230. + /* set stash information for the window */
  11231. + stash_attr.window = 0;
  11232. + ret = iommu_domain_set_attr(pcfg->iommu_domain,
  11233. + DOMAIN_ATTR_FSL_PAMU_STASH,
  11234. + &stash_attr);
  11235. + if (ret < 0) {
  11236. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  11237. + __func__, ret);
  11238. + goto _iommu_domain_free;
  11239. + }
  11240. + ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
  11241. + IOMMU_READ | IOMMU_WRITE);
  11242. + if (ret < 0) {
  11243. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
  11244. + __func__, ret);
  11245. + goto _iommu_domain_free;
  11246. + }
  11247. + ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
  11248. + if (ret < 0) {
  11249. + pr_err(KBUILD_MODNAME ":%s(): iommu_attach_device() = %d",
  11250. + __func__, ret);
  11251. + goto _iommu_domain_free;
  11252. + }
  11253. + ret = iommu_domain_set_attr(pcfg->iommu_domain,
  11254. + DOMAIN_ATTR_FSL_PAMU_ENABLE,
  11255. + &window_count);
  11256. + if (ret < 0) {
  11257. + pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
  11258. + __func__, ret);
  11259. + goto _iommu_detach_device;
  11260. + }
  11261. +
  11262. +_no_iommu:
  11263. +#endif
  11264. +#ifdef CONFIG_FSL_QMAN_CONFIG
  11265. + if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
  11266. +#endif
  11267. + pr_warn("Failed to set QMan portal's stash request queue\n");
  11268. +
  11269. + return;
  11270. +
  11271. +#ifdef CONFIG_FSL_PAMU
  11272. +_iommu_detach_device:
  11273. + iommu_detach_device(pcfg->iommu_domain, NULL);
  11274. +_iommu_domain_free:
  11275. + iommu_domain_free(pcfg->iommu_domain);
  11276. +#endif
  11277. +}
  11278. +
  11279. +struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
  11280. +{
  11281. + struct qm_portal_config *ret;
  11282. + spin_lock(&unused_pcfgs_lock);
  11283. + if (idx == QBMAN_ANY_PORTAL_IDX)
  11284. + ret = get_pcfg(&unused_pcfgs);
  11285. + else
  11286. + ret = get_pcfg_idx(&unused_pcfgs, idx);
  11287. + spin_unlock(&unused_pcfgs_lock);
  11288. + /* Bind stashing LIODNs to the CPU we are currently executing on, and
  11289. + * set the portal to use the stashing request queue corresponding to the
  11290. + * cpu as well. The user-space driver assumption is that the pthread has
  11291. + * to already be affine to one cpu only before opening a portal. If that
  11292. + * check is circumvented, the only risk is a performance degradation -
  11293. + * stashing will go to whatever cpu they happened to be running on when
  11294. + * opening the device file, and if that isn't the cpu they subsequently
  11295. + * bind to and do their polling on, tough. */
  11296. + if (ret)
  11297. + portal_set_cpu(ret, hard_smp_processor_id());
  11298. + return ret;
  11299. +}
  11300. +
  11301. +struct qm_portal_config *qm_get_unused_portal(void)
  11302. +{
  11303. + return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
  11304. +}
  11305. +
  11306. +void qm_put_unused_portal(struct qm_portal_config *pcfg)
  11307. +{
  11308. + spin_lock(&unused_pcfgs_lock);
  11309. + list_add(&pcfg->list, &unused_pcfgs);
  11310. + spin_unlock(&unused_pcfgs_lock);
  11311. +}
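A minimal sketch of the intended pairing of these two helpers (the middle of the example and its function name are illustrative, not taken from the patch):

	/* Claim a spare portal config, use it, then hand it back. */
	static int example_claim_and_release_portal(void)
	{
		struct qm_portal_config *pcfg = qm_get_unused_portal();

		if (!pcfg)
			return -ENODEV;	/* no unused portals on this system */
		/* ... map/use the portal described by 'pcfg' ... */
		qm_put_unused_portal(pcfg);
		return 0;
	}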
  11312. +
  11313. +static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
  11314. +{
  11315. + struct qman_portal *p;
  11316. +
  11317. + pcfg->iommu_domain = NULL;
  11318. + portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
  11319. + p = qman_create_affine_portal(pcfg, NULL);
  11320. + if (p) {
  11321. + u32 irq_sources = 0;
  11322. + /* Determine what should be interrupt-vs-poll driven */
  11323. +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
  11324. + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
  11325. + QM_PIRQ_CSCI | QM_PIRQ_CCSCI;
  11326. +#endif
  11327. +#ifdef CONFIG_FSL_DPA_PIRQ_FAST
  11328. + irq_sources |= QM_PIRQ_DQRI;
  11329. +#endif
  11330. + qman_p_irqsource_add(p, irq_sources);
  11331. + pr_info("Qman portal %sinitialised, cpu %d\n",
  11332. + pcfg->public_cfg.is_shared ? "(shared) " : "",
  11333. + pcfg->public_cfg.cpu);
  11334. + } else
  11335. + pr_crit("Qman portal failure on cpu %d\n",
  11336. + pcfg->public_cfg.cpu);
  11337. + return p;
  11338. +}
  11339. +
  11340. +static void init_slave(int cpu)
  11341. +{
  11342. + struct qman_portal *p;
  11343. + struct cpumask oldmask = *tsk_cpus_allowed(current);
  11344. + set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
  11345. + p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
  11346. + if (!p)
  11347. + pr_err("Qman slave portal failure on cpu %d\n", cpu);
  11348. + else
  11349. + pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
  11350. + set_cpus_allowed_ptr(current, &oldmask);
  11351. + if (shared_portals_idx >= num_shared_portals)
  11352. + shared_portals_idx = 0;
  11353. +}
  11354. +
  11355. +static struct cpumask want_unshared __initdata;
  11356. +static struct cpumask want_shared __initdata;
  11357. +
  11358. +static int __init parse_qportals(char *str)
  11359. +{
  11360. + return parse_portals_bootarg(str, &want_shared, &want_unshared,
  11361. + "qportals");
  11362. +}
  11363. +__setup("qportals=", parse_qportals);
  11364. +
  11365. +static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
  11366. + unsigned int cpu)
  11367. +{
  11368. +#ifdef CONFIG_FSL_PAMU
  11369. + struct pamu_stash_attribute stash_attr;
  11370. + int ret;
  11371. +
  11372. + if (pcfg->iommu_domain) {
  11373. + stash_attr.cpu = cpu;
  11374. + stash_attr.cache = PAMU_ATTR_CACHE_L1;
  11375. + /* set stash information for the window */
  11376. + stash_attr.window = 0;
  11377. + ret = iommu_domain_set_attr(pcfg->iommu_domain,
  11378. + DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
  11379. + if (ret < 0) {
  11380. + pr_err("Failed to update pamu stash setting\n");
  11381. + return;
  11382. + }
  11383. + }
  11384. +#endif
  11385. +#ifdef CONFIG_FSL_QMAN_CONFIG
  11386. + if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
  11387. + pr_warn("Failed to update portal's stash request queue\n");
  11388. +#endif
  11389. +}
  11390. +
  11391. +static void qman_offline_cpu(unsigned int cpu)
  11392. +{
  11393. + struct qman_portal *p;
  11394. + const struct qm_portal_config *pcfg;
  11395. + p = (struct qman_portal *)affine_portals[cpu];
  11396. + if (p) {
  11397. + pcfg = qman_get_qm_portal_config(p);
  11398. + if (pcfg) {
  11399. + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
  11400. + qman_portal_update_sdest(pcfg, 0);
  11401. + }
  11402. + }
  11403. +}
  11404. +
  11405. +#ifdef CONFIG_HOTPLUG_CPU
  11406. +static void qman_online_cpu(unsigned int cpu)
  11407. +{
  11408. + struct qman_portal *p;
  11409. + const struct qm_portal_config *pcfg;
  11410. + p = (struct qman_portal *)affine_portals[cpu];
  11411. + if (p) {
  11412. + pcfg = qman_get_qm_portal_config(p);
  11413. + if (pcfg) {
  11414. + irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
  11415. + qman_portal_update_sdest(pcfg, cpu);
  11416. + }
  11417. + }
  11418. +}
  11419. +
  11420. +static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
  11421. + unsigned long action, void *hcpu)
  11422. +{
  11423. + unsigned int cpu = (unsigned long)hcpu;
  11424. +
  11425. + switch (action) {
  11426. + case CPU_ONLINE:
  11427. + case CPU_ONLINE_FROZEN:
  11428. + qman_online_cpu(cpu);
  11429. + break;
  11430. + case CPU_DOWN_PREPARE:
  11431. + case CPU_DOWN_PREPARE_FROZEN:
  11432. + qman_offline_cpu(cpu);
  11433. + default:
  11434. + break;
  11435. + }
  11436. + return NOTIFY_OK;
  11437. +}
  11438. +
  11439. +static struct notifier_block qman_hotplug_cpu_notifier = {
  11440. + .notifier_call = qman_hotplug_cpu_callback,
  11441. +};
  11442. +#endif /* CONFIG_HOTPLUG_CPU */
  11443. +
  11444. +__init int qman_init(void)
  11445. +{
  11446. + struct cpumask slave_cpus;
  11447. + struct cpumask unshared_cpus = *cpu_none_mask;
  11448. + struct cpumask shared_cpus = *cpu_none_mask;
  11449. + LIST_HEAD(unshared_pcfgs);
  11450. + LIST_HEAD(shared_pcfgs);
  11451. + struct device_node *dn;
  11452. + struct qm_portal_config *pcfg;
  11453. + struct qman_portal *p;
  11454. + int cpu, ret;
  11455. + const u32 *clk;
  11456. + struct cpumask offline_cpus;
  11457. +
  11458. + /* Initialise the Qman (CCSR) device */
  11459. + for_each_compatible_node(dn, NULL, "fsl,qman") {
  11460. + if (!qman_init_ccsr(dn))
  11461. + pr_info("Qman err interrupt handler present\n");
  11462. + else
  11463. + pr_err("Qman CCSR setup failed\n");
  11464. +
  11465. + clk = of_get_property(dn, "clock-frequency", NULL);
  11466. + if (!clk)
  11467. + pr_warn("Can't find Qman clock frequency\n");
  11468. + else
  11469. + qman_clk = be32_to_cpu(*clk);
  11470. + }
  11471. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  11472. + /* Setup lookup table for FQ demux */
  11473. + ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64);
  11474. + if (ret)
  11475. + return ret;
  11476. +#endif
  11477. +
  11478. + /* Get qman ip revision */
  11479. + qman_get_ip_revision(dn);
  11480. + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
  11481. + qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
  11482. + qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
  11483. + qm_channel_pme = QMAN_CHANNEL_PME_REV3;
  11484. + }
  11485. +
  11486. + if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2))
  11487. + qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312;
  11488. +
  11489. + /*
  11490. + * Parse the ceetm node to get how many ceetm instances are supported
  11491. + * on the current silicon. num_ceetms must be confirmed before portals
  11492. + * are initialised.
  11493. + */
  11494. + num_ceetms = 0;
  11495. + for_each_compatible_node(dn, NULL, "fsl,qman-ceetm")
  11496. + num_ceetms++;
  11497. +
  11498. + /* Parse pool channels into the SDQCR mask. (Must happen before portals
  11499. + * are initialised.) */
  11500. + for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
  11501. + ret = fsl_pool_channel_range_sdqcr(dn);
  11502. + if (ret)
  11503. + return ret;
  11504. + }
  11505. +
  11506. + memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
  11507. + /* Initialise portals. See bman_driver.c for comments */
  11508. + for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
  11509. + if (!of_device_is_available(dn))
  11510. + continue;
  11511. + pcfg = parse_pcfg(dn);
  11512. + if (pcfg) {
  11513. + pcfg->public_cfg.pools = pools_sdqcr;
  11514. + list_add_tail(&pcfg->list, &unused_pcfgs);
  11515. + }
  11516. + }
  11517. + for_each_possible_cpu(cpu) {
  11518. + if (cpumask_test_cpu(cpu, &want_shared)) {
  11519. + pcfg = get_pcfg(&unused_pcfgs);
  11520. + if (!pcfg)
  11521. + break;
  11522. + pcfg->public_cfg.cpu = cpu;
  11523. + list_add_tail(&pcfg->list, &shared_pcfgs);
  11524. + cpumask_set_cpu(cpu, &shared_cpus);
  11525. + }
  11526. + if (cpumask_test_cpu(cpu, &want_unshared)) {
  11527. + if (cpumask_test_cpu(cpu, &shared_cpus))
  11528. + continue;
  11529. + pcfg = get_pcfg(&unused_pcfgs);
  11530. + if (!pcfg)
  11531. + break;
  11532. + pcfg->public_cfg.cpu = cpu;
  11533. + list_add_tail(&pcfg->list, &unshared_pcfgs);
  11534. + cpumask_set_cpu(cpu, &unshared_cpus);
  11535. + }
  11536. + }
  11537. + if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
  11538. + for_each_online_cpu(cpu) {
  11539. + pcfg = get_pcfg(&unused_pcfgs);
  11540. + if (!pcfg)
  11541. + break;
  11542. + pcfg->public_cfg.cpu = cpu;
  11543. + list_add_tail(&pcfg->list, &unshared_pcfgs);
  11544. + cpumask_set_cpu(cpu, &unshared_cpus);
  11545. + }
  11546. + }
  11547. + cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
  11548. + cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
  11549. + if (cpumask_empty(&slave_cpus)) {
  11550. + if (!list_empty(&shared_pcfgs)) {
  11551. + cpumask_or(&unshared_cpus, &unshared_cpus,
  11552. + &shared_cpus);
  11553. + cpumask_clear(&shared_cpus);
  11554. + list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
  11555. + INIT_LIST_HEAD(&shared_pcfgs);
  11556. + }
  11557. + } else {
  11558. + if (list_empty(&shared_pcfgs)) {
  11559. + pcfg = get_pcfg(&unshared_pcfgs);
  11560. + if (!pcfg) {
  11561. + pr_crit("No QMan portals available!\n");
  11562. + return 0;
  11563. + }
  11564. + cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
  11565. + cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
  11566. + list_add_tail(&pcfg->list, &shared_pcfgs);
  11567. + }
  11568. + }
  11569. + list_for_each_entry(pcfg, &unshared_pcfgs, list) {
  11570. + pcfg->public_cfg.is_shared = 0;
  11571. + p = init_pcfg(pcfg);
  11572. + if (!p) {
  11573. + pr_crit("Unable to configure portals\n");
  11574. + return 0;
  11575. + }
  11576. + }
  11577. + list_for_each_entry(pcfg, &shared_pcfgs, list) {
  11578. + pcfg->public_cfg.is_shared = 1;
  11579. + p = init_pcfg(pcfg);
  11580. + if (p)
  11581. + shared_portals[num_shared_portals++] = p;
  11582. + }
  11583. + if (!cpumask_empty(&slave_cpus))
  11584. + for_each_cpu(cpu, &slave_cpus)
  11585. + init_slave(cpu);
  11586. + pr_info("Qman portals initialised\n");
  11587. + cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
  11588. + for_each_cpu(cpu, &offline_cpus)
  11589. + qman_offline_cpu(cpu);
  11590. +#ifdef CONFIG_HOTPLUG_CPU
  11591. + register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
  11592. +#endif
  11593. + return 0;
  11594. +}
  11595. +
  11596. +__init int qman_resource_init(void)
  11597. +{
  11598. + struct device_node *dn;
  11599. + int ret;
  11600. +
  11601. + /* Initialise FQID allocation ranges */
  11602. + for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
  11603. + ret = fsl_fqid_range_init(dn);
  11604. + if (ret)
  11605. + return ret;
  11606. + }
  11607. + /* Initialise CGRID allocation ranges */
  11608. + for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
  11609. + ret = fsl_cgrid_range_init(dn);
  11610. + if (ret)
  11611. + return ret;
  11612. + }
  11613. + /* Parse pool channels into the allocator. (Must happen after portals
  11614. + * are initialised.) */
  11615. + for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
  11616. + ret = fsl_pool_channel_range_init(dn);
  11617. + if (ret)
  11618. + return ret;
  11619. + }
  11620. +
  11621. + /* Parse CEETM */
  11622. + for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") {
  11623. + ret = fsl_ceetm_init(dn);
  11624. + if (ret)
  11625. + return ret;
  11626. + }
  11627. + return 0;
  11628. +}
  11629. +
  11630. +#ifdef CONFIG_SUSPEND
  11631. +void suspend_unused_qportal(void)
  11632. +{
  11633. + struct qm_portal_config *pcfg;
  11634. +
  11635. + if (list_empty(&unused_pcfgs))
  11636. + return;
  11637. +
  11638. + list_for_each_entry(pcfg, &unused_pcfgs, list) {
  11639. +#ifdef CONFIG_PM_DEBUG
  11640. + pr_info("Need to save qportal %d\n", pcfg->public_cfg.index);
  11641. +#endif
  11642. + /* save isdr, disable all via isdr, clear isr */
  11643. + pcfg->saved_isdr =
  11644. + __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
  11645. + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
  11646. + 0xe08);
  11647. + __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
  11648. + 0xe00);
  11649. + }
  11650. + return;
  11651. +}
  11652. +
  11653. +void resume_unused_qportal(void)
  11654. +{
  11655. + struct qm_portal_config *pcfg;
  11656. +
  11657. + if (list_empty(&unused_pcfgs))
  11658. + return;
  11659. +
  11660. + list_for_each_entry(pcfg, &unused_pcfgs, list) {
  11661. +#ifdef CONFIG_PM_DEBUG
  11662. + pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index);
  11663. +#endif
  11664. + /* restore isdr */
  11665. + __raw_writel(pcfg->saved_isdr,
  11666. + pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
  11667. + }
  11668. + return;
  11669. +}
  11670. +#endif
  11671. +
  11672. --- /dev/null
  11673. +++ b/drivers/staging/fsl_qbman/qman_high.c
  11674. @@ -0,0 +1,5568 @@
  11675. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  11676. + *
  11677. + * Redistribution and use in source and binary forms, with or without
  11678. + * modification, are permitted provided that the following conditions are met:
  11679. + * * Redistributions of source code must retain the above copyright
  11680. + * notice, this list of conditions and the following disclaimer.
  11681. + * * Redistributions in binary form must reproduce the above copyright
  11682. + * notice, this list of conditions and the following disclaimer in the
  11683. + * documentation and/or other materials provided with the distribution.
  11684. + * * Neither the name of Freescale Semiconductor nor the
  11685. + * names of its contributors may be used to endorse or promote products
  11686. + * derived from this software without specific prior written permission.
  11687. + *
  11688. + *
  11689. + * ALTERNATIVELY, this software may be distributed under the terms of the
  11690. + * GNU General Public License ("GPL") as published by the Free Software
  11691. + * Foundation, either version 2 of that License or (at your option) any
  11692. + * later version.
  11693. + *
  11694. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  11695. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  11696. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  11697. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  11698. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  11699. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  11700. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  11701. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  11702. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  11703. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  11704. + */
  11705. +
  11706. +#include "qman_low.h"
  11707. +
  11708. +/* Compilation constants */
  11709. +#define DQRR_MAXFILL 15
  11710. +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
  11711. +#define IRQNAME "QMan portal %d"
  11712. +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
  11713. +
  11714. +/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
  11715. + * positive, and rounding to the closest value if it's zero. NB, this macro
  11716. + * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
  11717. + * that are compatible with this. NB, these arguments should not be expressions
  11718. + * unless it is safe for them to be evaluated multiple times. Eg. do not pass
  11719. + * in "some_value++" as a parameter to the macro! */
  11720. +#define ROUNDING(n, d, r) \
  11721. + (((r) < 0) ? div64_u64((n), (d)) : \
  11722. + (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
  11723. + div64_u64(((n) + ((d) / 2)), (d))))
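A worked example of the three rounding modes, using n = 10 and d = 4 (values chosen only for illustration):

	/* ROUNDING(10, 4, -1) == 2  - round down:    10/4 truncated
	 * ROUNDING(10, 4,  1) == 3  - round up:      (10 + 3)/4
	 * ROUNDING(10, 4,  0) == 3  - round nearest: (10 + 2)/4 */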
  11724. +
  11725. +/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
  11726. + * inter-processor locking only. Note, FQLOCK() is always called either under a
  11727. + * local_irq_save() or from interrupt context - hence there's no need for irq
  11728. + * protection (and indeed, attempting to nest irq-protection doesn't work, as
  11729. + * the "irq en/disable" machinery isn't recursive...). */
  11730. +#define FQLOCK(fq) \
  11731. + do { \
  11732. + struct qman_fq *__fq478 = (fq); \
  11733. + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
  11734. + spin_lock(&__fq478->fqlock); \
  11735. + } while (0)
  11736. +#define FQUNLOCK(fq) \
  11737. + do { \
  11738. + struct qman_fq *__fq478 = (fq); \
  11739. + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
  11740. + spin_unlock(&__fq478->fqlock); \
  11741. + } while (0)
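A minimal sketch of the pattern these macros are written for; the mask and the example function name are placeholders, and the lock only has an effect on FQs created with QMAN_FQ_FLAG_LOCKED:

	/* Illustrative only: guard a flags/state update on an FQ object. */
	static void example_fq_state_change(struct qman_fq *fq, u32 state_mask)
	{
		FQLOCK(fq);
		fq_set(fq, state_mask);	/* hypothetical mask */
		/* ... issue the management command that changes the FQ state ... */
		FQUNLOCK(fq);
	}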
  11742. +
  11743. +static inline void fq_set(struct qman_fq *fq, u32 mask)
  11744. +{
  11745. + set_bits(mask, &fq->flags);
  11746. +}
  11747. +static inline void fq_clear(struct qman_fq *fq, u32 mask)
  11748. +{
  11749. + clear_bits(mask, &fq->flags);
  11750. +}
  11751. +static inline int fq_isset(struct qman_fq *fq, u32 mask)
  11752. +{
  11753. + return fq->flags & mask;
  11754. +}
  11755. +static inline int fq_isclear(struct qman_fq *fq, u32 mask)
  11756. +{
  11757. + return !(fq->flags & mask);
  11758. +}
  11759. +
  11760. +struct qman_portal {
  11761. + struct qm_portal p;
  11762. + unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
  11763. + unsigned long irq_sources;
  11764. + u32 use_eqcr_ci_stashing;
  11765. + u32 slowpoll; /* only used when interrupts are off */
  11766. + struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
  11767. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  11768. + struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
  11769. +#endif
  11770. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  11771. + raw_spinlock_t sharing_lock; /* only used if is_shared */
  11772. + int is_shared;
  11773. + struct qman_portal *sharing_redirect;
  11774. +#endif
  11775. + u32 sdqcr;
  11776. + int dqrr_disable_ref;
  11777. + /* A portal-specific handler for DCP ERNs. If this is NULL, the global
  11778. + * handler is called instead. */
  11779. + qman_cb_dc_ern cb_dc_ern;
  11780. + /* When the cpu-affine portal is activated, this is non-NULL */
  11781. + const struct qm_portal_config *config;
  11782. + /* This is needed for providing a non-NULL device to dma_map_***() */
  11783. + struct platform_device *pdev;
  11784. + struct dpa_rbtree retire_table;
  11785. + char irqname[MAX_IRQNAME];
  11786. + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
  11787. + struct qman_cgrs *cgrs;
  11788. + /* linked-list of CSCN handlers. */
  11789. + struct list_head cgr_cbs;
  11790. + /* list lock */
  11791. + spinlock_t cgr_lock;
  11792. + /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */
  11793. + struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX];
  11794. + /* 256-element array, each is a linked-list of CCSCN handlers. */
  11795. + struct list_head ccgr_cbs[QMAN_CEETM_MAX];
  11796. + /* list lock */
  11797. + spinlock_t ccgr_lock;
  11798. + /* track if memory was allocated by the driver */
  11799. + u8 alloced;
  11800. + /* power management data */
  11801. + u32 save_isdr;
  11802. +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  11803. + /* Keep a shadow copy of the DQRR on LE systems
  11804. + as the SW needs to do byteswaps of read only
  11805. + memory. Must be aligned to the size of the
  11806. + ring to ensure easy index calculations based
  11807. + on address */
  11808. + struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
  11809. + __attribute__((aligned(512)));
  11810. +#endif
  11811. +};
  11812. +
  11813. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  11814. +#define PORTAL_IRQ_LOCK(p, irqflags) \
  11815. + do { \
  11816. + if ((p)->is_shared) \
  11817. + raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
  11818. + else \
  11819. + local_irq_save(irqflags); \
  11820. + } while (0)
  11821. +#define PORTAL_IRQ_UNLOCK(p, irqflags) \
  11822. + do { \
  11823. + if ((p)->is_shared) \
  11824. + raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
  11825. + irqflags); \
  11826. + else \
  11827. + local_irq_restore(irqflags); \
  11828. + } while (0)
  11829. +#else
  11830. +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
  11831. +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
  11832. +#endif
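A minimal sketch of the critical-section pattern these macros support (the function name and body are illustrative): with CONFIG_FSL_DPA_PORTAL_SHARE a shared portal takes its sharing lock, otherwise only local interrupts are disabled.

	/* Illustrative critical section around portal ring access. */
	static void example_portal_critical_section(struct qman_portal *p)
	{
		unsigned long irqflags;

		PORTAL_IRQ_LOCK(p, irqflags);
		/* ... access the EQCR/DQRR/MR rings of portal 'p' ... */
		PORTAL_IRQ_UNLOCK(p, irqflags);
	}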
  11833. +
  11834. +/* Global handler for DCP ERNs. Used when the portal receiving the message does
  11835. + * not have a portal-specific handler. */
  11836. +static qman_cb_dc_ern cb_dc_ern;
  11837. +
  11838. +static cpumask_t affine_mask;
  11839. +static DEFINE_SPINLOCK(affine_mask_lock);
  11840. +static u16 affine_channels[NR_CPUS];
  11841. +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
  11842. +void *affine_portals[NR_CPUS];
  11843. +
  11844. +/* "raw" gets the cpu-local struct whether it's a redirect or not. */
  11845. +static inline struct qman_portal *get_raw_affine_portal(void)
  11846. +{
  11847. + return &get_cpu_var(qman_affine_portal);
  11848. +}
  11849. +/* For ops that can redirect, this obtains the portal to use */
  11850. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  11851. +static inline struct qman_portal *get_affine_portal(void)
  11852. +{
  11853. + struct qman_portal *p = get_raw_affine_portal();
  11854. + if (p->sharing_redirect)
  11855. + return p->sharing_redirect;
  11856. + return p;
  11857. +}
  11858. +#else
  11859. +#define get_affine_portal() get_raw_affine_portal()
  11860. +#endif
  11861. +/* For every "get", there must be a "put" */
  11862. +static inline void put_affine_portal(void)
  11863. +{
  11864. + put_cpu_var(qman_affine_portal);
  11865. +}
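A minimal sketch of the get/put discipline described above (the example function name and the work in the middle are illustrative):

	/* Every get_affine_portal() pins the caller to its cpu-local (or
	 * redirected) portal and must be balanced by put_affine_portal(). */
	static void example_affine_portal_op(void)
	{
		struct qman_portal *p = get_affine_portal();

		/* ... perform the portal operation on 'p' ... */
		put_affine_portal();
	}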
  11866. +/* Exception: poll functions assume the caller is cpu-affine and in no risk of
  11867. + * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
  11868. + * semantic - ie. to disable pre-emption. Some use-cases expect the execution
  11869. + * context to remain as non-atomic during poll-triggered callbacks as it was
  11870. + * when the poll API was first called (eg. NAPI), so we go out of our way in
  11871. + * this case to not disable pre-emption. */
  11872. +static inline struct qman_portal *get_poll_portal(void)
  11873. +{
  11874. + return &get_cpu_var(qman_affine_portal);
  11875. +}
  11876. +#define put_poll_portal()
  11877. +
  11878. +/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
  11879. + * retirement notifications (the fact they are sometimes h/w-consumed means that
  11880. + * contextB isn't always a s/w demux - and as we can't know which case it is
  11881. + * when looking at the notification, we have to use the slow lookup for all of
  11882. + * them). NB, it's possible to have multiple FQ objects refer to the same FQID
  11883. + * (though at most one of them should be the consumer), so this table isn't for
  11884. + * all FQs - FQs are added when retirement commands are issued, and removed when
  11885. + * they complete, which also massively reduces the size of this table. */
  11886. +IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
  11887. +
  11888. +/* This is what everything can wait on, even if it migrates to a different cpu
  11889. + * to the one whose affine portal it is waiting on. */
  11890. +static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
  11891. +
  11892. +static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
  11893. +{
  11894. + int ret = fqtree_push(&p->retire_table, fq);
  11895. + if (ret)
  11896. + pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
  11897. + return ret;
  11898. +}
  11899. +
  11900. +static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
  11901. +{
  11902. + fqtree_del(&p->retire_table, fq);
  11903. +}
  11904. +
  11905. +static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
  11906. +{
  11907. + return fqtree_find(&p->retire_table, fqid);
  11908. +}
  11909. +
  11910. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  11911. +static void **qman_fq_lookup_table;
  11912. +static size_t qman_fq_lookup_table_size;
  11913. +
  11914. +int qman_setup_fq_lookup_table(size_t num_entries)
  11915. +{
  11916. + /* Allocate 1 more entry since the first entry is not used */
  11917. + num_entries++;
  11918. + qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
  11919. + if (!qman_fq_lookup_table) {
  11920. + pr_err("QMan: Could not allocate fq lookup table\n");
  11921. + return -ENOMEM;
  11922. + }
  11923. + qman_fq_lookup_table_size = num_entries;
  11924. + pr_info("QMan: Allocated lookup table at %p, entry count %lu\n",
  11925. + qman_fq_lookup_table,
  11926. + (unsigned long)qman_fq_lookup_table_size);
  11927. + return 0;
  11928. +}
  11929. +
  11930. +/* global structure that maintains fq object mapping */
  11931. +static DEFINE_SPINLOCK(fq_hash_table_lock);
  11932. +
  11933. +static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
  11934. +{
  11935. + u32 i;
  11936. +
  11937. + spin_lock(&fq_hash_table_lock);
  11938. + /* Can't use index zero because this has special meaning
  11939. + * in context_b field. */
  11940. + for (i = 1; i < qman_fq_lookup_table_size; i++) {
  11941. + if (qman_fq_lookup_table[i] == NULL) {
  11942. + *entry = i;
  11943. + qman_fq_lookup_table[i] = fq;
  11944. + spin_unlock(&fq_hash_table_lock);
  11945. + return 0;
  11946. + }
  11947. + }
  11948. + spin_unlock(&fq_hash_table_lock);
  11949. + return -ENOMEM;
  11950. +}
  11951. +
  11952. +static void clear_fq_table_entry(u32 entry)
  11953. +{
  11954. + spin_lock(&fq_hash_table_lock);
  11955. + BUG_ON(entry >= qman_fq_lookup_table_size);
  11956. + qman_fq_lookup_table[entry] = NULL;
  11957. + spin_unlock(&fq_hash_table_lock);
  11958. +}
  11959. +
  11960. +static inline struct qman_fq *get_fq_table_entry(u32 entry)
  11961. +{
  11962. + BUG_ON(entry >= qman_fq_lookup_table_size);
  11963. + return qman_fq_lookup_table[entry];
  11964. +}
  11965. +#endif
  11966. +
  11967. +static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
  11968. +{
  11969. + /* Byteswap the FQD to HW format */
  11970. + fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
  11971. + fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
  11972. + fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
  11973. + fqd->context_b = cpu_to_be32(fqd->context_b);
  11974. + fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
  11975. +}
  11976. +
  11977. +static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
  11978. +{
  11979. + /* Byteswap the FQD to CPU format */
  11980. + fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
  11981. + fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
  11982. + fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
  11983. + fqd->context_b = be32_to_cpu(fqd->context_b);
  11984. + fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
  11985. +}
  11986. +
  11987. +/* Swap a 40 bit address */
  11988. +static inline u64 cpu_to_be40(u64 in)
  11989. +{
  11990. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  11991. + return in;
  11992. +#else
  11993. + u64 out = 0;
  11994. + u8 *p = (u8 *) &out;
  11995. + p[0] = in >> 32;
  11996. + p[1] = in >> 24;
  11997. + p[2] = in >> 16;
  11998. + p[3] = in >> 8;
  11999. + p[4] = in >> 0;
  12000. + return out;
  12001. +#endif
  12002. +}
  12003. +static inline u64 be40_to_cpu(u64 in)
  12004. +{
  12005. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  12006. + return in;
  12007. +#else
  12008. + u64 out = 0;
  12009. + u8 *pout = (u8 *) &out;
  12010. + u8 *pin = (u8 *) &in;
  12011. + pout[0] = pin[4];
  12012. + pout[1] = pin[3];
  12013. + pout[2] = pin[2];
  12014. + pout[3] = pin[1];
  12015. + pout[4] = pin[0];
  12016. + return out;
  12017. +#endif
  12018. +}
  12019. +
  12020. +/* Swap a 24 bit value */
  12021. +static inline u32 cpu_to_be24(u32 in)
  12022. +{
  12023. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  12024. + return in;
  12025. +#else
  12026. + u32 out = 0;
  12027. + u8 *p = (u8 *) &out;
  12028. + p[0] = in >> 16;
  12029. + p[1] = in >> 8;
  12030. + p[2] = in >> 0;
  12031. + return out;
  12032. +#endif
  12033. +}
  12034. +
  12035. +static inline u32 be24_to_cpu(u32 in)
  12036. +{
  12037. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  12038. + return in;
  12039. +#else
  12040. + u32 out = 0;
  12041. + u8 *pout = (u8 *) &out;
  12042. + u8 *pin = (u8 *) &in;
  12043. + pout[0] = pin[2];
  12044. + pout[1] = pin[1];
  12045. + pout[2] = pin[0];
  12046. + return out;
  12047. +#endif
  12048. +}
  12049. +
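+/* Swap a 48 bit value */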
  12050. +static inline u64 be48_to_cpu(u64 in)
  12051. +{
  12052. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  12053. + return in;
  12054. +#else
  12055. + u64 out = 0;
  12056. + u8 *pout = (u8 *) &out;
  12057. + u8 *pin = (u8 *) &in;
  12058. +
  12059. + pout[0] = pin[5];
  12060. + pout[1] = pin[4];
  12061. + pout[2] = pin[3];
  12062. + pout[3] = pin[2];
  12063. + pout[4] = pin[1];
  12064. + pout[5] = pin[0];
  12065. + return out;
  12066. +#endif
  12067. +}
  12068. +static inline void cpu_to_hw_fd(struct qm_fd *fd)
  12069. +{
  12070. + fd->addr = cpu_to_be40(fd->addr);
  12071. + fd->status = cpu_to_be32(fd->status);
  12072. + fd->opaque = cpu_to_be32(fd->opaque);
  12073. +}
  12074. +
  12075. +static inline void hw_fd_to_cpu(struct qm_fd *fd)
  12076. +{
  12077. + fd->addr = be40_to_cpu(fd->addr);
  12078. + fd->status = be32_to_cpu(fd->status);
  12079. + fd->opaque = be32_to_cpu(fd->opaque);
  12080. +}
  12081. +
  12082. +static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query)
  12083. +{
  12084. + cq_query->ccgid = be16_to_cpu(cq_query->ccgid);
  12085. + cq_query->state = be16_to_cpu(cq_query->state);
  12086. + cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr);
  12087. + cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr);
  12088. + cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr);
  12089. + cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr);
  12090. + cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr);
  12091. + cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr);
  12092. + cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr);
  12093. + cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr);
  12094. + cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr);
  12095. + cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr);
  12096. + cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt);
  12097. +}
  12098. +
  12099. +static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q)
  12100. +{
  12101. + int i;
  12102. +
  12103. + ccgr_q->cm_query.cscn_targ_dcp =
  12104. + be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp);
  12105. + ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt);
  12106. + ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt);
  12107. + for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++)
  12108. + ccgr_q->cm_query.cscn_targ_swp[i] =
  12109. + be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]);
  12110. +}
  12111. +
  12112. +/* In the case that slow- and fast-path handling are both done by qman_poll()
  12113. + * (ie. because there is no interrupt handling), we ought to balance how often
  12114. + * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
  12115. + * sources, so we call the fast poll 'n' times before calling the slow poll
  12116. + * once. The idle decrementer constant is used when the last slow-poll detected
  12117. + * no work to do, and the busy decrementer constant when the last slow-poll had
  12118. + * work to do. */
  12119. +#define SLOW_POLL_IDLE 1000
  12120. +#define SLOW_POLL_BUSY 10
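+/* NB: qman_p_poll() below implements this balance - the fast poll runs on
+ * every call, while the slow poll only runs when the slow-path sources are not
+ * interrupt-driven and the per-portal 'slowpoll' countdown (reloaded from
+ * these constants) reaches zero. */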
  12121. +static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
  12122. +static inline unsigned int __poll_portal_fast(struct qman_portal *p,
  12123. + unsigned int poll_limit);
  12124. +
  12125. +/* Portal interrupt handler */
  12126. +static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
  12127. +{
  12128. + struct qman_portal *p = ptr;
  12129. + /*
  12130. + * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
  12131. + * it could race against a Query Congestion State command also given
  12132. + * as part of the handling of this interrupt source. We mustn't
  12133. + * clear it a second time in this top-level function.
  12134. + */
  12135. + u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
  12136. + ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
  12137. + u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
  12138. + /* DQRR-handling if it's interrupt-driven */
  12139. + if (is & QM_PIRQ_DQRI)
  12140. + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
  12141. + /* Handling of anything else that's interrupt-driven */
  12142. + clear |= __poll_portal_slow(p, is);
  12143. + qm_isr_status_clear(&p->p, clear);
  12144. + return IRQ_HANDLED;
  12145. +}
  12146. +
  12147. +/* This inner version is used privately by qman_create_affine_portal(), as well
  12148. + * as by the exported qman_stop_dequeues(). */
  12149. +static inline void qman_stop_dequeues_ex(struct qman_portal *p)
  12150. +{
  12151. + unsigned long irqflags __maybe_unused;
  12152. + PORTAL_IRQ_LOCK(p, irqflags);
  12153. + if (!(p->dqrr_disable_ref++))
  12154. + qm_dqrr_set_maxfill(&p->p, 0);
  12155. + PORTAL_IRQ_UNLOCK(p, irqflags);
  12156. +}
  12157. +
  12158. +static int drain_mr(struct qm_portal *p)
  12159. +{
  12160. + const struct qm_mr_entry *msg;
  12161. +loop:
  12162. + msg = qm_mr_current(p);
  12163. + if (!msg) {
  12164. + /* if MR was full and h/w had other FQRNI entries to produce, we
  12165. + * need to allow it time to produce those entries once the
  12166. + * existing entries are consumed. A worst-case situation
  12167. + * (fully-loaded system) means h/w sequencers may have to do 3-4
  12168. + * other things before servicing the portal's MR pump, each of
  12169. + * which (if slow) may take ~50 qman cycles (which is ~200
  12170. + * processor cycles). So rounding up and then multiplying this
  12171. + * worst-case estimate by a factor of 10, just to be
  12172. + * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
  12173. + * one entry at a time, so h/w has an opportunity to produce new
  12174. + * entries well before the ring has been fully consumed, so
  12175. + * we're being *really* paranoid here. */
  12176. + u64 now, then = mfatb();
  12177. + do {
  12178. + now = mfatb();
  12179. + } while ((then + 10000) > now);
  12180. + msg = qm_mr_current(p);
  12181. + if (!msg)
  12182. + return 0;
  12183. + }
  12184. + qm_mr_next(p);
  12185. + qm_mr_cci_consume(p, 1);
  12186. + goto loop;
  12187. +}
  12188. +
  12189. +#ifdef CONFIG_SUSPEND
  12190. +static int _qman_portal_suspend_noirq(struct device *dev)
  12191. +{
  12192. + struct qman_portal *p = (struct qman_portal *)dev->platform_data;
  12193. +#ifdef CONFIG_PM_DEBUG
  12194. + struct platform_device *pdev = to_platform_device(dev);
  12195. +#endif
  12196. +
  12197. + p->save_isdr = qm_isr_disable_read(&p->p);
  12198. + qm_isr_disable_write(&p->p, 0xffffffff);
  12199. + qm_isr_status_clear(&p->p, 0xffffffff);
  12200. +#ifdef CONFIG_PM_DEBUG
  12201. + pr_info("Suspend for %s\n", pdev->name);
  12202. +#endif
  12203. + return 0;
  12204. +}
  12205. +
  12206. +static int _qman_portal_resume_noirq(struct device *dev)
  12207. +{
  12208. + struct qman_portal *p = (struct qman_portal *)dev->platform_data;
  12209. +
  12210. + /* restore isdr */
  12211. + qm_isr_disable_write(&p->p, p->save_isdr);
  12212. + return 0;
  12213. +}
  12214. +#else
  12215. +#define _qman_portal_suspend_noirq NULL
  12216. +#define _qman_portal_resume_noirq NULL
  12217. +#endif
  12218. +
  12219. +struct dev_pm_domain qman_portal_device_pm_domain = {
  12220. + .ops = {
  12221. + USE_PLATFORM_PM_SLEEP_OPS
  12222. + .suspend_noirq = _qman_portal_suspend_noirq,
  12223. + .resume_noirq = _qman_portal_resume_noirq,
  12224. + }
  12225. +};
  12226. +
  12227. +struct qman_portal *qman_create_portal(
  12228. + struct qman_portal *portal,
  12229. + const struct qm_portal_config *config,
  12230. + const struct qman_cgrs *cgrs)
  12231. +{
  12232. + struct qm_portal *__p;
  12233. + char buf[16];
  12234. + int ret;
  12235. + u32 isdr;
  12236. +
  12237. + if (!portal) {
  12238. + portal = kmalloc(sizeof(*portal), GFP_KERNEL);
  12239. + if (!portal)
  12240. + return portal;
  12241. + portal->alloced = 1;
  12242. + } else
  12243. + portal->alloced = 0;
  12244. +
  12245. + __p = &portal->p;
  12246. +
  12247. +#ifdef CONFIG_FSL_PAMU
  12248. + /* PAMU is required for stashing */
  12249. + portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
  12250. + 1 : 0);
  12251. +#else
  12252. + portal->use_eqcr_ci_stashing = 0;
  12253. +#endif
  12254. +
  12255. + /* prep the low-level portal struct with the mapped addresses from the
  12256. + * config, everything that follows depends on it and "config" is more
  12257. + * for (de)reference... */
  12258. + __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
  12259. + __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
  12260. + /*
  12261. + * If CI-stashing is used, the current defaults use a threshold of 3,
  12262. + * and stash with higher-than-DQRR priority.
  12263. + */
  12264. + if (qm_eqcr_init(__p, qm_eqcr_pvb,
  12265. + portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
  12266. + pr_err("Qman EQCR initialisation failed\n");
  12267. + goto fail_eqcr;
  12268. + }
  12269. + if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
  12270. + qm_dqrr_cdc, DQRR_MAXFILL)) {
  12271. + pr_err("Qman DQRR initialisation failed\n");
  12272. + goto fail_dqrr;
  12273. + }
  12274. + if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
  12275. + pr_err("Qman MR initialisation failed\n");
  12276. + goto fail_mr;
  12277. + }
  12278. + if (qm_mc_init(__p)) {
  12279. + pr_err("Qman MC initialisation failed\n");
  12280. + goto fail_mc;
  12281. + }
  12282. + if (qm_isr_init(__p)) {
  12283. + pr_err("Qman ISR initialisation failed\n");
  12284. + goto fail_isr;
  12285. + }
  12286. + /* static interrupt-gating controls */
  12287. + qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH);
  12288. + qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH);
  12289. + qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD);
  12290. + portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
  12291. + if (!portal->cgrs)
  12292. + goto fail_cgrs;
  12293. + /* initial snapshot is no-depletion */
  12294. + qman_cgrs_init(&portal->cgrs[1]);
  12295. + if (cgrs)
  12296. + portal->cgrs[0] = *cgrs;
  12297. + else
  12298. + /* if the given mask is NULL, assume all CGRs can be seen */
  12299. + qman_cgrs_fill(&portal->cgrs[0]);
  12300. + INIT_LIST_HEAD(&portal->cgr_cbs);
  12301. + spin_lock_init(&portal->cgr_lock);
  12302. + if (num_ceetms) {
  12303. + for (ret = 0; ret < num_ceetms; ret++) {
  12304. + portal->ccgrs[ret] = kmalloc(2 *
  12305. + sizeof(struct qman_ccgrs), GFP_KERNEL);
  12306. + if (!portal->ccgrs[ret])
  12307. + goto fail_ccgrs;
  12308. + qman_ccgrs_init(&portal->ccgrs[ret][1]);
  12309. + qman_ccgrs_fill(&portal->ccgrs[ret][0]);
  12310. + INIT_LIST_HEAD(&portal->ccgr_cbs[ret]);
  12311. + }
  12312. + }
  12313. + spin_lock_init(&portal->ccgr_lock);
  12314. + portal->bits = 0;
  12315. + portal->slowpoll = 0;
  12316. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  12317. + portal->eqci_owned = NULL;
  12318. +#endif
  12319. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  12320. + raw_spin_lock_init(&portal->sharing_lock);
  12321. + portal->is_shared = config->public_cfg.is_shared;
  12322. + portal->sharing_redirect = NULL;
  12323. +#endif
  12324. + portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
  12325. + QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
  12326. + QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
  12327. + portal->dqrr_disable_ref = 0;
  12328. + portal->cb_dc_ern = NULL;
  12329. + sprintf(buf, "qportal-%d", config->public_cfg.channel);
  12330. + portal->pdev = platform_device_alloc(buf, -1);
  12331. + if (!portal->pdev) {
  12332. + pr_err("qman_portal - platform_device_alloc() failed\n");
  12333. + goto fail_devalloc;
  12334. + }
  12335. +#ifdef CONFIG_ARM
  12336. + portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
  12337. + portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask;
  12338. +#else
  12339. + if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) {
  12340. + pr_err("qman_portal - dma_set_mask() failed\n");
  12341. + goto fail_devadd;
  12342. + }
  12343. +#endif
  12344. + portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain;
  12345. + portal->pdev->dev.platform_data = portal;
  12346. + ret = platform_device_add(portal->pdev);
  12347. + if (ret) {
  12348. + pr_err("qman_portal - platform_device_add() failed\n");
  12349. + goto fail_devadd;
  12350. + }
  12351. + dpa_rbtree_init(&portal->retire_table);
  12352. + isdr = 0xffffffff;
  12353. + qm_isr_disable_write(__p, isdr);
  12354. + portal->irq_sources = 0;
  12355. + qm_isr_enable_write(__p, portal->irq_sources);
  12356. + qm_isr_status_clear(__p, 0xffffffff);
  12357. + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
  12358. + if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
  12359. + portal)) {
  12360. + pr_err("request_irq() failed\n");
  12361. + goto fail_irq;
  12362. + }
  12363. + if ((config->public_cfg.cpu != -1) &&
  12364. + irq_can_set_affinity(config->public_cfg.irq) &&
  12365. + irq_set_affinity(config->public_cfg.irq,
  12366. + cpumask_of(config->public_cfg.cpu))) {
  12367. + pr_err("irq_set_affinity() failed\n");
  12368. + goto fail_affinity;
  12369. + }
  12370. +
  12371. + /* Need EQCR to be empty before continuing */
  12372. + isdr ^= QM_PIRQ_EQCI;
  12373. + qm_isr_disable_write(__p, isdr);
  12374. + ret = qm_eqcr_get_fill(__p);
  12375. + if (ret) {
  12376. + pr_err("Qman EQCR unclean\n");
  12377. + goto fail_eqcr_empty;
  12378. + }
  12379. + isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
  12380. + qm_isr_disable_write(__p, isdr);
  12381. + while (qm_dqrr_current(__p) != NULL)
  12382. + qm_dqrr_cdc_consume_n(__p, 0xffff);
  12383. + drain_mr(__p);
  12384. + /* Success */
  12385. + portal->config = config;
  12386. + qm_isr_disable_write(__p, 0);
  12387. + qm_isr_uninhibit(__p);
  12388. + /* Write a sane SDQCR */
  12389. + qm_dqrr_sdqcr_set(__p, portal->sdqcr);
  12390. + return portal;
  12391. +fail_eqcr_empty:
  12392. +fail_affinity:
  12393. + free_irq(config->public_cfg.irq, portal);
  12394. +fail_irq:
  12395. + platform_device_del(portal->pdev);
  12396. +fail_devadd:
  12397. + platform_device_put(portal->pdev);
  12398. +fail_devalloc:
  12399. + if (num_ceetms)
  12400. + for (ret = 0; ret < num_ceetms; ret++)
  12401. + kfree(portal->ccgrs[ret]);
  12402. +fail_ccgrs:
  12403. + kfree(portal->cgrs);
  12404. +fail_cgrs:
  12405. + qm_isr_finish(__p);
  12406. +fail_isr:
  12407. + qm_mc_finish(__p);
  12408. +fail_mc:
  12409. + qm_mr_finish(__p);
  12410. +fail_mr:
  12411. + qm_dqrr_finish(__p);
  12412. +fail_dqrr:
  12413. + qm_eqcr_finish(__p);
  12414. +fail_eqcr:
  12415. + if (portal->alloced)
  12416. + kfree(portal);
  12417. + return NULL;
  12418. +}
  12419. +
  12420. +struct qman_portal *qman_create_affine_portal(
  12421. + const struct qm_portal_config *config,
  12422. + const struct qman_cgrs *cgrs)
  12423. +{
  12424. + struct qman_portal *res;
  12425. + struct qman_portal *portal;
  12426. +
  12427. + portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
  12428. + res = qman_create_portal(portal, config, cgrs);
  12429. + if (res) {
  12430. + spin_lock(&affine_mask_lock);
  12431. + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
  12432. + affine_channels[config->public_cfg.cpu] =
  12433. + config->public_cfg.channel;
  12434. + affine_portals[config->public_cfg.cpu] = portal;
  12435. + spin_unlock(&affine_mask_lock);
  12436. + }
  12437. + return res;
  12438. +}
  12439. +
  12440. +/* These checks are BUG_ON()s because the driver is already supposed to avoid
  12441. + * these cases. */
  12442. +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
  12443. + int cpu)
  12444. +{
  12445. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  12446. + struct qman_portal *p;
  12447. + p = &per_cpu(qman_affine_portal, cpu);
  12448. + /* Check that we don't already have our own portal */
  12449. + BUG_ON(p->config);
  12450. + /* Check that we aren't already slaving to another portal */
  12451. + BUG_ON(p->is_shared);
  12452. + /* Check that 'redirect' is prepared to have us */
  12453. + BUG_ON(!redirect->config->public_cfg.is_shared);
  12454. + /* These are the only elements to initialise when redirecting */
  12455. + p->irq_sources = 0;
  12456. + p->sharing_redirect = redirect;
  12457. + affine_portals[cpu] = p;
  12458. + return p;
  12459. +#else
  12460. + BUG();
  12461. + return NULL;
  12462. +#endif
  12463. +}
  12464. +
  12465. +void qman_destroy_portal(struct qman_portal *qm)
  12466. +{
  12467. + const struct qm_portal_config *pcfg;
  12468. + int i;
  12469. +
  12470. + /* Stop dequeues on the portal */
  12471. + qm_dqrr_sdqcr_set(&qm->p, 0);
  12472. +
  12473. + /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
  12474. + * something related to QM_PIRQ_EQCI, this may need fixing.
  12475. + * Also, due to the prefetching model used for CI updates in the enqueue
  12476. + * path, this update will only invalidate the CI cacheline *after*
  12477. + * working on it, so we need to call this twice to ensure a full update
  12478. + * irrespective of where the enqueue processing was at when the teardown
  12479. + * began. */
  12480. + qm_eqcr_cce_update(&qm->p);
  12481. + qm_eqcr_cce_update(&qm->p);
  12482. + pcfg = qm->config;
  12483. +
  12484. + free_irq(pcfg->public_cfg.irq, qm);
  12485. +
  12486. + kfree(qm->cgrs);
  12487. + if (num_ceetms)
  12488. + for (i = 0; i < num_ceetms; i++)
  12489. + kfree(qm->ccgrs[i]);
  12490. + qm_isr_finish(&qm->p);
  12491. + qm_mc_finish(&qm->p);
  12492. + qm_mr_finish(&qm->p);
  12493. + qm_dqrr_finish(&qm->p);
  12494. + qm_eqcr_finish(&qm->p);
  12495. +
  12496. + platform_device_del(qm->pdev);
  12497. + platform_device_put(qm->pdev);
  12498. +
  12499. + qm->config = NULL;
  12500. + if (qm->alloced)
  12501. + kfree(qm);
  12502. +}
  12503. +
  12504. +const struct qm_portal_config *qman_destroy_affine_portal(void)
  12505. +{
  12506. + /* We don't want to redirect if we're a slave, use "raw" */
  12507. + struct qman_portal *qm = get_raw_affine_portal();
  12508. + const struct qm_portal_config *pcfg;
  12509. + int cpu;
  12510. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  12511. + if (qm->sharing_redirect) {
  12512. + qm->sharing_redirect = NULL;
  12513. + put_affine_portal();
  12514. + return NULL;
  12515. + }
  12516. + qm->is_shared = 0;
  12517. +#endif
  12518. + pcfg = qm->config;
  12519. + cpu = pcfg->public_cfg.cpu;
  12520. +
  12521. + qman_destroy_portal(qm);
  12522. +
  12523. + spin_lock(&affine_mask_lock);
  12524. + cpumask_clear_cpu(cpu, &affine_mask);
  12525. + spin_unlock(&affine_mask_lock);
  12526. + put_affine_portal();
  12527. + return pcfg;
  12528. +}
  12529. +
  12530. +const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
  12531. +{
  12532. + return &p->config->public_cfg;
  12533. +}
  12534. +EXPORT_SYMBOL(qman_p_get_portal_config);
  12535. +
  12536. +const struct qman_portal_config *qman_get_portal_config(void)
  12537. +{
  12538. + struct qman_portal *p = get_affine_portal();
  12539. + const struct qman_portal_config *ret = qman_p_get_portal_config(p);
  12540. + put_affine_portal();
  12541. + return ret;
  12542. +}
  12543. +EXPORT_SYMBOL(qman_get_portal_config);
  12544. +
  12545. +/* Inline helper to reduce nesting in __poll_portal_slow() */
  12546. +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
  12547. + const struct qm_mr_entry *msg, u8 verb)
  12548. +{
  12549. + FQLOCK(fq);
  12550. + switch (verb) {
  12551. + case QM_MR_VERB_FQRL:
  12552. + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
  12553. + fq_clear(fq, QMAN_FQ_STATE_ORL);
  12554. + table_del_fq(p, fq);
  12555. + break;
  12556. + case QM_MR_VERB_FQRN:
  12557. + DPA_ASSERT((fq->state == qman_fq_state_parked) ||
  12558. + (fq->state == qman_fq_state_sched));
  12559. + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
  12560. + fq_clear(fq, QMAN_FQ_STATE_CHANGING);
  12561. + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
  12562. + fq_set(fq, QMAN_FQ_STATE_NE);
  12563. + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
  12564. + fq_set(fq, QMAN_FQ_STATE_ORL);
  12565. + else
  12566. + table_del_fq(p, fq);
  12567. + fq->state = qman_fq_state_retired;
  12568. + break;
  12569. + case QM_MR_VERB_FQPN:
  12570. + DPA_ASSERT(fq->state == qman_fq_state_sched);
  12571. + DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
  12572. + fq->state = qman_fq_state_parked;
  12573. + }
  12574. + FQUNLOCK(fq);
  12575. +}
  12576. +
  12577. +static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
  12578. +{
  12579. + const struct qm_mr_entry *msg;
  12580. +
  12581. + if (is & QM_PIRQ_CSCI) {
  12582. + struct qman_cgrs rr, c;
  12583. + struct qm_mc_result *mcr;
  12584. + struct qman_cgr *cgr;
  12585. + unsigned long irqflags __maybe_unused;
  12586. +
  12587. + spin_lock_irqsave(&p->cgr_lock, irqflags);
  12588. + /*
  12589. + * The CSCI bit must be cleared _before_ issuing the
  12590. + * Query Congestion State command, to ensure that a long
  12591. + * CGR State Change callback cannot miss an intervening
  12592. + * state change.
  12593. + */
  12594. + qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
  12595. + qm_mc_start(&p->p);
  12596. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
  12597. + while (!(mcr = qm_mc_result(&p->p)))
  12598. + cpu_relax();
  12599. + /* mask out the ones I'm not interested in */
  12600. + qman_cgrs_and(&rr, (const struct qman_cgrs *)
  12601. + &mcr->querycongestion.state, &p->cgrs[0]);
  12602. + /* check previous snapshot for delta, enter/exit congestion */
  12603. + qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
  12604. + /* update snapshot */
  12605. + qman_cgrs_cp(&p->cgrs[1], &rr);
  12606. + /* Invoke callback */
  12607. + list_for_each_entry(cgr, &p->cgr_cbs, node)
  12608. + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
  12609. + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
  12610. + spin_unlock_irqrestore(&p->cgr_lock, irqflags);
  12611. + }
  12612. + if (is & QM_PIRQ_CCSCI) {
  12613. + struct qman_ccgrs rr, c, congestion_result;
  12614. + struct qm_mc_result *mcr;
  12615. + struct qm_mc_command *mcc;
  12616. + struct qm_ceetm_ccg *ccg;
  12617. + unsigned long irqflags __maybe_unused;
  12618. + int i, j, k;
  12619. +
  12620. + spin_lock_irqsave(&p->ccgr_lock, irqflags);
  12621. + /*
  12622. + * The CCSCI bit must be cleared _before_ issuing the
  12623. + * Query Congestion State command, to ensure that a long
  12624. + * CCGR State Change callback cannot miss an intervening
  12625. + * state change.
  12626. + */
  12627. + qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI);
  12628. +
  12629. + for (i = 0; i < num_ceetms; i++) {
  12630. + for (j = 0; j < 2; j++) {
  12631. + mcc = qm_mc_start(&p->p);
  12632. + mcc->ccgr_query.ccgrid = cpu_to_be16(
  12633. + CEETM_QUERY_CONGESTION_STATE | j);
  12634. + mcc->ccgr_query.dcpid = i;
  12635. + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
  12636. + while (!(mcr = qm_mc_result(&p->p)))
  12637. + cpu_relax();
  12638. + for (k = 0; k < 8; k++)
  12639. + mcr->ccgr_query.congestion_state.state.
  12640. + __state[k] = be32_to_cpu(
  12641. + mcr->ccgr_query.
  12642. + congestion_state.state.
  12643. + __state[k]);
  12644. + congestion_result.q[j] =
  12645. + mcr->ccgr_query.congestion_state.state;
  12646. + }
  12647. + /* mask out the ones I'm not interested in */
  12648. + qman_ccgrs_and(&rr, &congestion_result,
  12649. + &p->ccgrs[i][0]);
  12650. + /*
  12651. + * check previous snapshot for delta, enter/exit
  12652. + * congestion.
  12653. + */
  12654. + qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]);
  12655. + /* update snapshot */
  12656. + qman_ccgrs_cp(&p->ccgrs[i][1], &rr);
  12657. + /* Invoke callback */
  12658. + list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node)
  12659. + if (ccg->cb && qman_ccgrs_get(&c,
  12660. + (ccg->parent->idx << 4) | ccg->idx))
  12661. + ccg->cb(ccg, ccg->cb_ctx,
  12662. + qman_ccgrs_get(&rr,
  12663. + (ccg->parent->idx << 4)
  12664. + | ccg->idx));
  12665. + }
  12666. + spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
  12667. + }
  12668. +
  12669. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  12670. + if (is & QM_PIRQ_EQCI) {
  12671. + unsigned long irqflags;
  12672. + PORTAL_IRQ_LOCK(p, irqflags);
  12673. + p->eqci_owned = NULL;
  12674. + PORTAL_IRQ_UNLOCK(p, irqflags);
  12675. + wake_up(&affine_queue);
  12676. + }
  12677. +#endif
  12678. +
  12679. + if (is & QM_PIRQ_EQRI) {
  12680. + unsigned long irqflags __maybe_unused;
  12681. + PORTAL_IRQ_LOCK(p, irqflags);
  12682. + qm_eqcr_cce_update(&p->p);
  12683. + qm_eqcr_set_ithresh(&p->p, 0);
  12684. + PORTAL_IRQ_UNLOCK(p, irqflags);
  12685. + wake_up(&affine_queue);
  12686. + }
  12687. +
  12688. + if (is & QM_PIRQ_MRI) {
  12689. + struct qman_fq *fq;
  12690. + u8 verb, num = 0;
  12691. +mr_loop:
  12692. + qm_mr_pvb_update(&p->p);
  12693. + msg = qm_mr_current(&p->p);
  12694. + if (!msg)
  12695. + goto mr_done;
  12696. + verb = msg->verb & QM_MR_VERB_TYPE_MASK;
  12697. + /* The message is a software ERN iff the 0x20 bit is clear */
  12698. + if (verb & 0x20) {
  12699. + switch (verb) {
  12700. + case QM_MR_VERB_FQRNI:
  12701. + /* nada, we drop FQRNIs on the floor */
  12702. + break;
  12703. + case QM_MR_VERB_FQRN:
  12704. + case QM_MR_VERB_FQRL:
  12705. + /* Lookup in the retirement table */
  12706. + fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid));
  12707. + BUG_ON(!fq);
  12708. + fq_state_change(p, fq, msg, verb);
  12709. + if (fq->cb.fqs)
  12710. + fq->cb.fqs(p, fq, msg);
  12711. + break;
  12712. + case QM_MR_VERB_FQPN:
  12713. + /* Parked */
  12714. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  12715. + fq = get_fq_table_entry(
  12716. + be32_to_cpu(msg->fq.contextB));
  12717. +#else
  12718. + fq = (void *)(uintptr_t)
  12719. + be32_to_cpu(msg->fq.contextB);
  12720. +#endif
  12721. + fq_state_change(p, fq, msg, verb);
  12722. + if (fq->cb.fqs)
  12723. + fq->cb.fqs(p, fq, msg);
  12724. + break;
  12725. + case QM_MR_VERB_DC_ERN:
  12726. + /* DCP ERN */
  12727. + if (p->cb_dc_ern)
  12728. + p->cb_dc_ern(p, msg);
  12729. + else if (cb_dc_ern)
  12730. + cb_dc_ern(p, msg);
  12731. + else {
  12732. + static int warn_once;
  12733. + if (!warn_once) {
  12734. + pr_crit("Leaking DCP ERNs!\n");
  12735. + warn_once = 1;
  12736. + }
  12737. + }
  12738. + break;
  12739. + default:
  12740. + pr_crit("Invalid MR verb 0x%02x\n", verb);
  12741. + }
  12742. + } else {
  12743. + /* It's a software ERN */
  12744. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  12746. + fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
  12747. +#else
  12748. + fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
  12749. +#endif
  12750. + fq->cb.ern(p, fq, msg);
  12751. + }
  12752. + num++;
  12753. + qm_mr_next(&p->p);
  12754. + goto mr_loop;
  12755. +mr_done:
  12756. + qm_mr_cci_consume(&p->p, num);
  12757. + }
  12758. + /*
  12759. + * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
  12760. + * processing. If that interrupt source has meanwhile been re-asserted,
  12761. + * we mustn't clear it here (or in the top-level interrupt handler).
  12762. + */
  12763. + return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
  12764. +}
  12765. +
  12766. +/* remove some slowish-path stuff from the "fast path" and make sure it isn't
  12767. + * inlined. */
  12768. +static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
  12769. +{
  12770. + p->vdqcr_owned = NULL;
  12771. + FQLOCK(fq);
  12772. + fq_clear(fq, QMAN_FQ_STATE_VDQCR);
  12773. + FQUNLOCK(fq);
  12774. + wake_up(&affine_queue);
  12775. +}
  12776. +
  12777. +/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
  12778. + * that would conflict with other things if they ran at the same time on the
  12779. + * same cpu are:
  12780. + *
  12781. + * (i) setting/clearing vdqcr_owned, and
  12782. + * (ii) clearing the NE (Not Empty) flag.
  12783. + *
  12784. + * Both are safe, because:
  12785. + *
  12786. + * (i) this clearing can only occur after qman_volatile_dequeue() has set the
  12787. + * vdqcr_owned field (which it does before setting VDQCR), and
  12788. + * qman_volatile_dequeue() blocks interrupts and preemption while this is
  12789. + * done so that we can't interfere.
  12790. + * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
  12791. + * with (i) that API prevents us from interfering until it's safe.
  12792. + *
  12793. + * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
  12794. + * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
  12795. + * advantage comes from this function not having to "lock" anything at all.
  12796. + *
  12797. + * Note also that the callbacks are invoked at points which are safe against the
  12798. + * above potential conflicts, but that this function itself is not re-entrant
  12799. + * (this is because the function tracks one end of each FIFO in the portal and
  12800. + * we do *not* want to lock that). So the consequence is that it is safe for
  12801. + * user callbacks to call into any Qman API *except* qman_poll() (as that's the
  12802. + * sole API that could be invoking the callback through this function).
  12803. + */
  12804. +static inline unsigned int __poll_portal_fast(struct qman_portal *p,
  12805. + unsigned int poll_limit)
  12806. +{
  12807. + const struct qm_dqrr_entry *dq;
  12808. + struct qman_fq *fq;
  12809. + enum qman_cb_dqrr_result res;
  12810. + unsigned int limit = 0;
  12811. +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  12812. + struct qm_dqrr_entry *shadow;
  12813. +#endif
  12814. +loop:
  12815. + qm_dqrr_pvb_update(&p->p);
  12816. + dq = qm_dqrr_current(&p->p);
  12817. + if (!dq)
  12818. + goto done;
  12819. +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  12820. + /* If running on an LE system, the fields of the
  12821. + dequeue entry must be swapped. Because the
  12822. + QMan HW will ignore writes, the DQRR entry is
  12823. + copied and the index stored within the copy */
  12824. + shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
  12825. + *shadow = *dq;
  12826. + dq = shadow;
  12827. + shadow->fqid = be32_to_cpu(shadow->fqid);
  12828. + shadow->contextB = be32_to_cpu(shadow->contextB);
  12829. + shadow->seqnum = be16_to_cpu(shadow->seqnum);
  12830. + hw_fd_to_cpu(&shadow->fd);
  12831. +#endif
  12832. + if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
  12833. + /* VDQCR: don't trust contextB as the FQ may have been
  12834. + * configured for h/w consumption and we're draining it
  12835. + * post-retirement. */
  12836. + fq = p->vdqcr_owned;
  12837. + /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
  12838. + * to check for clearing it when doing volatile dequeues. It's
  12839. + * one less thing to check in the critical path (SDQCR). */
  12840. + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
  12841. + fq_clear(fq, QMAN_FQ_STATE_NE);
  12842. + /* this is duplicated from the SDQCR code, but we have stuff to
  12843. + * do before *and* after this callback, and we don't want
  12844. + * multiple if()s in the critical path (SDQCR). */
  12845. + res = fq->cb.dqrr(p, fq, dq);
  12846. + if (res == qman_cb_dqrr_stop)
  12847. + goto done;
  12848. + /* Check for VDQCR completion */
  12849. + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
  12850. + clear_vdqcr(p, fq);
  12851. + } else {
  12852. + /* SDQCR: contextB points to the FQ */
  12853. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  12854. + fq = get_fq_table_entry(dq->contextB);
  12855. +#else
  12856. + fq = (void *)(uintptr_t)dq->contextB;
  12857. +#endif
  12858. + /* Now let the callback do its stuff */
  12859. + res = fq->cb.dqrr(p, fq, dq);
  12860. +
  12861. + /* The callback can request that we exit without consuming this
  12862. + * entry nor advancing; */
  12863. + if (res == qman_cb_dqrr_stop)
  12864. + goto done;
  12865. + }
  12866. + /* Interpret 'dq' from a driver perspective. */
  12867. + /* Parking isn't possible unless HELDACTIVE was set. NB,
  12868. + * FORCEELIGIBLE implies HELDACTIVE, so we only need to
  12869. + * check for HELDACTIVE to cover both. */
  12870. + DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
  12871. + (res != qman_cb_dqrr_park));
  12872. + /* Defer just means "skip it, I'll consume it myself later on" */
  12873. + if (res != qman_cb_dqrr_defer)
  12874. + qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
  12875. + /* Move forward */
  12876. + qm_dqrr_next(&p->p);
  12877. + /* Entry processed and consumed, increment our counter. The callback can
  12878. + * request that we exit after consuming the entry, and we also exit if
  12879. + * we reach our processing limit, so loop back only if neither of these
  12880. + * conditions is met. */
  12881. + if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
  12882. + goto loop;
  12883. +done:
  12884. + return limit;
  12885. +}
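+/* NB: __poll_portal_fast() is reached both from portal_isr() (when DQRI is
+ * interrupt-driven) and from the qman_p_poll*()/qman_poll*() APIs (when it is
+ * not), so the re-entrancy caveat above applies to both paths. */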
  12886. +
  12887. +u32 qman_irqsource_get(void)
  12888. +{
  12889. + /* "irqsource" and "poll" APIs mustn't redirect when sharing; they
  12890. + * should shut the user out if they are not the primary CPU hosting the
  12891. + * portal. That's why we use the "raw" interface. */
  12892. + struct qman_portal *p = get_raw_affine_portal();
  12893. + u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
  12894. + put_affine_portal();
  12895. + return ret;
  12896. +}
  12897. +EXPORT_SYMBOL(qman_irqsource_get);
  12898. +
  12899. +int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
  12900. +{
  12901. + __maybe_unused unsigned long irqflags;
  12902. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  12903. + if (p->sharing_redirect)
  12904. + return -EINVAL;
  12905. + else
  12906. +#endif
  12907. + {
  12908. + PORTAL_IRQ_LOCK(p, irqflags);
  12909. + set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
  12910. + qm_isr_enable_write(&p->p, p->irq_sources);
  12911. + PORTAL_IRQ_UNLOCK(p, irqflags);
  12912. + }
  12913. + return 0;
  12914. +}
  12915. +EXPORT_SYMBOL(qman_p_irqsource_add);
  12916. +
  12917. +int qman_irqsource_add(u32 bits __maybe_unused)
  12918. +{
  12919. + struct qman_portal *p = get_raw_affine_portal();
  12920. + int ret;
  12921. + ret = qman_p_irqsource_add(p, bits);
  12922. + put_affine_portal();
  12923. + return ret;
  12924. +}
  12925. +EXPORT_SYMBOL(qman_irqsource_add);
  12926. +
  12927. +int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
  12928. +{
  12929. + __maybe_unused unsigned long irqflags;
  12930. + u32 ier;
  12931. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  12932. + if (p->sharing_redirect) {
  12933. + put_affine_portal();
  12934. + return -EINVAL;
  12935. + }
  12936. +#endif
  12937. + /* Our interrupt handler only processes+clears status register bits that
  12938. + * are in p->irq_sources. As we're trimming that mask, if one of them
  12939. + * were to assert in the status register just before we remove it from
  12940. + * the enable register, there would be an interrupt-storm when we
  12941. + * release the IRQ lock. So we wait for the enable register update to
  12942. + * take effect in h/w (by reading it back) and then clear all other bits
  12943. + * in the status register. Ie. we clear them from ISR once it's certain
  12944. + * IER won't allow them to reassert. */
  12945. + PORTAL_IRQ_LOCK(p, irqflags);
  12946. + bits &= QM_PIRQ_VISIBLE;
  12947. + clear_bits(bits, &p->irq_sources);
  12948. + qm_isr_enable_write(&p->p, p->irq_sources);
  12949. +
  12950. + ier = qm_isr_enable_read(&p->p);
  12951. + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
  12952. + * data-dependency, ie. to protect against re-ordering. */
  12953. + qm_isr_status_clear(&p->p, ~ier);
  12954. + PORTAL_IRQ_UNLOCK(p, irqflags);
  12955. + return 0;
  12956. +}
  12957. +EXPORT_SYMBOL(qman_p_irqsource_remove);
  12958. +
  12959. +int qman_irqsource_remove(u32 bits)
  12960. +{
  12961. + struct qman_portal *p = get_raw_affine_portal();
  12962. + int ret;
  12963. + ret = qman_p_irqsource_remove(p, bits);
  12964. + put_affine_portal();
  12965. + return ret;
  12966. +}
  12967. +EXPORT_SYMBOL(qman_irqsource_remove);
  12968. +
  12969. +const cpumask_t *qman_affine_cpus(void)
  12970. +{
  12971. + return &affine_mask;
  12972. +}
  12973. +EXPORT_SYMBOL(qman_affine_cpus);
  12974. +
  12975. +u16 qman_affine_channel(int cpu)
  12976. +{
  12977. + if (cpu < 0) {
  12978. + struct qman_portal *portal = get_raw_affine_portal();
  12979. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  12980. + BUG_ON(portal->sharing_redirect);
  12981. +#endif
  12982. + cpu = portal->config->public_cfg.cpu;
  12983. + put_affine_portal();
  12984. + }
  12985. + BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
  12986. + return affine_channels[cpu];
  12987. +}
  12988. +EXPORT_SYMBOL(qman_affine_channel);
  12989. +
  12990. +void *qman_get_affine_portal(int cpu)
  12991. +{
  12992. + return affine_portals[cpu];
  12993. +}
  12994. +EXPORT_SYMBOL(qman_get_affine_portal);
  12995. +
  12996. +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
  12997. +{
  12998. + int ret;
  12999. +
  13000. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  13001. + if (unlikely(p->sharing_redirect))
  13002. + ret = -EINVAL;
  13003. + else
  13004. +#endif
  13005. + {
  13006. + BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
  13007. + ret = __poll_portal_fast(p, limit);
  13008. + }
  13009. + return ret;
  13010. +}
  13011. +EXPORT_SYMBOL(qman_p_poll_dqrr);
  13012. +
  13013. +int qman_poll_dqrr(unsigned int limit)
  13014. +{
  13015. + struct qman_portal *p = get_poll_portal();
  13016. + int ret;
  13017. + ret = qman_p_poll_dqrr(p, limit);
  13018. + put_poll_portal();
  13019. + return ret;
  13020. +}
  13021. +EXPORT_SYMBOL(qman_poll_dqrr);
  13022. +
  13023. +u32 qman_p_poll_slow(struct qman_portal *p)
  13024. +{
  13025. + u32 ret;
  13026. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  13027. + if (unlikely(p->sharing_redirect))
  13028. + ret = (u32)-1;
  13029. + else
  13030. +#endif
  13031. + {
  13032. + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
  13033. + ret = __poll_portal_slow(p, is);
  13034. + qm_isr_status_clear(&p->p, ret);
  13035. + }
  13036. + return ret;
  13037. +}
  13038. +EXPORT_SYMBOL(qman_p_poll_slow);
  13039. +
  13040. +u32 qman_poll_slow(void)
  13041. +{
  13042. + struct qman_portal *p = get_poll_portal();
  13043. + u32 ret;
  13044. + ret = qman_p_poll_slow(p);
  13045. + put_poll_portal();
  13046. + return ret;
  13047. +}
  13048. +EXPORT_SYMBOL(qman_poll_slow);
  13049. +
  13050. +/* Legacy wrapper */
  13051. +void qman_p_poll(struct qman_portal *p)
  13052. +{
  13053. +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
  13054. + if (unlikely(p->sharing_redirect))
  13055. + return;
  13056. +#endif
  13057. + if ((~p->irq_sources) & QM_PIRQ_SLOW) {
  13058. + if (!(p->slowpoll--)) {
  13059. + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
  13060. + u32 active = __poll_portal_slow(p, is);
  13061. + if (active) {
  13062. + qm_isr_status_clear(&p->p, active);
  13063. + p->slowpoll = SLOW_POLL_BUSY;
  13064. + } else
  13065. + p->slowpoll = SLOW_POLL_IDLE;
  13066. + }
  13067. + }
  13068. + if ((~p->irq_sources) & QM_PIRQ_DQRI)
  13069. + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
  13070. +}
  13071. +EXPORT_SYMBOL(qman_p_poll);
  13072. +
  13073. +void qman_poll(void)
  13074. +{
  13075. + struct qman_portal *p = get_poll_portal();
  13076. + qman_p_poll(p);
  13077. + put_poll_portal();
  13078. +}
  13079. +EXPORT_SYMBOL(qman_poll);
  13080. +
  13081. +void qman_p_stop_dequeues(struct qman_portal *p)
  13082. +{
  13083. + qman_stop_dequeues_ex(p);
  13084. +}
  13085. +EXPORT_SYMBOL(qman_p_stop_dequeues);
  13086. +
  13087. +void qman_stop_dequeues(void)
  13088. +{
  13089. + struct qman_portal *p = get_affine_portal();
  13090. + qman_p_stop_dequeues(p);
  13091. + put_affine_portal();
  13092. +}
  13093. +EXPORT_SYMBOL(qman_stop_dequeues);
  13094. +
  13095. +void qman_p_start_dequeues(struct qman_portal *p)
  13096. +{
  13097. + unsigned long irqflags __maybe_unused;
  13098. + PORTAL_IRQ_LOCK(p, irqflags);
  13099. + DPA_ASSERT(p->dqrr_disable_ref > 0);
  13100. + if (!(--p->dqrr_disable_ref))
  13101. + qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
  13102. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13103. +}
  13104. +EXPORT_SYMBOL(qman_p_start_dequeues);
  13105. +
  13106. +void qman_start_dequeues(void)
  13107. +{
  13108. + struct qman_portal *p = get_affine_portal();
  13109. + qman_p_start_dequeues(p);
  13110. + put_affine_portal();
  13111. +}
  13112. +EXPORT_SYMBOL(qman_start_dequeues);
  13113. +
  13114. +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
  13115. +{
  13116. + unsigned long irqflags __maybe_unused;
  13117. + PORTAL_IRQ_LOCK(p, irqflags);
  13118. + pools &= p->config->public_cfg.pools;
  13119. + p->sdqcr |= pools;
  13120. + qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
  13121. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13122. +}
  13123. +EXPORT_SYMBOL(qman_p_static_dequeue_add);
  13124. +
  13125. +void qman_static_dequeue_add(u32 pools)
  13126. +{
  13127. + struct qman_portal *p = get_affine_portal();
  13128. + qman_p_static_dequeue_add(p, pools);
  13129. + put_affine_portal();
  13130. +}
  13131. +EXPORT_SYMBOL(qman_static_dequeue_add);
  13132. +
  13133. +void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
  13134. +{
  13135. + unsigned long irqflags __maybe_unused;
  13136. + PORTAL_IRQ_LOCK(p, irqflags);
  13137. + pools &= p->config->public_cfg.pools;
  13138. + p->sdqcr &= ~pools;
  13139. + qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
  13140. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13141. +}
  13142. +EXPORT_SYMBOL(qman_p_static_dequeue_del);
  13143. +
  13144. +void qman_static_dequeue_del(u32 pools)
  13145. +{
  13146. + struct qman_portal *p = get_affine_portal();
  13147. + qman_p_static_dequeue_del(p, pools);
  13148. + put_affine_portal();
  13149. +}
  13150. +EXPORT_SYMBOL(qman_static_dequeue_del);
  13151. +
  13152. +u32 qman_p_static_dequeue_get(struct qman_portal *p)
  13153. +{
  13154. + return p->sdqcr;
  13155. +}
  13156. +EXPORT_SYMBOL(qman_p_static_dequeue_get);
  13157. +
  13158. +u32 qman_static_dequeue_get(void)
  13159. +{
  13160. + struct qman_portal *p = get_affine_portal();
  13161. + u32 ret = qman_p_static_dequeue_get(p);
  13162. + put_affine_portal();
  13163. + return ret;
  13164. +}
  13165. +EXPORT_SYMBOL(qman_static_dequeue_get);
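+/* Illustrative sketch only: 'my_pool_mask' is a hypothetical caller-computed
+ * SDQCR pool-channel mask; it is ANDed with the portal's configured pools
+ * before being applied:
+ *
+ *	qman_static_dequeue_add(my_pool_mask);
+ *	...
+ *	qman_static_dequeue_del(my_pool_mask);
+ */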
  13166. +
  13167. +void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
  13168. + int park_request)
  13169. +{
  13170. + qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
  13171. +}
  13172. +EXPORT_SYMBOL(qman_p_dca);
  13173. +
  13174. +void qman_dca(struct qm_dqrr_entry *dq, int park_request)
  13175. +{
  13176. + struct qman_portal *p = get_affine_portal();
  13177. + qman_p_dca(p, dq, park_request);
  13178. + put_affine_portal();
  13179. +}
  13180. +EXPORT_SYMBOL(qman_dca);
  13181. +
  13182. +/*******************/
  13183. +/* Frame queue API */
  13184. +/*******************/
  13185. +
  13186. +static const char *mcr_result_str(u8 result)
  13187. +{
  13188. + switch (result) {
  13189. + case QM_MCR_RESULT_NULL:
  13190. + return "QM_MCR_RESULT_NULL";
  13191. + case QM_MCR_RESULT_OK:
  13192. + return "QM_MCR_RESULT_OK";
  13193. + case QM_MCR_RESULT_ERR_FQID:
  13194. + return "QM_MCR_RESULT_ERR_FQID";
  13195. + case QM_MCR_RESULT_ERR_FQSTATE:
  13196. + return "QM_MCR_RESULT_ERR_FQSTATE";
  13197. + case QM_MCR_RESULT_ERR_NOTEMPTY:
  13198. + return "QM_MCR_RESULT_ERR_NOTEMPTY";
  13199. + case QM_MCR_RESULT_PENDING:
  13200. + return "QM_MCR_RESULT_PENDING";
  13201. + case QM_MCR_RESULT_ERR_BADCOMMAND:
  13202. + return "QM_MCR_RESULT_ERR_BADCOMMAND";
  13203. + }
  13204. + return "<unknown MCR result>";
  13205. +}
  13206. +
  13207. +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
  13208. +{
  13209. + struct qm_fqd fqd;
  13210. + struct qm_mcr_queryfq_np np;
  13211. + struct qm_mc_command *mcc;
  13212. + struct qm_mc_result *mcr;
  13213. + struct qman_portal *p;
  13214. + unsigned long irqflags __maybe_unused;
  13215. +
  13216. + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
  13217. + int ret = qman_alloc_fqid(&fqid);
  13218. + if (ret)
  13219. + return ret;
  13220. + }
  13221. + spin_lock_init(&fq->fqlock);
  13222. + fq->fqid = fqid;
  13223. + fq->flags = flags;
  13224. + fq->state = qman_fq_state_oos;
  13225. + fq->cgr_groupid = 0;
  13226. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  13227. + if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
  13228. + return -ENOMEM;
  13229. +#endif
  13230. + if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
  13231. + return 0;
  13232. + /* Everything else is AS_IS support */
  13233. + p = get_affine_portal();
  13234. + PORTAL_IRQ_LOCK(p, irqflags);
  13235. + mcc = qm_mc_start(&p->p);
  13236. + mcc->queryfq.fqid = cpu_to_be32(fqid);
  13237. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
  13238. + while (!(mcr = qm_mc_result(&p->p)))
  13239. + cpu_relax();
  13240. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
  13241. + if (mcr->result != QM_MCR_RESULT_OK) {
  13242. + pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
  13243. + goto err;
  13244. + }
  13245. + fqd = mcr->queryfq.fqd;
  13246. + hw_fqd_to_cpu(&fqd);
  13247. + mcc = qm_mc_start(&p->p);
  13248. + mcc->queryfq_np.fqid = cpu_to_be32(fqid);
  13249. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
  13250. + while (!(mcr = qm_mc_result(&p->p)))
  13251. + cpu_relax();
  13252. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
  13253. + if (mcr->result != QM_MCR_RESULT_OK) {
  13254. + pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
  13255. + goto err;
  13256. + }
  13257. + np = mcr->queryfq_np;
  13258. + /* Phew, have queryfq and queryfq_np results, stitch together
  13259. + * the FQ object from those. */
  13260. + fq->cgr_groupid = fqd.cgid;
  13261. + switch (np.state & QM_MCR_NP_STATE_MASK) {
  13262. + case QM_MCR_NP_STATE_OOS:
  13263. + break;
  13264. + case QM_MCR_NP_STATE_RETIRED:
  13265. + fq->state = qman_fq_state_retired;
  13266. + if (np.frm_cnt)
  13267. + fq_set(fq, QMAN_FQ_STATE_NE);
  13268. + break;
  13269. + case QM_MCR_NP_STATE_TEN_SCHED:
  13270. + case QM_MCR_NP_STATE_TRU_SCHED:
  13271. + case QM_MCR_NP_STATE_ACTIVE:
  13272. + fq->state = qman_fq_state_sched;
  13273. + if (np.state & QM_MCR_NP_STATE_R)
  13274. + fq_set(fq, QMAN_FQ_STATE_CHANGING);
  13275. + break;
  13276. + case QM_MCR_NP_STATE_PARKED:
  13277. + fq->state = qman_fq_state_parked;
  13278. + break;
  13279. + default:
  13280. + DPA_ASSERT(NULL == "invalid FQ state");
  13281. + }
  13282. + if (fqd.fq_ctrl & QM_FQCTRL_CGE)
  13283. + fq_set(fq, QMAN_FQ_STATE_CGR_EN);
  13284. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13285. + put_affine_portal();
  13286. + return 0;
  13287. +err:
  13288. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13289. + put_affine_portal();
  13290. + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
  13291. + qman_release_fqid(fqid);
  13292. + return -EIO;
  13293. +}
  13294. +EXPORT_SYMBOL(qman_create_fq);
  13295. +
  13296. +void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
  13297. +{
  13298. +
  13299. + /* We don't need to lock the FQ as it is a pre-condition that the FQ be
  13300. + * quiesced. Instead, run some checks. */
  13301. + switch (fq->state) {
  13302. + case qman_fq_state_parked:
  13303. + DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
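+ /* fall through - a parked FQ is released the same way as an OOS one */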
  13304. + case qman_fq_state_oos:
  13305. + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
  13306. + qman_release_fqid(fq->fqid);
  13307. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  13308. + clear_fq_table_entry(fq->key);
  13309. +#endif
  13310. + return;
  13311. + default:
  13312. + break;
  13313. + }
  13314. + DPA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
  13315. +}
  13316. +EXPORT_SYMBOL(qman_destroy_fq);
  13317. +
  13318. +u32 qman_fq_fqid(struct qman_fq *fq)
  13319. +{
  13320. + return fq->fqid;
  13321. +}
  13322. +EXPORT_SYMBOL(qman_fq_fqid);
  13323. +
  13324. +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
  13325. +{
  13326. + if (state)
  13327. + *state = fq->state;
  13328. + if (flags)
  13329. + *flags = fq->flags;
  13330. +}
  13331. +EXPORT_SYMBOL(qman_fq_state);
  13332. +
  13333. +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
  13334. +{
  13335. + struct qm_mc_command *mcc;
  13336. + struct qm_mc_result *mcr;
  13337. + struct qman_portal *p;
  13338. + unsigned long irqflags __maybe_unused;
  13339. + u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
  13340. + QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
  13341. +
  13342. + if ((fq->state != qman_fq_state_oos) &&
  13343. + (fq->state != qman_fq_state_parked))
  13344. + return -EINVAL;
  13345. +#ifdef CONFIG_FSL_DPA_CHECKING
  13346. + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
  13347. + return -EINVAL;
  13348. +#endif
  13349. + if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
  13350. + /* And can't be set at the same time as TDTHRESH */
  13351. + if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
  13352. + return -EINVAL;
  13353. + }
  13354. + /* Issue an INITFQ_[PARKED|SCHED] management command */
  13355. + p = get_affine_portal();
  13356. + PORTAL_IRQ_LOCK(p, irqflags);
  13357. + FQLOCK(fq);
  13358. + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
  13359. + ((fq->state != qman_fq_state_oos) &&
  13360. + (fq->state != qman_fq_state_parked)))) {
  13361. + FQUNLOCK(fq);
  13362. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13363. + put_affine_portal();
  13364. + return -EBUSY;
  13365. + }
  13366. + mcc = qm_mc_start(&p->p);
  13367. + if (opts)
  13368. + mcc->initfq = *opts;
  13369. + mcc->initfq.fqid = cpu_to_be32(fq->fqid);
  13370. + mcc->initfq.count = 0;
  13371. +
  13372. + /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
  13373. + * demux pointer. Otherwise, the caller-provided value is allowed to
  13374. + * stand, don't overwrite it. */
  13375. + if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
  13376. + dma_addr_t phys_fq;
  13377. + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
  13378. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  13379. + mcc->initfq.fqd.context_b = fq->key;
  13380. +#else
  13381. + mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
  13382. +#endif
  13383. + /* and the physical address - NB, if the user wasn't trying to
  13384. + * set CONTEXTA, clear the stashing settings. */
  13385. + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
  13386. + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
  13387. + memset(&mcc->initfq.fqd.context_a, 0,
  13388. + sizeof(mcc->initfq.fqd.context_a));
  13389. + } else {
  13390. + phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
  13391. + DMA_TO_DEVICE);
  13392. + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
  13393. + }
  13394. + }
  13395. + if (flags & QMAN_INITFQ_FLAG_LOCAL) {
  13396. + mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
  13397. + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
  13398. + mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
  13399. + mcc->initfq.fqd.dest.wq = 4;
  13400. + }
  13401. + }
  13402. + mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
  13403. + cpu_to_hw_fqd(&mcc->initfq.fqd);
  13404. + qm_mc_commit(&p->p, myverb);
  13405. + while (!(mcr = qm_mc_result(&p->p)))
  13406. + cpu_relax();
  13407. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
  13408. + res = mcr->result;
  13409. + if (res != QM_MCR_RESULT_OK) {
  13410. + FQUNLOCK(fq);
  13411. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13412. + put_affine_portal();
  13413. + return -EIO;
  13414. + }
  13415. + if (opts) {
  13416. + if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
  13417. + if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
  13418. + fq_set(fq, QMAN_FQ_STATE_CGR_EN);
  13419. + else
  13420. + fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
  13421. + }
  13422. + if (opts->we_mask & QM_INITFQ_WE_CGID)
  13423. + fq->cgr_groupid = opts->fqd.cgid;
  13424. + }
  13425. + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
  13426. + qman_fq_state_sched : qman_fq_state_parked;
  13427. + FQUNLOCK(fq);
  13428. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13429. + put_affine_portal();
  13430. + return 0;
  13431. +}
  13432. +EXPORT_SYMBOL(qman_init_fq);
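+/* Illustrative sketch only: a minimal create/init sequence for a s/w-consumed
+ * FQ, assuming a caller-supplied DQRR callback named my_dqrr_cb:
+ *
+ *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
+ *	int err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
+ *	if (!err)
+ *		err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+ */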
  13433. +
  13434. +int qman_schedule_fq(struct qman_fq *fq)
  13435. +{
  13436. + struct qm_mc_command *mcc;
  13437. + struct qm_mc_result *mcr;
  13438. + struct qman_portal *p;
  13439. + unsigned long irqflags __maybe_unused;
  13440. + int ret = 0;
  13441. + u8 res;
  13442. +
  13443. + if (fq->state != qman_fq_state_parked)
  13444. + return -EINVAL;
  13445. +#ifdef CONFIG_FSL_DPA_CHECKING
  13446. + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
  13447. + return -EINVAL;
  13448. +#endif
  13449. + /* Issue a ALTERFQ_SCHED management command */
  13450. + p = get_affine_portal();
  13451. + PORTAL_IRQ_LOCK(p, irqflags);
  13452. + FQLOCK(fq);
  13453. + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
  13454. + (fq->state != qman_fq_state_parked))) {
  13455. + ret = -EBUSY;
  13456. + goto out;
  13457. + }
  13458. + mcc = qm_mc_start(&p->p);
  13459. + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
  13460. + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
  13461. + while (!(mcr = qm_mc_result(&p->p)))
  13462. + cpu_relax();
  13463. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
  13464. + res = mcr->result;
  13465. + if (res != QM_MCR_RESULT_OK) {
  13466. + ret = -EIO;
  13467. + goto out;
  13468. + }
  13469. + fq->state = qman_fq_state_sched;
  13470. +out:
  13471. + FQUNLOCK(fq);
  13472. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13473. + put_affine_portal();
  13474. + return ret;
  13475. +}
  13476. +EXPORT_SYMBOL(qman_schedule_fq);
  13477. +
  13478. +int qman_retire_fq(struct qman_fq *fq, u32 *flags)
  13479. +{
  13480. + struct qm_mc_command *mcc;
  13481. + struct qm_mc_result *mcr;
  13482. + struct qman_portal *p;
  13483. + unsigned long irqflags __maybe_unused;
  13484. + int rval;
  13485. + u8 res;
  13486. +
  13487. + if ((fq->state != qman_fq_state_parked) &&
  13488. + (fq->state != qman_fq_state_sched))
  13489. + return -EINVAL;
  13490. +#ifdef CONFIG_FSL_DPA_CHECKING
  13491. + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
  13492. + return -EINVAL;
  13493. +#endif
  13494. + p = get_affine_portal();
  13495. + PORTAL_IRQ_LOCK(p, irqflags);
  13496. + FQLOCK(fq);
  13497. + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
  13498. + (fq->state == qman_fq_state_retired) ||
  13499. + (fq->state == qman_fq_state_oos))) {
  13500. + rval = -EBUSY;
  13501. + goto out;
  13502. + }
  13503. + rval = table_push_fq(p, fq);
  13504. + if (rval)
  13505. + goto out;
  13506. + mcc = qm_mc_start(&p->p);
  13507. + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
  13508. + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
  13509. + while (!(mcr = qm_mc_result(&p->p)))
  13510. + cpu_relax();
  13511. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
  13512. + res = mcr->result;
  13513. + /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
  13514. + * and defer the flags until FQRNI or FQRN (respectively) show up. But
  13515. + * "Friendly" is to process OK immediately, and not set CHANGING. We do
  13516. + * friendly, otherwise the caller doesn't necessarily have a fully
  13517. + * "retired" FQ on return even if the retirement was immediate. However
  13518. + * this does mean some code duplication between here and
  13519. + * fq_state_change(). */
  13520. + if (likely(res == QM_MCR_RESULT_OK)) {
  13521. + rval = 0;
  13522. + /* Process 'fq' right away, we'll ignore FQRNI */
  13523. + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
  13524. + fq_set(fq, QMAN_FQ_STATE_NE);
  13525. + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
  13526. + fq_set(fq, QMAN_FQ_STATE_ORL);
  13527. + else
  13528. + table_del_fq(p, fq);
  13529. + if (flags)
  13530. + *flags = fq->flags;
  13531. + fq->state = qman_fq_state_retired;
  13532. + if (fq->cb.fqs) {
  13533. + /* Another issue with supporting "immediate" retirement
  13534. + * is that we're forced to drop FQRNIs, because by the
  13535. + * time they're seen it may already be "too late" (the
  13536. + * fq may have been OOS'd and free()'d already). But if
  13537. + * the upper layer wants a callback whether it's
  13538. + * immediate or not, we have to fake an "MR" entry to
  13539. + * look like an FQRNI... */
  13540. + struct qm_mr_entry msg;
  13541. + msg.verb = QM_MR_VERB_FQRNI;
  13542. + msg.fq.fqs = mcr->alterfq.fqs;
  13543. + msg.fq.fqid = fq->fqid;
  13544. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  13545. + msg.fq.contextB = fq->key;
  13546. +#else
  13547. + msg.fq.contextB = (u32)(uintptr_t)fq;
  13548. +#endif
  13549. + fq->cb.fqs(p, fq, &msg);
  13550. + }
  13551. + } else if (res == QM_MCR_RESULT_PENDING) {
  13552. + rval = 1;
  13553. + fq_set(fq, QMAN_FQ_STATE_CHANGING);
  13554. + } else {
  13555. + rval = -EIO;
  13556. + table_del_fq(p, fq);
  13557. + }
  13558. +out:
  13559. + FQUNLOCK(fq);
  13560. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13561. + put_affine_portal();
  13562. + return rval;
  13563. +}
  13564. +EXPORT_SYMBOL(qman_retire_fq);
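
Illustrative (not part of the patch): the tri-state return value above (0 = retired immediately, 1 = retirement pending, negative = error) is easy to mishandle; a caller sketch, assuming the qman_fq_state() accessor declared in fsl_qman.h.

	/* Sketch: drive an FQ to the retired state, tolerating the PENDING
	 * case by polling the state the portal's MR handler will update.
	 * Needs <linux/delay.h> for msleep(). */
	static int my_retire(struct qman_fq *fq)
	{
		u32 fqstate = 0;
		int ret = qman_retire_fq(fq, &fqstate);

		if (ret < 0)
			return ret;		/* -EINVAL/-EBUSY/-EIO */
		if (ret == 1) {
			enum qman_fq_state state;
			u32 flags;
			/* QM_MCR_RESULT_PENDING: wait for the FQRN to land */
			do {
				msleep(1);
				qman_fq_state(fq, &state, &flags);
			} while (state != qman_fq_state_retired);
		}
		if (fqstate & QMAN_FQ_STATE_NE)
			pr_info("FQ retired with frames still queued\n");
		return 0;
	}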
  13565. +
  13566. +int qman_oos_fq(struct qman_fq *fq)
  13567. +{
  13568. + struct qm_mc_command *mcc;
  13569. + struct qm_mc_result *mcr;
  13570. + struct qman_portal *p;
  13571. + unsigned long irqflags __maybe_unused;
  13572. + int ret = 0;
  13573. + u8 res;
  13574. +
  13575. + if (fq->state != qman_fq_state_retired)
  13576. + return -EINVAL;
  13577. +#ifdef CONFIG_FSL_DPA_CHECKING
  13578. + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
  13579. + return -EINVAL;
  13580. +#endif
  13581. + p = get_affine_portal();
  13582. + PORTAL_IRQ_LOCK(p, irqflags);
  13583. + FQLOCK(fq);
  13584. + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
  13585. + (fq->state != qman_fq_state_retired))) {
  13586. + ret = -EBUSY;
  13587. + goto out;
  13588. + }
  13589. + mcc = qm_mc_start(&p->p);
  13590. + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
  13591. + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
  13592. + while (!(mcr = qm_mc_result(&p->p)))
  13593. + cpu_relax();
  13594. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
  13595. + res = mcr->result;
  13596. + if (res != QM_MCR_RESULT_OK) {
  13597. + ret = -EIO;
  13598. + goto out;
  13599. + }
  13600. + fq->state = qman_fq_state_oos;
  13601. +out:
  13602. + FQUNLOCK(fq);
  13603. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13604. + put_affine_portal();
  13605. + return ret;
  13606. +}
  13607. +EXPORT_SYMBOL(qman_oos_fq);
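
Illustrative (not part of the patch): the state checks above imply a fixed teardown order; a sketch using qman_destroy_fq(), whose exact prototype is an assumption taken from the header this patch adds.

	/* Sketch: tear an FQ all the way down. qman_oos_fq() insists the FQ
	 * is already retired, and qman_destroy_fq() expects it to be OOS,
	 * so the order is retire -> oos -> destroy. */
	static void my_fq_teardown(struct qman_fq *fq)
	{
		u32 fqstate;

		if (qman_retire_fq(fq, &fqstate) < 0)
			return;
		/* (if the call returned 1, wait for retirement to complete) */
		if (qman_oos_fq(fq))
			return;
		qman_destroy_fq(fq, 0);
	}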
  13608. +
  13609. +int qman_fq_flow_control(struct qman_fq *fq, int xon)
  13610. +{
  13611. + struct qm_mc_command *mcc;
  13612. + struct qm_mc_result *mcr;
  13613. + struct qman_portal *p;
  13614. + unsigned long irqflags __maybe_unused;
  13615. + int ret = 0;
  13616. + u8 res;
  13617. + u8 myverb;
  13618. +
  13619. + if ((fq->state == qman_fq_state_oos) ||
  13620. + (fq->state == qman_fq_state_retired) ||
  13621. + (fq->state == qman_fq_state_parked))
  13622. + return -EINVAL;
  13623. +
  13624. +#ifdef CONFIG_FSL_DPA_CHECKING
  13625. + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
  13626. + return -EINVAL;
  13627. +#endif
  13628. + /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
  13629. + p = get_affine_portal();
  13630. + PORTAL_IRQ_LOCK(p, irqflags);
  13631. + FQLOCK(fq);
  13632. + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
  13633. + (fq->state == qman_fq_state_parked) ||
  13634. + (fq->state == qman_fq_state_oos) ||
  13635. + (fq->state == qman_fq_state_retired))) {
  13636. + ret = -EBUSY;
  13637. + goto out;
  13638. + }
  13639. + mcc = qm_mc_start(&p->p);
  13640. + mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
  13641. + mcc->alterfq.count = 0;
  13642. + myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
  13643. +
  13644. + qm_mc_commit(&p->p, myverb);
  13645. + while (!(mcr = qm_mc_result(&p->p)))
  13646. + cpu_relax();
  13647. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
  13648. +
  13649. + res = mcr->result;
  13650. + if (res != QM_MCR_RESULT_OK) {
  13651. + ret = -EIO;
  13652. + goto out;
  13653. + }
  13654. +out:
  13655. + FQUNLOCK(fq);
  13656. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13657. + put_affine_portal();
  13658. + return ret;
  13659. +}
  13660. +EXPORT_SYMBOL(qman_fq_flow_control);
  13661. +
  13662. +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
  13663. +{
  13664. + struct qm_mc_command *mcc;
  13665. + struct qm_mc_result *mcr;
  13666. + struct qman_portal *p = get_affine_portal();
  13667. + unsigned long irqflags __maybe_unused;
  13668. + u8 res;
  13669. +
  13670. + PORTAL_IRQ_LOCK(p, irqflags);
  13671. + mcc = qm_mc_start(&p->p);
  13672. + mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
  13673. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
  13674. + while (!(mcr = qm_mc_result(&p->p)))
  13675. + cpu_relax();
  13676. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
  13677. + res = mcr->result;
  13678. + if (res == QM_MCR_RESULT_OK)
  13679. + memcpy_fromio(fqd, &mcr->queryfq.fqd, sizeof(*fqd));
  13680. + hw_fqd_to_cpu(fqd);
  13681. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13682. + put_affine_portal();
  13683. + if (res != QM_MCR_RESULT_OK)
  13684. + return -EIO;
  13685. + return 0;
  13686. +}
  13687. +EXPORT_SYMBOL(qman_query_fq);
  13688. +
  13689. +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
  13690. +{
  13691. + struct qm_mc_command *mcc;
  13692. + struct qm_mc_result *mcr;
  13693. + struct qman_portal *p = get_affine_portal();
  13694. + unsigned long irqflags __maybe_unused;
  13695. + u8 res;
  13696. +
  13697. + PORTAL_IRQ_LOCK(p, irqflags);
  13698. + mcc = qm_mc_start(&p->p);
  13699. + mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
  13700. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
  13701. + while (!(mcr = qm_mc_result(&p->p)))
  13702. + cpu_relax();
  13703. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
  13704. + res = mcr->result;
  13705. + if (res == QM_MCR_RESULT_OK) {
  13706. + memcpy_fromio(np, &mcr->queryfq_np, sizeof(*np));
  13707. + np->fqd_link = be24_to_cpu(np->fqd_link);
  13708. + np->odp_seq = be16_to_cpu(np->odp_seq);
  13709. + np->orp_nesn = be16_to_cpu(np->orp_nesn);
  13710. + np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
  13711. + np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
  13712. + np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
  13713. + np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
  13714. + np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
  13715. + np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
  13716. + np->ics_surp = be16_to_cpu(np->ics_surp);
  13717. + np->byte_cnt = be32_to_cpu(np->byte_cnt);
  13718. + np->frm_cnt = be24_to_cpu(np->frm_cnt);
  13719. + np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
  13720. + np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
  13721. + np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
  13722. + np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
  13723. + np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
  13726. + }
  13727. +
  13728. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13729. + put_affine_portal();
  13730. + if (res == QM_MCR_RESULT_ERR_FQID)
  13731. + return -ERANGE;
  13732. + else if (res != QM_MCR_RESULT_OK)
  13733. + return -EIO;
  13734. + return 0;
  13735. +}
  13736. +EXPORT_SYMBOL(qman_query_fq_np);
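
Illustrative (not part of the patch): the converted byte_cnt/frm_cnt fields above give a cheap backlog probe; a sketch of a caller.

	/* Sketch: report how much is currently sitting on an FQ. */
	static void my_fq_backlog(struct qman_fq *fq)
	{
		struct qm_mcr_queryfq_np np;

		if (qman_query_fq_np(fq, &np))
			return;
		pr_info("fqid %u: %u frames, %u bytes queued\n",
			fq->fqid, np.frm_cnt, np.byte_cnt);
	}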
  13737. +
  13738. +int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
  13739. +{
  13740. + struct qm_mc_command *mcc;
  13741. + struct qm_mc_result *mcr;
  13742. + struct qman_portal *p = get_affine_portal();
  13743. + unsigned long irqflags __maybe_unused;
  13744. + u8 res, myverb;
  13745. +
  13746. + PORTAL_IRQ_LOCK(p, irqflags);
  13747. + myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
  13748. + QM_MCR_VERB_QUERYWQ;
  13749. + mcc = qm_mc_start(&p->p);
  13750. + mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
  13751. + qm_mc_commit(&p->p, myverb);
  13752. + while (!(mcr = qm_mc_result(&p->p)))
  13753. + cpu_relax();
  13754. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
  13755. + res = mcr->result;
  13756. + if (res == QM_MCR_RESULT_OK) {
  13757. + int i, array_len;
  13758. + wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
  13759. + array_len = ARRAY_SIZE(mcr->querywq.wq_len);
  13760. + for (i = 0; i < array_len; i++)
  13761. + wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
  13762. + }
  13763. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13764. + put_affine_portal();
  13765. + if (res != QM_MCR_RESULT_OK) {
  13766. + pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
  13767. + return -EIO;
  13768. + }
  13769. + return 0;
  13770. +}
  13771. +EXPORT_SYMBOL(qman_query_wq);
  13772. +
  13773. +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
  13774. + struct qm_mcr_cgrtestwrite *result)
  13775. +{
  13776. + struct qm_mc_command *mcc;
  13777. + struct qm_mc_result *mcr;
  13778. + struct qman_portal *p = get_affine_portal();
  13779. + unsigned long irqflags __maybe_unused;
  13780. + u8 res;
  13781. +
  13782. + PORTAL_IRQ_LOCK(p, irqflags);
  13783. + mcc = qm_mc_start(&p->p);
  13784. + mcc->cgrtestwrite.cgid = cgr->cgrid;
  13785. + mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
  13786. + mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
  13787. + qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
  13788. + while (!(mcr = qm_mc_result(&p->p)))
  13789. + cpu_relax();
  13790. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
  13791. + res = mcr->result;
  13792. + if (res == QM_MCR_RESULT_OK)
  13793. + memcpy_fromio(result, &mcr->cgrtestwrite, sizeof(*result));
  13794. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13795. + put_affine_portal();
  13796. + if (res != QM_MCR_RESULT_OK) {
  13797. + pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
  13798. + return -EIO;
  13799. + }
  13800. + return 0;
  13801. +}
  13802. +EXPORT_SYMBOL(qman_testwrite_cgr);
  13803. +
  13804. +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
  13805. +{
  13806. + struct qm_mc_command *mcc;
  13807. + struct qm_mc_result *mcr;
  13808. + struct qman_portal *p = get_affine_portal();
  13809. + unsigned long irqflags __maybe_unused;
  13810. + u8 res;
  13811. + int i;
  13812. +
  13813. + PORTAL_IRQ_LOCK(p, irqflags);
  13814. + mcc = qm_mc_start(&p->p);
  13815. + mcc->querycgr.cgid = cgr->cgrid;
  13816. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
  13817. + while (!(mcr = qm_mc_result(&p->p)))
  13818. + cpu_relax();
  13819. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
  13820. + res = mcr->result;
  13821. + if (res == QM_MCR_RESULT_OK)
  13822. + memcpy_fromio(cgrd, &mcr->querycgr, sizeof(*cgrd));
  13823. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13824. + put_affine_portal();
  13825. + if (res != QM_MCR_RESULT_OK) {
  13826. + pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
  13827. + return -EIO;
  13828. + }
  13829. + cgrd->cgr.wr_parm_g.word =
  13830. + be32_to_cpu(cgrd->cgr.wr_parm_g.word);
  13831. + cgrd->cgr.wr_parm_y.word =
  13832. + be32_to_cpu(cgrd->cgr.wr_parm_y.word);
  13833. + cgrd->cgr.wr_parm_r.word =
  13834. + be32_to_cpu(cgrd->cgr.wr_parm_r.word);
  13835. + cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
  13836. + cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
  13837. + for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
  13838. + be32_to_cpus(&cgrd->cscn_targ_swp[i]);
  13839. + return 0;
  13840. +}
  13841. +EXPORT_SYMBOL(qman_query_cgr);
  13842. +
  13843. +int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
  13844. +{
  13845. + struct qm_mc_result *mcr;
  13846. + struct qman_portal *p = get_affine_portal();
  13847. + unsigned long irqflags __maybe_unused;
  13848. + u8 res;
  13849. + int i;
  13850. +
  13851. + PORTAL_IRQ_LOCK(p, irqflags);
  13852. + qm_mc_start(&p->p);
  13853. + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
  13854. + while (!(mcr = qm_mc_result(&p->p)))
  13855. + cpu_relax();
  13856. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  13857. + QM_MCC_VERB_QUERYCONGESTION);
  13858. + res = mcr->result;
  13859. + if (res == QM_MCR_RESULT_OK)
  13860. + memcpy_fromio(congestion, &mcr->querycongestion,
  13861. + sizeof(*congestion));
  13862. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13863. + put_affine_portal();
  13864. + if (res != QM_MCR_RESULT_OK) {
  13865. + pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
  13866. + return -EIO;
  13867. + }
  13868. +
  13869. + for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++)
  13870. + be32_to_cpus(&congestion->state.__state[i]);
  13871. + return 0;
  13872. +}
  13873. +EXPORT_SYMBOL(qman_query_congestion);
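
Illustrative (not part of the patch): the query returns the congestion state of every CGR as a bit-per-CGR array; a sketch that dumps the raw state words after the CPU-endian conversion performed above.

	/* Sketch: log which 32-CGR groups currently contain a congested CGR. */
	static void my_dump_congestion(void)
	{
		struct qm_mcr_querycongestion cong;
		int i;

		if (qman_query_congestion(&cong))
			return;
		for (i = 0; i < ARRAY_SIZE(cong.state.__state); i++)
			if (cong.state.__state[i])
				pr_info("congestion word %d: 0x%08x\n", i,
					cong.state.__state[i]);
	}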
  13874. +
  13875. +/* internal function used as a wait_event() expression */
  13876. +static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
  13877. +{
  13878. + unsigned long irqflags __maybe_unused;
  13879. + int ret = -EBUSY;
  13880. + PORTAL_IRQ_LOCK(p, irqflags);
  13881. + if (!p->vdqcr_owned) {
  13882. + FQLOCK(fq);
  13883. + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
  13884. + goto escape;
  13885. + fq_set(fq, QMAN_FQ_STATE_VDQCR);
  13886. + FQUNLOCK(fq);
  13887. + p->vdqcr_owned = fq;
  13888. + ret = 0;
  13889. + }
  13890. +escape:
  13891. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13892. + if (!ret)
  13893. + qm_dqrr_vdqcr_set(&p->p, vdqcr);
  13894. + return ret;
  13895. +}
  13896. +
  13897. +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
  13898. +{
  13899. + int ret;
  13900. + *p = get_affine_portal();
  13901. + ret = set_p_vdqcr(*p, fq, vdqcr);
  13902. + put_affine_portal();
  13903. + return ret;
  13904. +}
  13905. +
  13906. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  13907. +static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
  13908. + u32 vdqcr, u32 flags)
  13909. +{
  13910. + int ret = 0;
  13911. + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
  13912. + ret = wait_event_interruptible(affine_queue,
  13913. + !(ret = set_p_vdqcr(p, fq, vdqcr)));
  13914. + else
  13915. + wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
  13916. + return ret;
  13917. +}
  13918. +
  13919. +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
  13920. + u32 vdqcr, u32 flags)
  13921. +{
  13922. + int ret = 0;
  13923. + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
  13924. + ret = wait_event_interruptible(affine_queue,
  13925. + !(ret = set_vdqcr(p, fq, vdqcr)));
  13926. + else
  13927. + wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
  13928. + return ret;
  13929. +}
  13930. +#endif
  13931. +
  13932. +int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
  13933. + u32 flags __maybe_unused, u32 vdqcr)
  13934. +{
  13935. + int ret;
  13936. +
  13937. + if ((fq->state != qman_fq_state_parked) &&
  13938. + (fq->state != qman_fq_state_retired))
  13939. + return -EINVAL;
  13940. + if (vdqcr & QM_VDQCR_FQID_MASK)
  13941. + return -EINVAL;
  13942. + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
  13943. + return -EBUSY;
  13944. + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
  13945. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  13946. + if (flags & QMAN_VOLATILE_FLAG_WAIT)
  13947. + ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
  13948. + else
  13949. +#endif
  13950. + ret = set_p_vdqcr(p, fq, vdqcr);
  13951. + if (ret)
  13952. + return ret;
  13953. + /* VDQCR is set */
  13954. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  13955. + if (flags & QMAN_VOLATILE_FLAG_FINISH) {
  13956. + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
  13957. + /* NB: don't propagate any error - the caller wouldn't
  13958. + * know whether the VDQCR was issued or not. A signal
  13959. + * could arrive after returning anyway, so the caller
  13960. + * can check signal_pending() if that's an issue. */
  13961. + wait_event_interruptible(affine_queue,
  13962. + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
  13963. + else
  13964. + wait_event(affine_queue,
  13965. + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
  13966. + }
  13967. +#endif
  13968. + return 0;
  13969. +}
  13970. +EXPORT_SYMBOL(qman_p_volatile_dequeue);
  13971. +
  13972. +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
  13973. + u32 vdqcr)
  13974. +{
  13975. + struct qman_portal *p;
  13976. + int ret;
  13977. +
  13978. + if ((fq->state != qman_fq_state_parked) &&
  13979. + (fq->state != qman_fq_state_retired))
  13980. + return -EINVAL;
  13981. + if (vdqcr & QM_VDQCR_FQID_MASK)
  13982. + return -EINVAL;
  13983. + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
  13984. + return -EBUSY;
  13985. + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
  13986. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  13987. + if (flags & QMAN_VOLATILE_FLAG_WAIT)
  13988. + ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
  13989. + else
  13990. +#endif
  13991. + ret = set_vdqcr(&p, fq, vdqcr);
  13992. + if (ret)
  13993. + return ret;
  13994. + /* VDQCR is set */
  13995. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  13996. + if (flags & QMAN_VOLATILE_FLAG_FINISH) {
  13997. + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
  13998. + /* NB: don't propagate any error - the caller wouldn't
  13999. + * know whether the VDQCR was issued or not. A signal
  14000. + * could arrive after returning anyway, so the caller
  14001. + * can check signal_pending() if that's an issue. */
  14002. + wait_event_interruptible(affine_queue,
  14003. + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
  14004. + else
  14005. + wait_event(affine_queue,
  14006. + !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
  14007. + }
  14008. +#endif
  14009. + return 0;
  14010. +}
  14011. +EXPORT_SYMBOL(qman_volatile_dequeue);
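
Illustrative (not part of the patch): volatile dequeues are how a parked or retired FQ gets drained; a sketch assuming the QM_VDQCR_NUMFRAMES_SET() helper from fsl_qman.h.

	/* Sketch: pull up to 8 frames off a parked/retired FQ and block until
	 * the volatile dequeue command has fully completed (the frames
	 * themselves arrive through the FQ's normal dqrr callback). */
	static int my_drain_some(struct qman_fq *fq)
	{
		return qman_volatile_dequeue(fq,
				QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
				QM_VDQCR_NUMFRAMES_SET(8));
	}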
  14012. +
  14013. +static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
  14014. +{
  14015. + if (avail)
  14016. + qm_eqcr_cce_prefetch(&p->p);
  14017. + else
  14018. + qm_eqcr_cce_update(&p->p);
  14019. +}
  14020. +
  14021. +int qman_eqcr_is_empty(void)
  14022. +{
  14023. + unsigned long irqflags __maybe_unused;
  14024. + struct qman_portal *p = get_affine_portal();
  14025. + u8 avail;
  14026. +
  14027. + PORTAL_IRQ_LOCK(p, irqflags);
  14028. + update_eqcr_ci(p, 0);
  14029. + avail = qm_eqcr_get_fill(&p->p);
  14030. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14031. + put_affine_portal();
  14032. + return avail == 0;
  14033. +}
  14034. +EXPORT_SYMBOL(qman_eqcr_is_empty);
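
Illustrative (not part of the patch): a typical use of the helper above is to spin until previously issued enqueue commands have been consumed by hardware, for example before quiescing a portal.

	/* Sketch: wait for the affine portal's enqueue ring to drain. */
	static void my_wait_eqcr_drain(void)
	{
		while (!qman_eqcr_is_empty())
			cpu_relax();
	}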
  14035. +
  14036. +void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
  14037. +{
  14038. + if (affine) {
  14039. + unsigned long irqflags __maybe_unused;
  14040. + struct qman_portal *p = get_affine_portal();
  14041. + PORTAL_IRQ_LOCK(p, irqflags);
  14042. + p->cb_dc_ern = handler;
  14043. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14044. + put_affine_portal();
  14045. + } else
  14046. + cb_dc_ern = handler;
  14047. +}
  14048. +EXPORT_SYMBOL(qman_set_dc_ern);
  14049. +
  14050. +static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
  14051. + unsigned long *irqflags __maybe_unused,
  14052. + struct qman_fq *fq,
  14053. + const struct qm_fd *fd,
  14054. + u32 flags)
  14055. +{
  14056. + struct qm_eqcr_entry *eq;
  14057. + u8 avail;
  14058. + PORTAL_IRQ_LOCK(p, (*irqflags));
  14059. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14060. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14061. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14062. + if (p->eqci_owned) {
  14063. + PORTAL_IRQ_UNLOCK(p, (*irqflags));
  14064. + return NULL;
  14065. + }
  14066. + p->eqci_owned = fq;
  14067. + }
  14068. +#endif
  14069. + if (p->use_eqcr_ci_stashing) {
  14070. + /*
  14071. + * The stashing case is easy, only update if we need to in
  14072. + * order to try and liberate ring entries.
  14073. + */
  14074. + eq = qm_eqcr_start_stash(&p->p);
  14075. + } else {
  14076. + /*
  14077. + * The non-stashing case is harder, need to prefetch ahead of
  14078. + * time.
  14079. + */
  14080. + avail = qm_eqcr_get_avail(&p->p);
  14081. + if (avail < 2)
  14082. + update_eqcr_ci(p, avail);
  14083. + eq = qm_eqcr_start_no_stash(&p->p);
  14084. + }
  14085. +
  14086. + if (unlikely(!eq)) {
  14087. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14088. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14089. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
  14090. + p->eqci_owned = NULL;
  14091. +#endif
  14092. + PORTAL_IRQ_UNLOCK(p, (*irqflags));
  14093. + return NULL;
  14094. + }
  14095. + if (flags & QMAN_ENQUEUE_FLAG_DCA)
  14096. + eq->dca = QM_EQCR_DCA_ENABLE |
  14097. + ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
  14098. + QM_EQCR_DCA_PARK : 0) |
  14099. + ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
  14100. + eq->fqid = cpu_to_be32(fq->fqid);
  14101. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  14102. + eq->tag = cpu_to_be32(fq->key);
  14103. +#else
  14104. + eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
  14105. +#endif
  14106. + eq->fd = *fd;
  14107. + cpu_to_hw_fd(&eq->fd);
  14108. + return eq;
  14109. +}
  14110. +
  14111. +static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
  14112. + unsigned long *irqflags __maybe_unused,
  14113. + struct qman_fq *fq,
  14114. + const struct qm_fd *fd,
  14115. + u32 flags)
  14116. +{
  14117. + struct qm_eqcr_entry *eq;
  14118. + *p = get_affine_portal();
  14119. + eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
  14120. + if (!eq)
  14121. + put_affine_portal();
  14122. + return eq;
  14123. +}
  14124. +
  14125. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14126. +static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
  14127. + unsigned long *irqflags __maybe_unused,
  14128. + struct qman_fq *fq,
  14129. + const struct qm_fd *fd,
  14130. + u32 flags)
  14131. +{
  14132. + struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
  14133. + if (!eq)
  14134. + qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
  14135. + return eq;
  14136. +}
  14137. +static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
  14138. + unsigned long *irqflags __maybe_unused,
  14139. + struct qman_fq *fq,
  14140. + const struct qm_fd *fd,
  14141. + u32 flags)
  14142. +{
  14143. + struct qm_eqcr_entry *eq;
  14144. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14145. + /* NB: return NULL if signal occurs before completion. Signal
  14146. + * can occur during return. Caller must check for signal */
  14147. + wait_event_interruptible(affine_queue,
  14148. + (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
  14149. + else
  14150. + wait_event(affine_queue,
  14151. + (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
  14152. + return eq;
  14153. +}
  14154. +static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
  14155. + unsigned long *irqflags __maybe_unused,
  14156. + struct qman_fq *fq,
  14157. + const struct qm_fd *fd,
  14158. + u32 flags)
  14159. +{
  14160. + struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
  14161. + if (!eq)
  14162. + qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
  14163. + return eq;
  14164. +}
  14165. +static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
  14166. + unsigned long *irqflags __maybe_unused,
  14167. + struct qman_fq *fq,
  14168. + const struct qm_fd *fd,
  14169. + u32 flags)
  14170. +{
  14171. + struct qm_eqcr_entry *eq;
  14172. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14173. + /* NB: return NULL if signal occurs before completion. Signal
  14174. + * can occur during return. Caller must check for signal */
  14175. + wait_event_interruptible(affine_queue,
  14176. + (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
  14177. + else
  14178. + wait_event(affine_queue,
  14179. + (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
  14180. + return eq;
  14181. +}
  14182. +#endif
  14183. +
  14184. +int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
  14185. + const struct qm_fd *fd, u32 flags)
  14186. +{
  14187. + struct qm_eqcr_entry *eq;
  14188. + unsigned long irqflags __maybe_unused;
  14189. +
  14190. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14191. + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
  14192. + eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
  14193. + else
  14194. +#endif
  14195. + eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
  14196. + if (!eq)
  14197. + return -EBUSY;
  14198. + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
  14199. + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
  14200. + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
  14201. + /* Factor the below out, it's used from qman_enqueue_orp() too */
  14202. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14203. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14204. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14205. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14206. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14207. + /* NB: return success even if signal occurs before
  14208. + * condition is true. pvb_commit guarantees success */
  14209. + wait_event_interruptible(affine_queue,
  14210. + (p->eqci_owned != fq));
  14211. + else
  14212. + wait_event(affine_queue, (p->eqci_owned != fq));
  14213. + }
  14214. +#endif
  14215. + return 0;
  14216. +}
  14217. +EXPORT_SYMBOL(qman_p_enqueue);
  14218. +
  14219. +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
  14220. +{
  14221. + struct qman_portal *p;
  14222. + struct qm_eqcr_entry *eq;
  14223. + unsigned long irqflags __maybe_unused;
  14224. +
  14225. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14226. + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
  14227. + eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
  14228. + else
  14229. +#endif
  14230. + eq = try_eq_start(&p, &irqflags, fq, fd, flags);
  14231. + if (!eq)
  14232. + return -EBUSY;
  14233. + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
  14234. + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
  14235. + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
  14236. + /* Factor the below out, it's used from qman_enqueue_orp() too */
  14237. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14238. + put_affine_portal();
  14239. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14240. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14241. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14242. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14243. + /* NB: return success even if signal occurs before
  14244. + * condition is true. pvb_commit guarantees success */
  14245. + wait_event_interruptible(affine_queue,
  14246. + (p->eqci_owned != fq));
  14247. + else
  14248. + wait_event(affine_queue, (p->eqci_owned != fq));
  14249. + }
  14250. +#endif
  14251. + return 0;
  14252. +}
  14253. +EXPORT_SYMBOL(qman_enqueue);
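
Illustrative (not part of the patch): a minimal enqueue sketch. The qm_fd field and helper names (qm_fd_addr_set64(), format, length20, qm_fd_contig) follow the fsl_qman.h definitions added elsewhere in this patch and are assumptions as far as this excerpt goes; the buffer is presumed to be DMA-mapped already.

	/* Sketch: build a single contiguous frame descriptor and enqueue it,
	 * retrying briefly if the EQCR ring is momentarily full. */
	static int my_send(struct qman_fq *fq, dma_addr_t buf, u32 len)
	{
		struct qm_fd fd;
		int ret, retries = 1000;

		memset(&fd, 0, sizeof(fd));
		qm_fd_addr_set64(&fd, buf);	/* 40-bit buffer address */
		fd.format = qm_fd_contig;
		fd.length20 = len;

		do {
			ret = qman_enqueue(fq, &fd, 0);
		} while (ret == -EBUSY && --retries);
		return ret;
	}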
  14254. +
  14255. +int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
  14256. + const struct qm_fd *fd, u32 flags,
  14257. + struct qman_fq *orp, u16 orp_seqnum)
  14258. +{
  14259. + struct qm_eqcr_entry *eq;
  14260. + unsigned long irqflags __maybe_unused;
  14261. +
  14262. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14263. + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
  14264. + eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
  14265. + else
  14266. +#endif
  14267. + eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
  14268. + if (!eq)
  14269. + return -EBUSY;
  14270. + /* Process ORP-specifics here */
  14271. + if (flags & QMAN_ENQUEUE_FLAG_NLIS)
  14272. + orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
  14273. + else {
  14274. + orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
  14275. + if (flags & QMAN_ENQUEUE_FLAG_NESN)
  14276. + orp_seqnum |= QM_EQCR_SEQNUM_NESN;
  14277. + else
  14278. + /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
  14279. + orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
  14280. + }
  14281. + eq->seqnum = cpu_to_be16(orp_seqnum);
  14282. + eq->orp = cpu_to_be32(orp->fqid);
  14283. + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
  14284. + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
  14285. + ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
  14286. + 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
  14287. + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
  14288. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14289. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14290. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14291. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14292. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14293. + /* NB: return success even if signal occurs before
  14294. + * condition is true. pvb_commit guarantees success */
  14295. + wait_event_interruptible(affine_queue,
  14296. + (p->eqci_owned != fq));
  14297. + else
  14298. + wait_event(affine_queue, (p->eqci_owned != fq));
  14299. + }
  14300. +#endif
  14301. + return 0;
  14302. +}
  14303. +EXPORT_SYMBOL(qman_p_enqueue_orp);
  14304. +
  14305. +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
  14306. + struct qman_fq *orp, u16 orp_seqnum)
  14307. +{
  14308. + struct qman_portal *p;
  14309. + struct qm_eqcr_entry *eq;
  14310. + unsigned long irqflags __maybe_unused;
  14311. +
  14312. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14313. + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
  14314. + eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
  14315. + else
  14316. +#endif
  14317. + eq = try_eq_start(&p, &irqflags, fq, fd, flags);
  14318. + if (!eq)
  14319. + return -EBUSY;
  14320. + /* Process ORP-specifics here */
  14321. + if (flags & QMAN_ENQUEUE_FLAG_NLIS)
  14322. + orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
  14323. + else {
  14324. + orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
  14325. + if (flags & QMAN_ENQUEUE_FLAG_NESN)
  14326. + orp_seqnum |= QM_EQCR_SEQNUM_NESN;
  14327. + else
  14328. + /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
  14329. + orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
  14330. + }
  14331. + eq->seqnum = cpu_to_be16(orp_seqnum);
  14332. + eq->orp = cpu_to_be32(orp->fqid);
  14333. + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
  14334. + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
  14335. + ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
  14336. + 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
  14337. + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
  14338. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14339. + put_affine_portal();
  14340. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14341. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14342. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14343. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14344. + /* NB: return success even if signal occurs before
  14345. + * condition is true. pvb_commit guarantees success */
  14346. + wait_event_interruptible(affine_queue,
  14347. + (p->eqci_owned != fq));
  14348. + else
  14349. + wait_event(affine_queue, (p->eqci_owned != fq));
  14350. + }
  14351. +#endif
  14352. + return 0;
  14353. +}
  14354. +EXPORT_SYMBOL(qman_enqueue_orp);
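
Illustrative (not part of the patch): an order-restoration enqueue only adds the ORP FQ and a sequence number on top of the normal call; a tiny sketch reusing the frame-descriptor setup from the previous example.

	/* Sketch: enqueue 'fd' through order-restoration point 'orp_fq', using
	 * the sequence number captured when the frame was dequeued. */
	static int my_send_in_order(struct qman_fq *fq, struct qman_fq *orp_fq,
				    const struct qm_fd *fd, u16 seqnum)
	{
		return qman_enqueue_orp(fq, fd, 0, orp_fq, seqnum);
	}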
  14355. +
  14356. +int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
  14357. + const struct qm_fd *fd, u32 flags,
  14358. + qman_cb_precommit cb, void *cb_arg)
  14359. +{
  14360. + struct qm_eqcr_entry *eq;
  14361. + unsigned long irqflags __maybe_unused;
  14362. +
  14363. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14364. + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
  14365. + eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
  14366. + else
  14367. +#endif
  14368. + eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
  14369. + if (!eq)
  14370. + return -EBUSY;
  14371. + /* invoke user supplied callback function before writing commit verb */
  14372. + if (cb(cb_arg)) {
  14373. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14374. + return -EINVAL;
  14375. + }
  14376. + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
  14377. + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
  14378. + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
  14379. + /* Factor the below out, it's used from qman_enqueue_orp() too */
  14380. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14381. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14382. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14383. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14384. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14385. + /* NB: return success even if signal occurs before
  14386. + * condition is true. pvb_commit guarantees success */
  14387. + wait_event_interruptible(affine_queue,
  14388. + (p->eqci_owned != fq));
  14389. + else
  14390. + wait_event(affine_queue, (p->eqci_owned != fq));
  14391. + }
  14392. +#endif
  14393. + return 0;
  14394. +}
  14395. +EXPORT_SYMBOL(qman_p_enqueue_precommit);
  14396. +
  14397. +int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
  14398. + u32 flags, qman_cb_precommit cb, void *cb_arg)
  14399. +{
  14400. + struct qman_portal *p;
  14401. + struct qm_eqcr_entry *eq;
  14402. + unsigned long irqflags __maybe_unused;
  14403. +
  14404. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  14405. + if (flags & QMAN_ENQUEUE_FLAG_WAIT)
  14406. + eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
  14407. + else
  14408. +#endif
  14409. + eq = try_eq_start(&p, &irqflags, fq, fd, flags);
  14410. + if (!eq)
  14411. + return -EBUSY;
  14412. + /* invoke user supplied callback function before writing commit verb */
  14413. + if (cb(cb_arg)) {
  14414. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14415. + put_affine_portal();
  14416. + return -EINVAL;
  14417. + }
  14418. + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
  14419. + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
  14420. + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
  14421. + /* Factor the below out, it's used from qman_enqueue_orp() too */
  14422. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14423. + put_affine_portal();
  14424. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  14425. + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
  14426. + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
  14427. + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
  14428. + /* NB: return success even if signal occurs before
  14429. + * condition is true. pvb_commit guarantees success */
  14430. + wait_event_interruptible(affine_queue,
  14431. + (p->eqci_owned != fq));
  14432. + else
  14433. + wait_event(affine_queue, (p->eqci_owned != fq));
  14434. + }
  14435. +#endif
  14436. + return 0;
  14437. +}
  14438. +EXPORT_SYMBOL(qman_enqueue_precommit);
  14439. +
  14440. +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
  14441. + struct qm_mcc_initcgr *opts)
  14442. +{
  14443. + struct qm_mc_command *mcc;
  14444. + struct qm_mc_result *mcr;
  14445. + struct qman_portal *p = get_affine_portal();
  14446. + unsigned long irqflags __maybe_unused;
  14447. + u8 res;
  14448. + u8 verb = QM_MCC_VERB_MODIFYCGR;
  14449. +
  14450. + PORTAL_IRQ_LOCK(p, irqflags);
  14451. + mcc = qm_mc_start(&p->p);
  14452. + if (opts)
  14453. + mcc->initcgr = *opts;
  14454. + mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
  14455. + mcc->initcgr.cgr.wr_parm_g.word =
  14456. + cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
  14457. + mcc->initcgr.cgr.wr_parm_y.word =
  14458. + cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
  14459. + mcc->initcgr.cgr.wr_parm_r.word =
  14460. + cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
  14461. + mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
  14462. + mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
  14463. +
  14464. + mcc->initcgr.cgid = cgr->cgrid;
  14465. + if (flags & QMAN_CGR_FLAG_USE_INIT)
  14466. + verb = QM_MCC_VERB_INITCGR;
  14467. + qm_mc_commit(&p->p, verb);
  14468. + while (!(mcr = qm_mc_result(&p->p)))
  14469. + cpu_relax();
  14470. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
  14471. + res = mcr->result;
  14472. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14473. + put_affine_portal();
  14474. + return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
  14475. +}
  14476. +EXPORT_SYMBOL(qman_modify_cgr);
  14477. +
  14478. +#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
  14479. + QM_CHANNEL_SWPORTAL0))
  14480. +#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
  14481. +#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
  14482. +
  14483. +static u8 qman_cgr_cpus[__CGR_NUM];
  14484. +
  14485. +int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
  14486. + struct qm_mcc_initcgr *opts)
  14487. +{
  14488. + unsigned long irqflags __maybe_unused;
  14489. + struct qm_mcr_querycgr cgr_state;
  14490. + struct qm_mcc_initcgr local_opts;
  14491. + int ret;
  14492. + struct qman_portal *p;
  14493. +
  14494. + /* We have to check that the provided CGRID is within the limits of the
  14495. + * data-structures, for obvious reasons. However we'll let h/w take
  14496. + * care of determining whether it's within the limits of what exists on
  14497. + * the SoC. */
  14498. + if (cgr->cgrid >= __CGR_NUM)
  14499. + return -EINVAL;
  14500. +
  14501. + preempt_disable();
  14502. + p = get_affine_portal();
  14503. + qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
  14504. + preempt_enable();
  14505. +
  14506. + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
  14507. + cgr->chan = p->config->public_cfg.channel;
  14508. + spin_lock_irqsave(&p->cgr_lock, irqflags);
  14509. +
  14510. + /* if no opts specified, just add it to the list */
  14511. + if (!opts)
  14512. + goto add_list;
  14513. +
  14514. + ret = qman_query_cgr(cgr, &cgr_state);
  14515. + if (ret)
  14516. + goto release_lock;
  14517. + if (opts)
  14518. + local_opts = *opts;
  14519. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
  14520. + local_opts.cgr.cscn_targ_upd_ctrl =
  14521. + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
  14522. + else
  14523. + /* Overwrite TARG */
  14524. + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
  14525. + TARG_MASK(p);
  14526. + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
  14527. +
  14528. + /* send init if flags indicate so */
  14529. + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
  14530. + ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
  14531. + else
  14532. + ret = qman_modify_cgr(cgr, 0, &local_opts);
  14533. + if (ret)
  14534. + goto release_lock;
  14535. +add_list:
  14536. + list_add(&cgr->node, &p->cgr_cbs);
  14537. +
  14538. + /* Determine if newly added object requires its callback to be called */
  14539. + ret = qman_query_cgr(cgr, &cgr_state);
  14540. + if (ret) {
  14541. + /* we can't go back, so proceed and return success, but scream
  14542. + * and wail to the log file */
  14543. + pr_crit("CGR HW state partially modified\n");
  14544. + ret = 0;
  14545. + goto release_lock;
  14546. + }
  14547. + if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
  14548. + cgr->cgrid))
  14549. + cgr->cb(p, cgr, 1);
  14550. +release_lock:
  14551. + spin_unlock_irqrestore(&p->cgr_lock, irqflags);
  14552. + put_affine_portal();
  14553. + return ret;
  14554. +}
  14555. +EXPORT_SYMBOL(qman_create_cgr);
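
Illustrative (not part of the patch): a sketch of registering a congestion group with a state-change callback. The QM_CGR_WE_* write-enable bits, the cscn_en field, QM_CGR_EN and the qm_cgr_cs_thres_set64() helper are assumed from fsl_qman.h; the threshold value is arbitrary.

	/* Sketch: hypothetical congestion callback plus CGR setup. */
	static void my_cscn(struct qman_portal *qm, struct qman_cgr *c,
			    int congested)
	{
		pr_info("CGR %d is %s\n", c->cgrid,
			congested ? "congested" : "uncongested");
	}

	static int my_cgr_setup(struct qman_cgr *cgr, u32 cgrid)
	{
		struct qm_mcc_initcgr opts;

		memset(&opts, 0, sizeof(opts));
		opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
		opts.cgr.cscn_en = QM_CGR_EN;
		/* ~64KB congestion-state entry threshold, rounded up */
		qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 64 * 1024, 1);

		cgr->cgrid = cgrid;
		cgr->cb = my_cscn;
		return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	}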
  14556. +
  14557. +int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
  14558. + struct qm_mcc_initcgr *opts)
  14559. +{
  14560. + unsigned long irqflags __maybe_unused;
  14561. + struct qm_mcc_initcgr local_opts;
  14562. + struct qm_mcr_querycgr cgr_state;
  14563. + int ret;
  14564. +
  14565. + /* We have to check that the provided CGRID is within the limits of the
  14566. + * data-structures, for obvious reasons. However we'll let h/w take
  14567. + * care of determining whether it's within the limits of what exists on
  14568. + * the SoC.
  14569. + */
  14570. + if (cgr->cgrid >= __CGR_NUM)
  14571. + return -EINVAL;
  14572. +
  14573. + ret = qman_query_cgr(cgr, &cgr_state);
  14574. + if (ret)
  14575. + return ret;
  14576. +
  14577. + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
  14578. + if (opts)
  14579. + local_opts = *opts;
  14580. +
  14581. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
  14582. + local_opts.cgr.cscn_targ_upd_ctrl =
  14583. + QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
  14584. + QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
  14585. + else
  14586. + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
  14587. + TARG_DCP_MASK(dcp_portal);
  14588. + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
  14589. +
  14590. + /* send init if flags indicate so */
  14591. + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
  14592. + ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
  14593. + &local_opts);
  14594. + else
  14595. + ret = qman_modify_cgr(cgr, 0, &local_opts);
  14596. +
  14597. + return ret;
  14598. +}
  14599. +EXPORT_SYMBOL(qman_create_cgr_to_dcp);
  14600. +
  14601. +int qman_delete_cgr(struct qman_cgr *cgr)
  14602. +{
  14603. + unsigned long irqflags __maybe_unused;
  14604. + struct qm_mcr_querycgr cgr_state;
  14605. + struct qm_mcc_initcgr local_opts;
  14606. + int ret = 0;
  14607. + struct qman_cgr *i;
  14608. + struct qman_portal *p = get_affine_portal();
  14609. +
  14610. + if (cgr->chan != p->config->public_cfg.channel) {
  14611. + pr_crit("Attempting to delete cgr from different portal "
  14612. + "than it was create: create 0x%x, delete 0x%x\n",
  14613. + cgr->chan, p->config->public_cfg.channel);
  14614. + ret = -EINVAL;
  14615. + goto put_portal;
  14616. + }
  14617. + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
  14618. + spin_lock_irqsave(&p->cgr_lock, irqflags);
  14619. + list_del(&cgr->node);
  14620. + /*
  14621. + * If there are no other CGR objects for this CGRID in the list, update
  14622. + * CSCN_TARG accordingly
  14623. + */
  14624. + list_for_each_entry(i, &p->cgr_cbs, node)
  14625. + if ((i->cgrid == cgr->cgrid) && i->cb)
  14626. + goto release_lock;
  14627. + ret = qman_query_cgr(cgr, &cgr_state);
  14628. + if (ret) {
  14629. + /* add back to the list */
  14630. + list_add(&cgr->node, &p->cgr_cbs);
  14631. + goto release_lock;
  14632. + }
  14633. + /* Overwrite TARG */
  14634. + local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
  14635. + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
  14636. + local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
  14637. + else
  14638. + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
  14639. + ~(TARG_MASK(p));
  14640. + ret = qman_modify_cgr(cgr, 0, &local_opts);
  14641. + if (ret)
  14642. + /* add back to the list */
  14643. + list_add(&cgr->node, &p->cgr_cbs);
  14644. +release_lock:
  14645. + spin_unlock_irqrestore(&p->cgr_lock, irqflags);
  14646. +put_portal:
  14647. + put_affine_portal();
  14648. + return ret;
  14649. +}
  14650. +EXPORT_SYMBOL(qman_delete_cgr);
  14651. +
  14652. +struct cgr_comp {
  14653. + struct qman_cgr *cgr;
  14654. + struct completion completion;
  14655. +};
  14656. +
  14657. +static int qman_delete_cgr_thread(void *p)
  14658. +{
  14659. + struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
  14660. + int res;
  14661. +
  14662. + res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr);
  14663. + complete(&cgr_comp->completion);
  14664. +
  14665. + return res;
  14666. +}
  14667. +
  14668. +void qman_delete_cgr_safe(struct qman_cgr *cgr)
  14669. +{
  14670. + struct task_struct *thread;
  14671. + struct cgr_comp cgr_comp;
  14672. +
  14673. + preempt_disable();
  14674. + if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
  14675. + init_completion(&cgr_comp.completion);
  14676. + cgr_comp.cgr = cgr;
  14677. + thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
  14678. + "cgr_del");
  14679. +
  14680. + if (likely(!IS_ERR(thread))) {
  14681. + kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
  14682. + wake_up_process(thread);
  14683. + wait_for_completion(&cgr_comp.completion);
  14684. + preempt_enable();
  14685. + return;
  14686. + }
  14687. + }
  14688. + qman_delete_cgr(cgr);
  14689. + preempt_enable();
  14690. +}
  14691. +EXPORT_SYMBOL(qman_delete_cgr_safe);
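
Illustrative (not part of the patch): qman_delete_cgr() must run on the cpu whose portal the CGR was created on, so callers on arbitrary cpus should use the _safe variant, which bounces to the owning cpu via a bound kthread when necessary.

	/* Sketch: safe CGR teardown from any cpu. */
	static void my_cgr_teardown(struct qman_cgr *cgr)
	{
		qman_delete_cgr_safe(cgr);
	}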
  14692. +
  14693. +int qm_get_clock(u64 *clock_hz)
  14694. +{
  14695. + if (!qman_clk) {
  14696. + pr_warn("Qman clock speed is unknown\n");
  14697. + return -EINVAL;
  14698. + }
  14699. + *clock_hz = (u64)qman_clk;
  14700. + return 0;
  14701. +}
  14702. +EXPORT_SYMBOL(qm_get_clock);
  14703. +
  14704. +int qm_set_clock(u64 clock_hz)
  14705. +{
  14706. + if (qman_clk)
  14707. + return -1;
  14708. + qman_clk = (u32)clock_hz;
  14709. + return 0;
  14710. +}
  14711. +EXPORT_SYMBOL(qm_set_clock);
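
Illustrative (not part of the patch): the clock accessor is mostly used to convert shaper and timestamp units; a sketch that turns the reported rate into cycles per microsecond.

	/* Sketch: derive QMan cycles-per-microsecond from the reported clock. */
	static int my_qman_cycles_per_us(u64 *cyc_per_us)
	{
		u64 hz;
		int err = qm_get_clock(&hz);

		if (err)
			return err;
		*cyc_per_us = div64_u64(hz, 1000000);
		return 0;
	}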
  14712. +
  14713. +/* CEETM management command */
  14714. +static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts)
  14715. +{
  14716. + struct qm_mc_command *mcc;
  14717. + struct qm_mc_result *mcr;
  14718. + struct qman_portal *p;
  14719. + unsigned long irqflags __maybe_unused;
  14720. + u8 res;
  14721. +
  14722. + p = get_affine_portal();
  14723. + PORTAL_IRQ_LOCK(p, irqflags);
  14724. +
  14725. + mcc = qm_mc_start(&p->p);
  14726. + mcc->lfqmt_config = *opts;
  14727. + qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG);
  14728. + while (!(mcr = qm_mc_result(&p->p)))
  14729. + cpu_relax();
  14730. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  14731. + QM_CEETM_VERB_LFQMT_CONFIG);
  13732. + res = mcr->result;
  13733. + PORTAL_IRQ_UNLOCK(p, irqflags);
  13734. + put_affine_portal();
  13735. +
  14736. + if (res != QM_MCR_RESULT_OK) {
  14737. + pr_err("CEETM: CONFIGURE LFQMT failed\n");
  14738. + return -EIO;
  14739. + }
  14740. + return 0;
  14741. +}
  14742. +
  14743. +int qman_ceetm_query_lfqmt(int lfqid,
  14744. + struct qm_mcr_ceetm_lfqmt_query *lfqmt_query)
  14745. +{
  14746. + struct qm_mc_command *mcc;
  14747. + struct qm_mc_result *mcr;
  14748. + struct qman_portal *p;
  14749. + unsigned long irqflags __maybe_unused;
  14750. + u8 res;
  14751. +
  14752. + p = get_affine_portal();
  14753. + PORTAL_IRQ_LOCK(p, irqflags);
  14754. +
  14755. + mcc = qm_mc_start(&p->p);
  14756. + mcc->lfqmt_query.lfqid = lfqid;
  14757. + qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY);
  14758. + while (!(mcr = qm_mc_result(&p->p)))
  14759. + cpu_relax();
  14760. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY);
  14761. + res = mcr->result;
  14762. + if (res == QM_MCR_RESULT_OK)
  14763. + *lfqmt_query = mcr->lfqmt_query;
  14764. +
  14765. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14766. + put_affine_portal();
  14767. + if (res != QM_MCR_RESULT_OK) {
  14768. + pr_err("CEETM: QUERY LFQMT failed\n");
  14769. + return -EIO;
  14770. + }
  14771. + return 0;
  14772. +}
  14773. +EXPORT_SYMBOL(qman_ceetm_query_lfqmt);
  14774. +
  14775. +static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts)
  14776. +{
  14777. + struct qm_mc_command *mcc;
  14778. + struct qm_mc_result *mcr;
  14779. + struct qman_portal *p;
  14780. + unsigned long irqflags __maybe_unused;
  14781. + u8 res;
  14782. +
  14783. + p = get_affine_portal();
  14784. + PORTAL_IRQ_LOCK(p, irqflags);
  14785. +
  14786. + mcc = qm_mc_start(&p->p);
  14787. + mcc->cq_config = *opts;
  14788. + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG);
  14789. + while (!(mcr = qm_mc_result(&p->p)))
  14790. + cpu_relax();
  14791. + res = mcr->result;
  14792. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG);
  14793. +
  14794. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14795. + put_affine_portal();
  14796. +
  14797. + if (res != QM_MCR_RESULT_OK) {
  14798. + pr_err("CEETM: CONFIGURE CQ failed\n");
  14799. + return -EIO;
  14800. + }
  14801. + return 0;
  14802. +}
  14803. +
  14804. +int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
  14805. + struct qm_mcr_ceetm_cq_query *cq_query)
  14806. +{
  14807. + struct qm_mc_command *mcc;
  14808. + struct qm_mc_result *mcr;
  14809. + struct qman_portal *p;
  14810. + unsigned long irqflags __maybe_unused;
  14811. + u8 res;
  14812. +
  14813. + p = get_affine_portal();
  14814. + PORTAL_IRQ_LOCK(p, irqflags);
  14815. +
  14816. + mcc = qm_mc_start(&p->p);
  14817. + mcc->cq_query.cqid = cpu_to_be16(cqid);
  14818. + mcc->cq_query.dcpid = dcpid;
  14819. + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY);
  14820. + while (!(mcr = qm_mc_result(&p->p)))
  14821. + cpu_relax();
  14822. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY);
  14823. + res = mcr->result;
  14824. + if (res == QM_MCR_RESULT_OK) {
  14825. + *cq_query = mcr->cq_query;
  14826. + hw_cq_query_to_cpu(cq_query);
  14827. + }
  14828. +
  14829. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14830. + put_affine_portal();
  14831. +
  14832. + if (res != QM_MCR_RESULT_OK) {
  14833. + pr_err("CEETM: QUERY CQ failed\n");
  14834. + return -EIO;
  14835. + }
  14836. +
  14837. + return 0;
  14838. +}
  14839. +EXPORT_SYMBOL(qman_ceetm_query_cq);
  14840. +
  14841. +static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts)
  14842. +{
  14843. + struct qm_mc_command *mcc;
  14844. + struct qm_mc_result *mcr;
  14845. + struct qman_portal *p;
  14846. + unsigned long irqflags __maybe_unused;
  14847. + u8 res;
  14848. +
  14849. + p = get_affine_portal();
  14850. + PORTAL_IRQ_LOCK(p, irqflags);
  14851. +
  14852. + mcc = qm_mc_start(&p->p);
  14853. + mcc->dct_config = *opts;
  14854. + qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG);
  14855. + while (!(mcr = qm_mc_result(&p->p)))
  14856. + cpu_relax();
  14857. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG);
  14858. + res = mcr->result;
  14859. +
  14860. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14861. + put_affine_portal();
  14862. +
  14863. + if (res != QM_MCR_RESULT_OK) {
  14864. + pr_err("CEETM: CONFIGURE DCT failed\n");
  14865. + return -EIO;
  14866. + }
  14867. + return 0;
  14868. +}
  14869. +
  14870. +static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts,
  14871. + struct qm_mcr_ceetm_dct_query *dct_query)
  14872. +{
  14873. + struct qm_mc_command *mcc;
  14874. + struct qm_mc_result *mcr;
  14875. + struct qman_portal *p = get_affine_portal();
  14876. + unsigned long irqflags __maybe_unused;
  14877. + u8 res;
  14878. +
  14879. + PORTAL_IRQ_LOCK(p, irqflags);
  14880. +
  14881. + mcc = qm_mc_start(&p->p);
  14882. + mcc->dct_query = *opts;
  14883. + qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY);
  14884. + while (!(mcr = qm_mc_result(&p->p)))
  14885. + cpu_relax();
  14886. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY);
  14887. + res = mcr->result;
  14888. +
  14889. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14890. + put_affine_portal();
  14891. +
  14892. + if (res != QM_MCR_RESULT_OK) {
  14893. + pr_err("CEETM: QUERY DCT failed\n");
  14894. + return -EIO;
  14895. + }
  14896. +
  14897. + *dct_query = mcr->dct_query;
  14898. + return 0;
  14899. +}
  14900. +
  14901. +static int qman_ceetm_configure_class_scheduler(
  14902. + struct qm_mcc_ceetm_class_scheduler_config *opts)
  14903. +{
  14904. + struct qm_mc_command *mcc;
  14905. + struct qm_mc_result *mcr;
  14906. + struct qman_portal *p;
  14907. + unsigned long irqflags __maybe_unused;
  14908. + u8 res;
  14909. +
  14910. + p = get_affine_portal();
  14911. + PORTAL_IRQ_LOCK(p, irqflags);
  14912. +
  14913. + mcc = qm_mc_start(&p->p);
  14914. + mcc->csch_config = *opts;
  14915. + qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
  14916. + while (!(mcr = qm_mc_result(&p->p)))
  14917. + cpu_relax();
  14918. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  14919. + QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
  14920. + res = mcr->result;
  14921. +
  14922. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14923. + put_affine_portal();
  14924. +
  14925. + if (res != QM_MCR_RESULT_OK) {
  14926. + pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n");
  14927. + return -EIO;
  14928. + }
  14929. + return 0;
  14930. +}
  14931. +
  14932. +static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel,
  14933. + struct qm_mcr_ceetm_class_scheduler_query *query)
  14934. +{
  14935. + struct qm_mc_command *mcc;
  14936. + struct qm_mc_result *mcr;
  14937. + struct qman_portal *p;
  14938. + unsigned long irqflags __maybe_unused;
  14939. + u8 res;
  14940. +
  14941. + p = get_affine_portal();
  14942. + PORTAL_IRQ_LOCK(p, irqflags);
  14943. +
  14944. + mcc = qm_mc_start(&p->p);
  14945. + mcc->csch_query.cqcid = channel->idx;
  14946. + mcc->csch_query.dcpid = channel->dcp_idx;
  14947. + qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
  14948. + while (!(mcr = qm_mc_result(&p->p)))
  14949. + cpu_relax();
  14950. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  14951. + QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
  14952. + res = mcr->result;
  14953. +
  14954. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14955. + put_affine_portal();
  14956. +
  14957. + if (res != QM_MCR_RESULT_OK) {
  14958. + pr_err("CEETM: QUERY CLASS SCHEDULER failed\n");
  14959. + return -EIO;
  14960. + }
  14961. + *query = mcr->csch_query;
  14962. + return 0;
  14963. +}
  14964. +
  14965. +static int qman_ceetm_configure_mapping_shaper_tcfc(
  14966. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts)
  14967. +{
  14968. + struct qm_mc_command *mcc;
  14969. + struct qm_mc_result *mcr;
  14970. + struct qman_portal *p;
  14971. + unsigned long irqflags __maybe_unused;
  14972. + u8 res;
  14973. +
  14974. + p = get_affine_portal();
  14975. + PORTAL_IRQ_LOCK(p, irqflags);
  14976. +
  14977. + mcc = qm_mc_start(&p->p);
  14978. + mcc->mst_config = *opts;
  14979. + qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
  14980. + while (!(mcr = qm_mc_result(&p->p)))
  14981. + cpu_relax();
  14982. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  14983. + QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
  14984. + res = mcr->result;
  14985. +
  14986. + PORTAL_IRQ_UNLOCK(p, irqflags);
  14987. + put_affine_portal();
  14988. +
  14989. + if (res != QM_MCR_RESULT_OK) {
  14990. + pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n");
  14991. + return -EIO;
  14992. + }
  14993. + return 0;
  14994. +}
  14995. +
  14996. +static int qman_ceetm_query_mapping_shaper_tcfc(
  14997. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts,
  14998. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response)
  14999. +{
  15000. + struct qm_mc_command *mcc;
  15001. + struct qm_mc_result *mcr;
  15002. + struct qman_portal *p;
  15003. + unsigned long irqflags __maybe_unused;
  15004. + u8 res;
  15005. +
  15006. + p = get_affine_portal();
  15007. + PORTAL_IRQ_LOCK(p, irqflags);
  15008. +
  15009. + mcc = qm_mc_start(&p->p);
  15010. + mcc->mst_query = *opts;
  15011. + qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
  15012. + while (!(mcr = qm_mc_result(&p->p)))
  15013. + cpu_relax();
  15014. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  15015. + QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
  15016. + res = mcr->result;
  15017. +
  15018. + PORTAL_IRQ_UNLOCK(p, irqflags);
  15019. + put_affine_portal();
  15020. +
  15021. + if (res != QM_MCR_RESULT_OK) {
  15022. + pr_err("CEETM: QUERY CHANNEL MAPPING failed\n");
  15023. + return -EIO;
  15024. + }
  15025. +
  15026. + *response = mcr->mst_query;
  15027. + return 0;
  15028. +}
  15029. +
  15030. +static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts)
  15031. +{
  15032. + struct qm_mc_command *mcc;
  15033. + struct qm_mc_result *mcr;
  15034. + struct qman_portal *p;
  15035. + unsigned long irqflags __maybe_unused;
  15036. + u8 res;
  15037. +
  15038. + p = get_affine_portal();
  15039. + PORTAL_IRQ_LOCK(p, irqflags);
  15040. +
  15041. + mcc = qm_mc_start(&p->p);
  15042. + mcc->ccgr_config = *opts;
  15043. +
  15044. + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG);
  15045. + while (!(mcr = qm_mc_result(&p->p)))
  15046. + cpu_relax();
  15047. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG);
  15048. +
  15049. + PORTAL_IRQ_UNLOCK(p, irqflags);
  15050. + put_affine_portal();
  15051. +
  15052. + res = mcr->result;
  15053. + if (res != QM_MCR_RESULT_OK) {
  15054. + pr_err("CEETM: CONFIGURE CCGR failed\n");
  15055. + return -EIO;
  15056. + }
  15057. + return 0;
  15058. +}
  15059. +
  15060. +int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
  15061. + struct qm_mcr_ceetm_ccgr_query *response)
  15062. +{
  15063. + struct qm_mc_command *mcc;
  15064. + struct qm_mc_result *mcr;
  15065. + struct qman_portal *p;
  15066. + unsigned long irqflags __maybe_unused;
  15067. + u8 res;
  15068. +
  15069. + p = get_affine_portal();
  15070. + PORTAL_IRQ_LOCK(p, irqflags);
  15071. +
  15072. + mcc = qm_mc_start(&p->p);
  15073. + mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid);
  15074. + mcc->ccgr_query.dcpid = ccgr_query->dcpid;
  15075. + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
  15076. +
  15077. + while (!(mcr = qm_mc_result(&p->p)))
  15078. + cpu_relax();
  15079. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY);
  15080. + res = mcr->result;
  15081. + if (res == QM_MCR_RESULT_OK) {
  15082. + *response = mcr->ccgr_query;
  15083. + hw_ccgr_query_to_cpu(response);
  15084. + }
  15085. +
  15086. + PORTAL_IRQ_UNLOCK(p, irqflags);
  15087. + put_affine_portal();
  15088. + if (res != QM_MCR_RESULT_OK) {
  15089. + pr_err("CEETM: QUERY CCGR failed\n");
  15090. + return -EIO;
  15091. + }
  15092. + return 0;
  15093. +}
  15094. +EXPORT_SYMBOL(qman_ceetm_query_ccgr);
  15095. +
  15096. +static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq,
  15097. + u8 command_type, u16 xsfdr,
  15098. + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr)
  15099. +{
  15100. + struct qm_mc_command *mcc;
  15101. + struct qm_mc_result *mcr;
  15102. + struct qman_portal *p;
  15103. + unsigned long irqflags __maybe_unused;
  15104. + u8 res;
  15105. +
  15106. + p = get_affine_portal();
  15107. + PORTAL_IRQ_LOCK(p, irqflags);
  15108. +
  15109. + mcc = qm_mc_start(&p->p);
  15110. + switch (command_type) {
  15111. + case 0:
  15112. + case 1:
  15113. + mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx;
  15114. + break;
  15115. + case 2:
  15116. + mcc->cq_ppxr.xsfdr = xsfdr;
  15117. + break;
  15118. + default:
  15119. + break;
  15120. + }
  15121. + mcc->cq_ppxr.ct = command_type;
  15122. + mcc->cq_ppxr.dcpid = cq->parent->dcp_idx;
  15123. + qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
  15124. + while (!(mcr = qm_mc_result(&p->p)))
  15125. + cpu_relax();
  15126. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  15127. + QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
  15128. +
  15129. + PORTAL_IRQ_UNLOCK(p, irqflags);
  15130. + put_affine_portal();
  15131. +
  15132. + res = mcr->result;
  15133. + if (res != QM_MCR_RESULT_OK) {
  15134. + pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n");
  15135. + return -EIO;
  15136. + }
  15137. + *cq_ppxr = mcr->cq_ppxr;
  15138. + return 0;
  15139. +}
  15140. +
  15141. +static int qman_ceetm_query_statistics(u16 cid,
  15142. + enum qm_dc_portal dcp_idx,
  15143. + u16 command_type,
  15144. + struct qm_mcr_ceetm_statistics_query *query_result)
  15145. +{
  15146. + struct qm_mc_command *mcc;
  15147. + struct qm_mc_result *mcr;
  15148. + struct qman_portal *p;
  15149. + unsigned long irqflags __maybe_unused;
  15150. + u8 res;
  15151. +
  15152. + p = get_affine_portal();
  15153. + PORTAL_IRQ_LOCK(p, irqflags);
  15154. +
  15155. + mcc = qm_mc_start(&p->p);
  15156. + mcc->stats_query_write.cid = cid;
  15157. + mcc->stats_query_write.dcpid = dcp_idx;
  15158. + mcc->stats_query_write.ct = command_type;
  15159. + qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
  15160. +
  15161. + while (!(mcr = qm_mc_result(&p->p)))
  15162. + cpu_relax();
  15163. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  15164. + QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
  15165. +
  15166. + PORTAL_IRQ_UNLOCK(p, irqflags);
  15167. + put_affine_portal();
  15168. +
  15169. + res = mcr->result;
  15170. + if (res != QM_MCR_RESULT_OK) {
  15171. + pr_err("CEETM: STATISTICS QUERY failed\n");
  15172. + return -EIO;
  15173. + }
  15174. + *query_result = mcr->stats_query;
  15175. + return 0;
  15176. +}
  15177. +
  15178. +int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
  15179. + u16 command_type, u64 frame_count,
  15180. + u64 byte_count)
  15181. +{
  15182. + struct qm_mc_command *mcc;
  15183. + struct qm_mc_result *mcr;
  15184. + struct qman_portal *p;
  15185. + unsigned long irqflags __maybe_unused;
  15186. + u8 res;
  15187. +
  15188. + p = get_affine_portal();
  15189. + PORTAL_IRQ_LOCK(p, irqflags);
  15190. +
  15191. + mcc = qm_mc_start(&p->p);
  15192. + mcc->stats_query_write.cid = cid;
  15193. + mcc->stats_query_write.dcpid = dcp_idx;
  15194. + mcc->stats_query_write.ct = command_type;
  15195. + mcc->stats_query_write.frm_cnt = frame_count;
  15196. + mcc->stats_query_write.byte_cnt = byte_count;
  15197. + qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
  15198. +
  15199. + while (!(mcr = qm_mc_result(&p->p)))
  15200. + cpu_relax();
  15201. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  15202. + QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
  15203. +
  15204. + PORTAL_IRQ_UNLOCK(p, irqflags);
  15205. + put_affine_portal();
  15206. +
  15207. + res = mcr->result;
  15208. + if (res != QM_MCR_RESULT_OK) {
  15209. + pr_err("CEETM: STATISTICS WRITE failed\n");
  15210. + return -EIO;
  15211. + }
  15212. + return 0;
  15213. +}
  15214. +EXPORT_SYMBOL(qman_ceetm_query_write_statistics);
  15215. +
  15216. +int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate,
  15217. + int rounding)
  15218. +{
  15219. + u16 pres;
  15220. + u64 temp;
  15221. + u64 qman_freq;
  15222. + int ret;
  15223. +
  15224. + /* Read PRES from CEET_CFG_PRES register */
  15225. + ret = qman_ceetm_get_prescaler(&pres);
  15226. + if (ret)
  15227. + return -EINVAL;
  15228. +
  15229. + ret = qm_get_clock(&qman_freq);
  15230. + if (ret)
  15231. + return -EINVAL;
  15232. +
  15233. + /* token-rate = bytes-per-second * update-reference-period
  15234. + *
  15235. + * Where token-rate is N/8192 for an integer N, and
  15236. + * update-reference-period is (2^22)/(PRES*QHz), where PRES
  15237. + * is the prescaler value and QHz is the QMan clock frequency.
  15238. + * So:
  15239. + *
  15240. + * token-rate = (bytes-per-second*2^22)/(PRES*QHz)
  15241. + *
  15242. + * Converting to bits-per-second gives:
  15243. + *
  15244. + * token-rate = (bps*2^19) / (PRES*QHz)
  15245. + * N = (bps*2^32) / (PRES*QHz)
  15246. + *
  15247. + * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps
  15248. + * (yet minimise rounding error if 'bps' is small), we reorganise
  15249. + * the formula to use two 16-bit shifts rather than one 32-bit shift:
  15250. + * N = (((bps*2^16)/PRES)*2^16)/QHz
  15251. + */
  15252. + temp = ROUNDING((bps << 16), pres, rounding);
  15253. + temp = ROUNDING((temp << 16), qman_freq, rounding);
  15254. + token_rate->whole = temp >> 13;
  15255. + token_rate->fraction = temp & (((u64)1 << 13) - 1);
  15256. + return 0;
  15257. +}
  15258. +EXPORT_SYMBOL(qman_ceetm_bps2tokenrate);
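As a rough worked example of the formulas in the comment above, assuming (purely for illustration) a 600 MHz QMan clock and a prescaler value of 0x3FF; the real values come from qm_get_clock() and qman_ceetm_get_prescaler():

	/* N = (bps*2^32)/(PRES*QHz)
	 *   = (1e9 * 2^32) / (1023 * 600e6) ~= 6997340,
	 * so whole = N >> 13 ~= 854 and fraction = N & 0x1FFF ~= 1372.
	 */
	struct qm_ceetm_rate rate;

	if (!qman_ceetm_bps2tokenrate(1000000000ULL, &rate, 0))
		pr_info("1 Gbps -> token rate %u + %u/8192\n",
			rate.whole, rate.fraction);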
  15259. +
  15260. +int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps,
  15261. + int rounding)
  15262. +{
  15263. + u16 pres;
  15264. + u64 temp;
  15265. + u64 qman_freq;
  15266. + int ret;
  15267. +
  15268. + /* Read PRES from CEET_CFG_PRES register */
  15269. + ret = qman_ceetm_get_prescaler(&pres);
  15270. + if (ret)
  15271. + return -EINVAL;
  15272. +
  15273. + ret = qm_get_clock(&qman_freq);
  15274. + if (ret)
  15275. + return -EINVAL;
  15276. +
  15277. + /* bytes-per-second = token-rate / update-reference-period
  15278. + *
  15279. + * where "token-rate" is N/8192 for an integer N, and
  15280. + * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is
  15281. + * the prescaler value and QHz is the QMan clock frequency. So:
  15282. + *
  15283. + * bytes-per-second = (N/8192) / (4194304/(PRES*QHz))
  15284. + * = N*PRES*QHz / (4194304*8192)
  15285. + * = N*PRES*QHz / (2^35)
  15286. + *
  15287. + * Converting to bits-per-second gives:
  15288. + *
  15289. + * bps = N*PRES*QHz / (2^32)
  15290. + *
  15291. + * Note, the numerator has a maximum width of 72 bits! So to
  15292. + * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum
  15293. + * width 48 bits) divided by 2^16 (reducing to a maximum of 32 bits),
  15294. + * before multiplying by N, keeping the intermediate within 64 bits:
  15295. + *
  15296. + * temp = PRES*QHz / (2^16)
  15297. + * bps = temp*N / (2^16)
  15298. + */
  15299. + temp = ROUNDING(qman_freq * pres, (u64)1 << 16, rounding);
  15300. + temp *= ((token_rate->whole << 13) + token_rate->fraction);
  15301. + *bps = ROUNDING(temp, (u64)(1) << 16, rounding);
  15302. + return 0;
  15303. +}
  15304. +EXPORT_SYMBOL(qman_ceetm_tokenrate2bps);
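Going back the other way with the same assumed clock and prescaler, the inverse helper recovers approximately the original rate (rounding aside); the initializer below simply reuses the 1 Gbps token rate from the sketch above:

	struct qm_ceetm_rate rate = { .whole = 854, .fraction = 1372 };
	u64 bps;

	if (!qman_ceetm_tokenrate2bps(&rate, &bps, 0))
		pr_info("token rate maps back to ~%llu bps\n", bps);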
  15305. +
  15306. +int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx,
  15307. + unsigned int sp_idx)
  15308. +{
  15309. + struct qm_ceetm_sp *p;
  15310. +
  15311. + DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) ||
  15312. + (dcp_idx == qm_dc_portal_fman1));
  15313. +
  15314. + if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) ||
  15315. + (sp_idx > (qman_ceetms[dcp_idx].sp_range[0] +
  15316. + qman_ceetms[dcp_idx].sp_range[1]))) {
  15317. + pr_err("Sub-portal index doesn't exist\n");
  15318. + return -EINVAL;
  15319. + }
  15320. +
  15321. + list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) {
  15322. + if ((p->idx == sp_idx) && (p->is_claimed == 0)) {
  15323. + p->is_claimed = 1;
  15324. + *sp = p;
  15325. + return 0;
  15326. + }
  15327. + }
  15328. + pr_err("The sub-portal#%d is not available!\n", sp_idx);
  15329. + return -ENODEV;
  15330. +}
  15331. +EXPORT_SYMBOL(qman_ceetm_sp_claim);
  15332. +
  15333. +int qman_ceetm_sp_release(struct qm_ceetm_sp *sp)
  15334. +{
  15335. + struct qm_ceetm_sp *p;
  15336. +
  15337. + if (sp->lni && sp->lni->is_claimed == 1) {
  15338. + pr_err("The dependency of sub-portal has not been released!\n");
  15339. + return -EBUSY;
  15340. + }
  15341. +
  15342. + list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) {
  15343. + if (p->idx == sp->idx) {
  15344. + p->is_claimed = 0;
  15345. + p->lni = NULL;
  15346. + }
  15347. + }
  15348. + /* Disable CEETM mode of this sub-portal */
  15349. + qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx);
  15350. +
  15351. + return 0;
  15352. +}
  15353. +EXPORT_SYMBOL(qman_ceetm_sp_release);
  15354. +
  15355. +int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx,
  15356. + unsigned int lni_idx)
  15357. +{
  15358. + struct qm_ceetm_lni *p;
  15359. +
  15360. + if ((lni_idx < qman_ceetms[dcp_idx].lni_range[0]) ||
  15361. + (lni_idx > (qman_ceetms[dcp_idx].lni_range[0] +
  15362. + qman_ceetms[dcp_idx].lni_range[1]))) {
  15363. + pr_err("The lni index is out of range\n");
  15364. + return -EINVAL;
  15365. + }
  15366. +
  15367. + list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) {
  15368. + if ((p->idx == lni_idx) && (p->is_claimed == 0)) {
  15369. + *lni = p;
  15370. + p->is_claimed = 1;
  15371. + return 0;
  15372. + }
  15373. + }
  15374. +
  15375. + pr_err("The LNI#%d is not available!\n", lni_idx);
  15376. + return -EINVAL;
  15377. +}
  15378. +EXPORT_SYMBOL(qman_ceetm_lni_claim);
  15379. +
  15380. +int qman_ceetm_lni_release(struct qm_ceetm_lni *lni)
  15381. +{
  15382. + struct qm_ceetm_lni *p;
  15383. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15384. +
  15385. + if (!list_empty(&lni->channels)) {
  15386. + pr_err("The LNI dependencies are not released!\n");
  15387. + return -EBUSY;
  15388. + }
  15389. +
  15390. + list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) {
  15391. + if (p->idx == lni->idx) {
  15392. + p->shaper_enable = 0;
  15393. + p->shaper_couple = 0;
  15394. + p->cr_token_rate.whole = 0;
  15395. + p->cr_token_rate.fraction = 0;
  15396. + p->er_token_rate.whole = 0;
  15397. + p->er_token_rate.fraction = 0;
  15398. + p->cr_token_bucket_limit = 0;
  15399. + p->er_token_bucket_limit = 0;
  15400. + p->is_claimed = 0;
  15401. + }
  15402. + }
  15403. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15404. + config_opts.dcpid = lni->dcp_idx;
  15405. + memset(&config_opts.shaper_config, 0,
  15406. + sizeof(config_opts.shaper_config));
  15407. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15408. +}
  15409. +EXPORT_SYMBOL(qman_ceetm_lni_release);
  15410. +
  15411. +int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni)
  15412. +{
  15413. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15414. +
  15415. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
  15416. + config_opts.dcpid = sp->dcp_idx;
  15417. + config_opts.sp_mapping.map_lni_id = lni->idx;
  15418. + sp->lni = lni;
  15419. +
  15420. + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts))
  15421. + return -EINVAL;
  15422. +
  15423. + /* Enable CEETM mode for this sub-portal */
  15424. + return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx);
  15425. +}
  15426. +EXPORT_SYMBOL(qman_ceetm_sp_set_lni);
  15427. +
  15428. +int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx)
  15429. +{
  15430. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15431. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15432. +
  15433. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
  15434. + query_opts.dcpid = sp->dcp_idx;
  15435. + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
  15436. + pr_err("Can't get SP <-> LNI mapping\n");
  15437. + return -EINVAL;
  15438. + }
  15439. + *lni_idx = query_result.sp_mapping_query.map_lni_id;
  15440. + sp->lni->idx = query_result.sp_mapping_query.map_lni_id;
  15441. + return 0;
  15442. +}
  15443. +EXPORT_SYMBOL(qman_ceetm_sp_get_lni);
  15444. +
  15445. +int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
  15446. + int oal)
  15447. +{
  15448. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15449. +
  15450. + lni->shaper_enable = 1;
  15451. + lni->shaper_couple = coupled;
  15452. + lni->oal = oal;
  15453. +
  15454. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15455. + config_opts.dcpid = lni->dcp_idx;
  15456. + config_opts.shaper_config.cpl = coupled;
  15457. + config_opts.shaper_config.oal = oal;
  15458. + config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole
  15459. + << 13) | lni->cr_token_rate.fraction);
  15460. + config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole
  15461. + << 13) | lni->er_token_rate.fraction);
  15462. + config_opts.shaper_config.crtbl =
  15463. + cpu_to_be16(lni->cr_token_bucket_limit);
  15464. + config_opts.shaper_config.ertbl =
  15465. + cpu_to_be16(lni->er_token_bucket_limit);
  15466. + config_opts.shaper_config.mps = 60;
  15467. +
  15468. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15469. +}
  15470. +EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper);
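Taken together, the sub-portal and LNI helpers above follow a claim, map, then shape sequence. A minimal sketch, with the sub-portal/LNI indices, the 1 Gbps committed rate and the token bucket limit all chosen purely for illustration:

	static int ceetm_lni_setup_example(void)
	{
		struct qm_ceetm_sp *sp;
		struct qm_ceetm_lni *lni;
		int ret;

		/* Claim a sub-portal and an LNI on FMan0's DCP, then bind them. */
		ret = qman_ceetm_sp_claim(&sp, qm_dc_portal_fman0, 0);
		if (ret)
			return ret;
		ret = qman_ceetm_lni_claim(&lni, qm_dc_portal_fman0, 0);
		if (ret)
			return ret;
		ret = qman_ceetm_sp_set_lni(sp, lni);
		if (ret)
			return ret;

		/* Uncoupled shaper, no overhead accounting, then a 1 Gbps
		 * committed rate with a 0x1000 token bucket limit.
		 */
		ret = qman_ceetm_lni_enable_shaper(lni, 0, 0);
		if (ret)
			return ret;
		return qman_ceetm_lni_set_commit_rate_bps(lni, 1000000000ULL,
							  0x1000);
	}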
  15471. +
  15472. +int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni)
  15473. +{
  15474. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15475. +
  15476. + if (!lni->shaper_enable) {
  15477. + pr_err("The shaper has been disabled\n");
  15478. + return -EINVAL;
  15479. + }
  15480. +
  15481. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15482. + config_opts.dcpid = lni->dcp_idx;
  15483. + config_opts.shaper_config.cpl = (lni->shaper_couple << 7) | lni->oal;
  15484. + config_opts.shaper_config.crtbl = cpu_to_be16(lni->cr_token_bucket_limit);
  15485. + config_opts.shaper_config.ertbl = cpu_to_be16(lni->er_token_bucket_limit);
  15486. + /* Set CR/ER rate with all 1's to configure an infinite rate, thus
  15487. + * disable the shaping.
  15488. + */
  15489. + config_opts.shaper_config.crtcr = 0xFFFFFF;
  15490. + config_opts.shaper_config.ertcr = 0xFFFFFF;
  15491. + config_opts.shaper_config.mps = 60;
  15492. + lni->shaper_enable = 0;
  15493. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15494. +}
  15495. +EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper);
  15496. +
  15497. +int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni)
  15498. +{
  15499. + return lni->shaper_enable;
  15500. +}
  15501. +EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled);
  15502. +
  15503. +int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
  15504. + const struct qm_ceetm_rate *token_rate,
  15505. + u16 token_limit)
  15506. +{
  15507. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15508. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15509. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15510. + int ret;
  15511. +
  15512. + lni->cr_token_rate.whole = token_rate->whole;
  15513. + lni->cr_token_rate.fraction = token_rate->fraction;
  15514. + lni->cr_token_bucket_limit = token_limit;
  15515. + if (!lni->shaper_enable)
  15516. + return 0;
  15517. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15518. + query_opts.dcpid = lni->dcp_idx;
  15519. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
  15520. + &query_result);
  15521. + if (ret) {
  15522. + pr_err("Fail to get current LNI shaper setting\n");
  15523. + return -EINVAL;
  15524. + }
  15525. +
  15526. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15527. + config_opts.dcpid = lni->dcp_idx;
  15528. + config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13)
  15529. + | (token_rate->fraction));
  15530. + config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
  15531. + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
  15532. + config_opts.shaper_config.oal = query_result.shaper_query.oal;
  15533. + config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
  15534. + config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
  15535. + config_opts.shaper_config.mps = query_result.shaper_query.mps;
  15536. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15537. +}
  15538. +EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate);
  15539. +
  15540. +int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
  15541. + u64 bps,
  15542. + u16 token_limit)
  15543. +{
  15544. + struct qm_ceetm_rate token_rate;
  15545. + int ret;
  15546. +
  15547. + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
  15548. + if (ret) {
  15549. + pr_err("Can not convert bps to token rate\n");
  15550. + return -EINVAL;
  15551. + }
  15552. +
  15553. + return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit);
  15554. +}
  15555. +EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps);
  15556. +
  15557. +int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
  15558. + struct qm_ceetm_rate *token_rate,
  15559. + u16 *token_limit)
  15560. +{
  15561. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15562. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15563. + int ret;
  15564. +
  15565. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15566. + query_opts.dcpid = lni->dcp_idx;
  15567. +
  15568. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  15569. + if (ret) {
  15570. + pr_err("The LNI CR rate or limit is not set\n");
  15571. + return -EINVAL;
  15572. + }
  15573. + token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
  15574. + token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
  15575. + 0x1FFF;
  15576. + *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
  15577. + return 0;
  15578. +}
  15579. +EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate);
  15580. +
  15581. +int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
  15582. + u64 *bps, u16 *token_limit)
  15583. +{
  15584. + struct qm_ceetm_rate token_rate;
  15585. + int ret;
  15586. +
  15587. + ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit);
  15588. + if (ret) {
  15589. + pr_err("The LNI CR rate or limit is not available\n");
  15590. + return -EINVAL;
  15591. + }
  15592. +
  15593. + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
  15594. +}
  15595. +EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps);
  15596. +
  15597. +int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
  15598. + const struct qm_ceetm_rate *token_rate,
  15599. + u16 token_limit)
  15600. +{
  15601. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15602. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15603. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15604. + int ret;
  15605. +
  15606. + lni->er_token_rate.whole = token_rate->whole;
  15607. + lni->er_token_rate.fraction = token_rate->fraction;
  15608. + lni->er_token_bucket_limit = token_limit;
  15609. + if (!lni->shaper_enable)
  15610. + return 0;
  15611. +
  15612. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15613. + query_opts.dcpid = lni->dcp_idx;
  15614. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
  15615. + &query_result);
  15616. + if (ret) {
  15617. + pr_err("Fail to get current LNI shaper setting\n");
  15618. + return -EINVAL;
  15619. + }
  15620. +
  15621. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15622. + config_opts.dcpid = lni->dcp_idx;
  15623. + config_opts.shaper_config.ertcr = cpu_to_be24(
  15624. + (token_rate->whole << 13) | (token_rate->fraction));
  15625. + config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
  15626. + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
  15627. + config_opts.shaper_config.oal = query_result.shaper_query.oal;
  15628. + config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
  15629. + config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
  15630. + config_opts.shaper_config.mps = query_result.shaper_query.mps;
  15631. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15632. +}
  15633. +EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate);
  15634. +
  15635. +int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
  15636. + u64 bps,
  15637. + u16 token_limit)
  15638. +{
  15639. + struct qm_ceetm_rate token_rate;
  15640. + int ret;
  15641. +
  15642. + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
  15643. + if (ret) {
  15644. + pr_err("Can not convert bps to token rate\n");
  15645. + return -EINVAL;
  15646. + }
  15647. + return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit);
  15648. +}
  15649. +EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps);
  15650. +
  15651. +int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
  15652. + struct qm_ceetm_rate *token_rate,
  15653. + u16 *token_limit)
  15654. +{
  15655. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15656. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15657. + int ret;
  15658. +
  15659. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
  15660. + query_opts.dcpid = lni->dcp_idx;
  15661. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  15662. + if (ret) {
  15663. + pr_err("The LNI ER rate or limit is not set\n");
  15664. + return -EINVAL;
  15665. + }
  15666. + token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
  15667. + token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
  15668. + 0x1FFF;
  15669. + *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
  15670. + return 0;
  15671. +}
  15672. +EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate);
  15673. +
  15674. +int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
  15675. + u64 *bps, u16 *token_limit)
  15676. +{
  15677. + struct qm_ceetm_rate token_rate;
  15678. + int ret;
  15679. +
  15680. + ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit);
  15681. + if (ret) {
  15682. + pr_err("The LNI ER rate or limit is not available\n");
  15683. + return -EINVAL;
  15684. + }
  15685. +
  15686. + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
  15687. +}
  15688. +EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps);
  15689. +
  15690. +#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - (n)) * 4)
  15691. +#define QMAN_CEETM_LNITCFCC_ENABLE 0x8
  15692. +int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
  15693. + unsigned int cq_level,
  15694. + int traffic_class)
  15695. +{
  15696. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15697. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15698. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15699. + u64 lnitcfcc;
  15700. +
  15701. + if ((cq_level > 15) || (traffic_class > 7)) {
  15702. + pr_err("The CQ or traffic class id is out of range\n");
  15703. + return -EINVAL;
  15704. + }
  15705. +
  15706. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
  15707. + query_opts.dcpid = lni->dcp_idx;
  15708. + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
  15709. + pr_err("Fail to query tcfcc\n");
  15710. + return -EINVAL;
  15711. + }
  15712. +
  15713. + lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc);
  15714. + if (traffic_class == -1) {
  15715. + /* disable tcfc for this CQ */
  15716. + lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE <<
  15717. + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
  15718. + } else {
  15719. + lnitcfcc &= ~((u64)0xF <<
  15720. + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
  15721. + lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE |
  15722. + traffic_class)) <<
  15723. + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level);
  15724. + }
  15725. + config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc);
  15726. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
  15727. + config_opts.dcpid = lni->dcp_idx;
  15728. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15729. +}
  15730. +EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc);
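To make the lnitcfcc bit layout concrete: each CQ level owns one 4-bit field, with level 0 in the most-significant nibble. For example (an illustrative call, assuming a previously claimed lni), mapping CQ level 2 to traffic class 5:

	/* Writes the nibble (QMAN_CEETM_LNITCFCC_ENABLE | 5) = 0xD at bit
	 * offset QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(2) = (15 - 2) * 4 = 52.
	 */
	qman_ceetm_lni_set_tcfcc(lni, 2, 5);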
  15731. +
  15732. +#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7
  15733. +int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level,
  15734. + int *traffic_class)
  15735. +{
  15736. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15737. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15738. + int ret;
  15739. + u8 lnitcfcc;
  15740. +
  15741. + if (cq_level > 15) {
  15742. + pr_err("the CQ level is out of range\n");
  15743. + return -EINVAL;
  15744. + }
  15745. +
  15746. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
  15747. + query_opts.dcpid = lni->dcp_idx;
  15748. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  15749. + if (ret)
  15750. + return ret;
  15751. + lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >>
  15752. + QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
  15753. + if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE)
  15754. + *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK;
  15755. + else
  15756. + *traffic_class = -1;
  15757. + return 0;
  15758. +}
  15759. +EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc);
  15760. +
  15761. +int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
  15762. + struct qm_ceetm_lni *lni)
  15763. +{
  15764. + struct qm_ceetm_channel *p;
  15765. + u32 channel_idx;
  15766. + int ret = 0;
  15767. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15768. +
  15769. + if (lni->dcp_idx == qm_dc_portal_fman0) {
  15770. + ret = qman_alloc_ceetm0_channel(&channel_idx);
  15771. + } else if (lni->dcp_idx == qm_dc_portal_fman1) {
  15772. + ret = qman_alloc_ceetm1_channel(&channel_idx);
  15773. + } else {
  15774. + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
  15775. + lni->dcp_idx);
  15776. + return -EINVAL;
  15777. + }
  15778. +
  15779. + if (ret) {
  15780. + pr_err("The is no channel available for LNI#%d\n", lni->idx);
  15781. + return -ENODEV;
  15782. + }
  15783. +
  15784. + p = kzalloc(sizeof(*p), GFP_KERNEL);
  15785. + if (!p)
  15786. + return -ENOMEM;
  15787. + p->idx = channel_idx;
  15788. + p->dcp_idx = lni->dcp_idx;
  15789. + list_add_tail(&p->node, &lni->channels);
  15790. + INIT_LIST_HEAD(&p->class_queues);
  15791. + INIT_LIST_HEAD(&p->ccgs);
  15792. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
  15793. + channel_idx);
  15794. + config_opts.dcpid = lni->dcp_idx;
  15795. + config_opts.channel_mapping.map_lni_id = lni->idx;
  15796. + config_opts.channel_mapping.map_shaped = 0;
  15797. + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
  15798. + pr_err("Can't map channel#%d for LNI#%d\n",
  15799. + channel_idx, lni->idx);
  15800. + return -EINVAL;
  15801. + }
  15802. + *channel = p;
  15803. + return 0;
  15804. +}
  15805. +EXPORT_SYMBOL(qman_ceetm_channel_claim);
  15806. +
  15807. +int qman_ceetm_channel_release(struct qm_ceetm_channel *channel)
  15808. +{
  15809. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15810. + if (!list_empty(&channel->class_queues)) {
  15811. + pr_err("CEETM channel#%d has class queue unreleased!\n",
  15812. + channel->idx);
  15813. + return -EBUSY;
  15814. + }
  15815. + if (!list_empty(&channel->ccgs)) {
  15816. + pr_err("CEETM channel#%d has ccg unreleased!\n",
  15817. + channel->idx);
  15818. + return -EBUSY;
  15819. + }
  15820. +
  15821. + /* channel->dcp_idx corresponds to known fman validation */
  15822. + if ((channel->dcp_idx != qm_dc_portal_fman0) &&
  15823. + (channel->dcp_idx != qm_dc_portal_fman1)) {
  15824. + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
  15825. + channel->dcp_idx);
  15826. + return -EINVAL;
  15827. + }
  15828. +
  15829. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  15830. + channel->idx);
  15831. + config_opts.dcpid = channel->dcp_idx;
  15832. + memset(&config_opts.shaper_config, 0,
  15833. + sizeof(config_opts.shaper_config));
  15834. + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
  15835. + pr_err("Can't reset channel shapping parameters\n");
  15836. + return -EINVAL;
  15837. + }
  15838. +
  15839. + if (channel->dcp_idx == qm_dc_portal_fman0) {
  15840. + qman_release_ceetm0_channelid(channel->idx);
  15841. + } else if (channel->dcp_idx == qm_dc_portal_fman1) {
  15842. + qman_release_ceetm1_channelid(channel->idx);
  15843. + } else {
  15844. + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
  15845. + channel->dcp_idx);
  15846. + return -EINVAL;
  15847. + }
  15848. + list_del(&channel->node);
  15849. + kfree(channel);
  15850. +
  15851. + return 0;
  15852. +}
  15853. +EXPORT_SYMBOL(qman_ceetm_channel_release);
  15854. +
  15855. +int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
  15856. + int coupled)
  15857. +{
  15858. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15859. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15860. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15861. +
  15862. + if (channel->shaper_enable == 1) {
  15863. + pr_err("This channel shaper has been enabled!\n");
  15864. + return -EINVAL;
  15865. + }
  15866. +
  15867. + channel->shaper_enable = 1;
  15868. + channel->shaper_couple = coupled;
  15869. +
  15870. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
  15871. + channel->idx);
  15872. + query_opts.dcpid = (u8)channel->dcp_idx;
  15873. +
  15874. + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
  15875. + pr_err("Can't query channel mapping\n");
  15876. + return -EINVAL;
  15877. + }
  15878. +
  15879. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
  15880. + channel->idx);
  15881. + config_opts.dcpid = channel->dcp_idx;
  15882. + config_opts.channel_mapping.map_lni_id =
  15883. + query_result.channel_mapping_query.map_lni_id;
  15884. + config_opts.channel_mapping.map_shaped = 1;
  15885. + if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
  15886. + pr_err("Can't enable shaper for channel #%d\n", channel->idx);
  15887. + return -EINVAL;
  15888. + }
  15889. +
  15890. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  15891. + channel->idx);
  15892. + config_opts.shaper_config.cpl = coupled;
  15893. + config_opts.shaper_config.crtcr = cpu_to_be24((channel->cr_token_rate.
  15894. + whole << 13) |
  15895. + channel->cr_token_rate.fraction);
  15896. + config_opts.shaper_config.ertcr = cpu_to_be24((channel->er_token_rate.
  15897. + whole << 13) |
  15898. + channel->er_token_rate.fraction);
  15899. + config_opts.shaper_config.crtbl =
  15900. + cpu_to_be16(channel->cr_token_bucket_limit);
  15901. + config_opts.shaper_config.ertbl =
  15902. + cpu_to_be16(channel->er_token_bucket_limit);
  15903. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15904. +}
  15905. +EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper);
  15906. +
  15907. +int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel)
  15908. +{
  15909. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15910. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15911. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15912. +
  15913. +
  15914. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
  15915. + channel->idx);
  15916. + query_opts.dcpid = channel->dcp_idx;
  15917. +
  15918. + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
  15919. + pr_err("Can't query channel mapping\n");
  15920. + return -EINVAL;
  15921. + }
  15922. +
  15923. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
  15924. + channel->idx);
  15925. + config_opts.dcpid = channel->dcp_idx;
  15926. + config_opts.channel_mapping.map_shaped = 0;
  15927. + config_opts.channel_mapping.map_lni_id =
  15928. + query_result.channel_mapping_query.map_lni_id;
  15929. +
  15930. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15931. +}
  15932. +EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper);
  15933. +
  15934. +int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel)
  15935. +{
  15936. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15937. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15938. +
  15939. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING | channel->idx);
  15940. + query_opts.dcpid = channel->dcp_idx;
  15941. +
  15942. + if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
  15943. + pr_err("Can't query channel mapping\n");
  15944. + return -EINVAL;
  15945. + }
  15946. +
  15947. + return query_result.channel_mapping_query.map_shaped;
  15948. +}
  15949. +EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled);
  15950. +
  15951. +int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
  15952. + const struct qm_ceetm_rate *token_rate,
  15953. + u16 token_limit)
  15954. +{
  15955. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  15956. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  15957. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  15958. + int ret;
  15959. +
  15960. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  15961. + channel->idx);
  15962. + query_opts.dcpid = channel->dcp_idx;
  15963. +
  15964. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  15965. + if (ret) {
  15966. + pr_err("Fail to get the current channel shaper setting\n");
  15967. + return -EINVAL;
  15968. + }
  15969. +
  15970. + channel->cr_token_rate.whole = token_rate->whole;
  15971. + channel->cr_token_rate.fraction = token_rate->fraction;
  15972. + channel->cr_token_bucket_limit = token_limit;
  15973. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  15974. + channel->idx);
  15975. + config_opts.dcpid = channel->dcp_idx;
  15976. + config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole
  15977. + << 13) | (token_rate->fraction));
  15978. + config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
  15979. + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
  15980. + config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
  15981. + config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
  15982. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  15983. +}
  15984. +EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate);
  15985. +
  15986. +int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
  15987. + u64 bps, u16 token_limit)
  15988. +{
  15989. + struct qm_ceetm_rate token_rate;
  15990. + int ret;
  15991. +
  15992. + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
  15993. + if (ret) {
  15994. + pr_err("Can not convert bps to token rate\n");
  15995. + return -EINVAL;
  15996. + }
  15997. + return qman_ceetm_channel_set_commit_rate(channel, &token_rate,
  15998. + token_limit);
  15999. +}
  16000. +EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps);
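A corresponding sketch for the per-channel shaper; the 1 Gbps rate and 0x1000 token bucket limit are again purely illustrative, and the LNI is assumed to have been claimed as in the earlier sketch:

	static int ceetm_channel_shaper_example(struct qm_ceetm_lni *lni)
	{
		struct qm_ceetm_channel *channel;
		int ret;

		ret = qman_ceetm_channel_claim(&channel, lni);
		if (ret)
			return ret;

		/* Uncoupled CR/ER shaper, then a 1 Gbps committed rate. */
		ret = qman_ceetm_channel_enable_shaper(channel, 0);
		if (ret)
			return ret;
		return qman_ceetm_channel_set_commit_rate_bps(channel,
							      1000000000ULL,
							      0x1000);
	}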
  16001. +
  16002. +int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
  16003. + struct qm_ceetm_rate *token_rate,
  16004. + u16 *token_limit)
  16005. +{
  16006. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  16007. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  16008. + int ret;
  16009. +
  16010. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  16011. + channel->idx);
  16012. + query_opts.dcpid = channel->dcp_idx;
  16013. +
  16014. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  16015. + if (ret || !query_result.shaper_query.crtcr ||
  16016. + !query_result.shaper_query.crtbl) {
  16017. + pr_err("The channel commit rate or limit is not set\n");
  16018. + return -EINVAL;
  16019. + }
  16020. + token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
  16021. + token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
  16022. + 0x1FFF;
  16023. + *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
  16024. + return 0;
  16025. +}
  16026. +EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate);
  16027. +
  16028. +int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
  16029. + u64 *bps, u16 *token_limit)
  16030. +{
  16031. + struct qm_ceetm_rate token_rate;
  16032. + int ret;
  16033. +
  16034. + ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate,
  16035. + token_limit);
  16036. + if (ret) {
  16037. + pr_err("The channel CR rate or limit is not available\n");
  16038. + return -EINVAL;
  16039. + }
  16040. +
  16041. + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
  16042. +}
  16043. +EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps);
  16044. +
  16045. +int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
  16046. + const struct qm_ceetm_rate *token_rate,
  16047. + u16 token_limit)
  16048. +{
  16049. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  16050. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  16051. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  16052. + int ret;
  16053. +
  16054. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  16055. + channel->idx);
  16056. + query_opts.dcpid = channel->dcp_idx;
  16057. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  16058. + if (ret) {
  16059. + pr_err("Fail to get the current channel shaper setting\n");
  16060. + return -EINVAL;
  16061. + }
  16062. +
  16063. + channel->er_token_rate.whole = token_rate->whole;
  16064. + channel->er_token_rate.fraction = token_rate->fraction;
  16065. + channel->er_token_bucket_limit = token_limit;
  16066. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  16067. + channel->idx);
  16068. + config_opts.dcpid = channel->dcp_idx;
  16069. + config_opts.shaper_config.ertcr = cpu_to_be24(
  16070. + (token_rate->whole << 13) | (token_rate->fraction));
  16071. + config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
  16072. + config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
  16073. + config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
  16074. + config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
  16075. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  16076. +}
  16077. +EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate);
  16078. +
  16079. +int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
  16080. + u64 bps, u16 token_limit)
  16081. +{
  16082. + struct qm_ceetm_rate token_rate;
  16083. + int ret;
  16084. +
  16085. + ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
  16086. + if (ret) {
  16087. + pr_err("Can not convert bps to token rate\n");
  16088. + return -EINVAL;
  16089. + }
  16090. + return qman_ceetm_channel_set_excess_rate(channel, &token_rate,
  16091. + token_limit);
  16092. +}
  16093. +EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps);
  16094. +
  16095. +int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
  16096. + struct qm_ceetm_rate *token_rate,
  16097. + u16 *token_limit)
  16098. +{
  16099. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  16100. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  16101. + int ret;
  16102. +
  16103. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  16104. + channel->idx);
  16105. + query_opts.dcpid = channel->dcp_idx;
  16106. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  16107. + if (ret || !query_result.shaper_query.ertcr ||
  16108. + !query_result.shaper_query.ertbl) {
  16109. + pr_err("The channel excess rate or limit is not set\n");
  16110. + return -EINVAL;
  16111. + }
  16112. + token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
  16113. + token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
  16114. + 0x1FFF;
  16115. + *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
  16116. + return 0;
  16117. +}
  16118. +EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate);
  16119. +
  16120. +int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
  16121. + u64 *bps, u16 *token_limit)
  16122. +{
  16123. + struct qm_ceetm_rate token_rate;
  16124. + int ret;
  16125. +
  16126. + ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate,
  16127. + token_limit);
  16128. + if (ret) {
  16129. + pr_err("The channel ER rate or limit is not available\n");
  16130. + return -EINVAL;
  16131. + }
  16132. +
  16133. + return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
  16134. +}
  16135. +EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps);
  16136. +
  16137. +int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
  16138. + u16 token_limit)
  16139. +{
  16140. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
  16141. +
  16142. + if (channel->shaper_enable) {
  16143. + pr_err("This channel is a shaped one\n");
  16144. + return -EINVAL;
  16145. + }
  16146. +
  16147. + channel->cr_token_bucket_limit = token_limit;
  16148. + config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  16149. + channel->idx);
  16150. + config_opts.dcpid = channel->dcp_idx;
  16151. + config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
  16152. + return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
  16153. +}
  16154. +EXPORT_SYMBOL(qman_ceetm_channel_set_weight);
  16155. +
  16156. +int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
  16157. + u16 *token_limit)
  16158. +{
  16159. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
  16160. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
  16161. + int ret;
  16162. +
  16163. + query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
  16164. + channel->idx);
  16165. + query_opts.dcpid = channel->dcp_idx;
  16166. + ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
  16167. + if (ret || !query_result.shaper_query.crtbl) {
  16168. + pr_err("This unshaped channel's uFQ weight is unavailable\n");
  16169. + return -EINVAL;
  16170. + }
  16171. + *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
  16172. + return 0;
  16173. +}
  16174. +EXPORT_SYMBOL(qman_ceetm_channel_get_weight);
  16175. +
  16176. +int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b,
  16177. + unsigned int prio_a, unsigned int prio_b)
  16178. +{
  16179. + struct qm_mcc_ceetm_class_scheduler_config config_opts;
  16180. + struct qm_mcr_ceetm_class_scheduler_query query_result;
  16181. + int i;
  16182. +
  16183. + if (prio_a > 7) {
  16184. + pr_err("The priority of group A is out of range\n");
  16185. + return -EINVAL;
  16186. + }
  16187. + if (group_b && (prio_b > 7)) {
  16188. + pr_err("The priority of group B is out of range\n");
  16189. + return -EINVAL;
  16190. + }
  16191. +
  16192. + if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
  16193. + pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
  16194. + return -EINVAL;
  16195. + }
  16196. +
  16197. + config_opts.cqcid = cpu_to_be16(channel->idx);
  16198. + config_opts.dcpid = channel->dcp_idx;
  16199. + config_opts.gpc_combine_flag = !group_b;
  16200. + config_opts.gpc_prio_a = prio_a;
  16201. + config_opts.gpc_prio_b = prio_b;
  16202. +
  16203. + for (i = 0; i < 8; i++)
  16204. + config_opts.w[i] = query_result.w[i];
  16205. + config_opts.crem = query_result.crem;
  16206. + config_opts.erem = query_result.erem;
  16207. +
  16208. + return qman_ceetm_configure_class_scheduler(&config_opts);
  16209. +}
  16210. +EXPORT_SYMBOL(qman_ceetm_channel_set_group);
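For the class scheduler grouping, a brief illustrative call: keeping groups A and B separate (group_b = 1) with group A at priority 2 and group B at priority 5 (both priorities must be in the 0..7 range) would look like:

	if (qman_ceetm_channel_set_group(channel, 1, 2, 5))
		pr_err("failed to configure class scheduler groups\n");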
  16211. +
  16212. +int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b,
  16213. + unsigned int *prio_a, unsigned int *prio_b)
  16214. +{
  16215. + struct qm_mcr_ceetm_class_scheduler_query query_result;
  16216. +
  16217. + if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
  16218. + pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
  16219. + return -EINVAL;
  16220. + }
  16221. + *group_b = !query_result.gpc_combine_flag;
  16222. + *prio_a = query_result.gpc_prio_a;
  16223. + *prio_b = query_result.gpc_prio_b;
  16224. +
  16225. + return 0;
  16226. +}
  16227. +EXPORT_SYMBOL(qman_ceetm_channel_get_group);
  16228. +
  16229. +#define GROUP_A_ELIGIBILITY_SET (1 << 8)
  16230. +#define GROUP_B_ELIGIBILITY_SET (1 << 9)
  16231. +#define CQ_ELIGIBILITY_SET(n) (1 << (7 - (n)))
  16232. +int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
  16233. + *channel, int group_b, int cre)
  16234. +{
  16235. + struct qm_mcc_ceetm_class_scheduler_config csch_config;
  16236. + struct qm_mcr_ceetm_class_scheduler_query csch_query;
  16237. + int i;
  16238. +
  16239. + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
  16240. + pr_err("Cannot get the channel %d scheduler setting.\n",
  16241. + channel->idx);
  16242. + return -EINVAL;
  16243. + }
  16244. + csch_config.cqcid = cpu_to_be16(channel->idx);
  16245. + csch_config.dcpid = channel->dcp_idx;
  16246. + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
  16247. + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
  16248. + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
  16249. +
  16250. + for (i = 0; i < 8; i++)
  16251. + csch_config.w[i] = csch_query.w[i];
  16252. + csch_config.erem = csch_query.erem;
  16253. + if (group_b)
  16254. + csch_config.crem = (be16_to_cpu(csch_query.crem)
  16255. + & ~GROUP_B_ELIGIBILITY_SET)
  16256. + | (cre ? GROUP_B_ELIGIBILITY_SET : 0);
  16257. + else
  16258. + csch_config.crem = (be16_to_cpu(csch_query.crem)
  16259. + & ~GROUP_A_ELIGIBILITY_SET)
  16260. + | (cre ? GROUP_A_ELIGIBILITY_SET : 0);
  16261. +
  16262. + csch_config.crem = cpu_to_be16(csch_config.crem);
  16263. +
  16264. + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
  16265. + pr_err("Cannot config channel %d's scheduler with "
  16266. + "group_%c's cr eligibility\n", channel->idx,
  16267. + group_b ? 'b' : 'a');
  16268. + return -EINVAL;
  16269. + }
  16270. +
  16271. + return 0;
  16272. +}
  16273. +EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility);
  16274. +
  16275. +int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
  16276. + *channel, int group_b, int ere)
  16277. +{
  16278. + struct qm_mcc_ceetm_class_scheduler_config csch_config;
  16279. + struct qm_mcr_ceetm_class_scheduler_query csch_query;
  16280. + int i;
  16281. +
  16282. + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
  16283. + pr_err("Cannot get the channel %d scheduler setting.\n",
  16284. + channel->idx);
  16285. + return -EINVAL;
  16286. + }
  16287. + csch_config.cqcid = cpu_to_be16(channel->idx);
  16288. + csch_config.dcpid = channel->dcp_idx;
  16289. + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
  16290. + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
  16291. + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
  16292. +
  16293. + for (i = 0; i < 8; i++)
  16294. + csch_config.w[i] = csch_query.w[i];
  16295. + csch_config.crem = csch_query.crem;
  16296. + if (group_b)
  16297. + csch_config.erem = (be16_to_cpu(csch_query.erem)
  16298. + & ~GROUP_B_ELIGIBILITY_SET)
  16299. + | (ere ? GROUP_B_ELIGIBILITY_SET : 0);
  16300. + else
  16301. + csch_config.erem = (be16_to_cpu(csch_query.erem)
  16302. + & ~GROUP_A_ELIGIBILITY_SET)
  16303. + | (ere ? GROUP_A_ELIGIBILITY_SET : 0);
  16304. +
  16305. + csch_config.erem = cpu_to_be16(csch_config.erem);
  16306. +
  16307. + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
  16308. + pr_err("Cannot config channel %d's scheduler with "
  16309. + "group_%c's er eligibility\n", channel->idx,
  16310. + group_b ? 'b' : 'a');
  16311. + return -EINVAL;
  16312. + }
  16313. +
  16314. + return 0;
  16315. +}
  16316. +EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility);
  16317. +
  16318. +int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
  16319. + unsigned int idx, int cre)
  16320. +{
  16321. + struct qm_mcc_ceetm_class_scheduler_config csch_config;
  16322. + struct qm_mcr_ceetm_class_scheduler_query csch_query;
  16323. + int i;
  16324. +
  16325. + if (idx > 7) {
  16326. + pr_err("CQ index is out of range\n");
  16327. + return -EINVAL;
  16328. + }
  16329. + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
  16330. + pr_err("Cannot get the channel %d scheduler setting.\n",
  16331. + channel->idx);
  16332. + return -EINVAL;
  16333. + }
  16334. + csch_config.cqcid = cpu_to_be16(channel->idx);
  16335. + csch_config.dcpid = channel->dcp_idx;
  16336. + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
  16337. + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
  16338. + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
  16339. + for (i = 0; i < 8; i++)
  16340. + csch_config.w[i] = csch_query.w[i];
  16341. + csch_config.erem = csch_query.erem;
  16342. + csch_config.crem = (be16_to_cpu(csch_query.crem)
  16343. + & ~CQ_ELIGIBILITY_SET(idx)) |
  16344. + (cre ? CQ_ELIGIBILITY_SET(idx) : 0);
  16345. + csch_config.crem = cpu_to_be16(csch_config.crem);
  16346. + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
  16347. + pr_err("Cannot config channel scheduler to set "
  16348. + "cr eligibility mask for CQ#%d\n", idx);
  16349. + return -EINVAL;
  16350. + }
  16351. +
  16352. + return 0;
  16353. +}
  16354. +EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility);
  16355. +
  16356. +int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
  16357. + unsigned int idx, int ere)
  16358. +{
  16359. + struct qm_mcc_ceetm_class_scheduler_config csch_config;
  16360. + struct qm_mcr_ceetm_class_scheduler_query csch_query;
  16361. + int i;
  16362. +
  16363. + if (idx > 7) {
  16364. + pr_err("CQ index is out of range\n");
  16365. + return -EINVAL;
  16366. + }
  16367. + if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
  16368. + pr_err("Cannot get the channel %d scheduler setting.\n",
  16369. + channel->idx);
  16370. + return -EINVAL;
  16371. + }
  16372. + csch_config.cqcid = cpu_to_be16(channel->idx);
  16373. + csch_config.dcpid = channel->dcp_idx;
  16374. + csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
  16375. + csch_config.gpc_prio_a = csch_query.gpc_prio_a;
  16376. + csch_config.gpc_prio_b = csch_query.gpc_prio_b;
  16377. + for (i = 0; i < 8; i++)
  16378. + csch_config.w[i] = csch_query.w[i];
  16379. + csch_config.crem = csch_query.crem;
  16380. + csch_config.erem = (be16_to_cpu(csch_query.erem)
  16381. + & ~CQ_ELIGIBILITY_SET(idx)) |
  16382. + (ere ? CQ_ELIGIBILITY_SET(idx) : 0);
  16383. + csch_config.erem = cpu_to_be16(csch_config.erem);
  16384. + if (qman_ceetm_configure_class_scheduler(&csch_config)) {
  16385. + pr_err("Cannot config channel scheduler to set "
  16386. + "er eligibility mask for CQ#%d\n", idx);
  16387. + return -EINVAL;
  16388. + }
  16389. + return 0;
  16390. +}
  16391. +EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility);
  16392. +
  16393. +int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
  16394. + struct qm_ceetm_channel *channel, unsigned int idx,
  16395. + struct qm_ceetm_ccg *ccg)
  16396. +{
  16397. + struct qm_ceetm_cq *p;
  16398. + struct qm_mcc_ceetm_cq_config cq_config;
  16399. +
  16400. + if (idx > 7) {
  16401. + pr_err("The independent class queue id is out of range\n");
  16402. + return -EINVAL;
  16403. + }
  16404. +
  16405. + list_for_each_entry(p, &channel->class_queues, node) {
  16406. + if (p->idx == idx) {
  16407. + pr_err("The CQ#%d has been claimed!\n", idx);
  16408. + return -EINVAL;
  16409. + }
  16410. + }
  16411. +
  16412. + p = kmalloc(sizeof(*p), GFP_KERNEL);
  16413. + if (!p) {
  16414. + pr_err("Can't allocate memory for CQ#%d!\n", idx);
  16415. + return -ENOMEM;
  16416. + }
  16417. +
  16418. + list_add_tail(&p->node, &channel->class_queues);
  16419. + p->idx = idx;
  16420. + p->is_claimed = 1;
  16421. + p->parent = channel;
  16422. + INIT_LIST_HEAD(&p->bound_lfqids);
  16423. +
  16424. + if (ccg) {
  16425. + cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
  16426. + cq_config.dcpid = channel->dcp_idx;
  16427. + cq_config.ccgid = cpu_to_be16(ccg->idx);
  16428. + if (qman_ceetm_configure_cq(&cq_config)) {
  16429. + pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
  16430. + idx, ccg->idx);
  16431. + list_del(&p->node);
  16432. + kfree(p);
  16433. + return -EINVAL;
  16434. + }
  16435. + }
  16436. +
  16437. + *cq = p;
  16438. + return 0;
  16439. +}
  16440. +EXPORT_SYMBOL(qman_ceetm_cq_claim);
  16441. +
  16442. +int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
  16443. + struct qm_ceetm_channel *channel, unsigned int idx,
  16444. + struct qm_ceetm_ccg *ccg)
  16445. +{
  16446. + struct qm_ceetm_cq *p;
  16447. + struct qm_mcc_ceetm_cq_config cq_config;
  16448. +
  16449. + if ((idx < 8) || (idx > 15)) {
  16450. + pr_err("This grouped class queue id is out of range\n");
  16451. + return -EINVAL;
  16452. + }
  16453. +
  16454. + list_for_each_entry(p, &channel->class_queues, node) {
  16455. + if (p->idx == idx) {
  16456. + pr_err("The CQ#%d has been claimed!\n", idx);
  16457. + return -EINVAL;
  16458. + }
  16459. + }
  16460. +
  16461. + p = kmalloc(sizeof(*p), GFP_KERNEL);
  16462. + if (!p) {
  16463. + pr_err("Can't allocate memory for CQ#%d!\n", idx);
  16464. + return -ENOMEM;
  16465. + }
  16466. +
  16467. + list_add_tail(&p->node, &channel->class_queues);
  16468. + p->idx = idx;
  16469. + p->is_claimed = 1;
  16470. + p->parent = channel;
  16471. + INIT_LIST_HEAD(&p->bound_lfqids);
  16472. +
  16473. + if (ccg) {
  16474. + cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
  16475. + cq_config.dcpid = channel->dcp_idx;
  16476. + cq_config.ccgid = cpu_to_be16(ccg->idx);
  16477. + if (qman_ceetm_configure_cq(&cq_config)) {
  16478. + pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
  16479. + idx, ccg->idx);
  16480. + list_del(&p->node);
  16481. + kfree(p);
  16482. + return -EINVAL;
  16483. + }
  16484. + }
  16485. + *cq = p;
  16486. + return 0;
  16487. +}
  16488. +EXPORT_SYMBOL(qman_ceetm_cq_claim_A);
  16489. +
  16490. +int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
  16491. + struct qm_ceetm_channel *channel, unsigned int idx,
  16492. + struct qm_ceetm_ccg *ccg)
  16493. +{
  16494. + struct qm_ceetm_cq *p;
  16495. + struct qm_mcc_ceetm_cq_config cq_config;
  16496. +
  16497. + if ((idx < 12) || (idx > 15)) {
  16498. + pr_err("This grouped class queue id is out of range\n");
  16499. + return -EINVAL;
  16500. + }
  16501. +
  16502. + list_for_each_entry(p, &channel->class_queues, node) {
  16503. + if (p->idx == idx) {
  16504. + pr_err("The CQ#%d has been claimed!\n", idx);
  16505. + return -EINVAL;
  16506. + }
  16507. + }
  16508. +
  16509. + p = kmalloc(sizeof(*p), GFP_KERNEL);
  16510. + if (!p) {
  16511. + pr_err("Can't allocate memory for CQ#%d!\n", idx);
  16512. + return -ENOMEM;
  16513. + }
  16514. +
  16515. + list_add_tail(&p->node, &channel->class_queues);
  16516. + p->idx = idx;
  16517. + p->is_claimed = 1;
  16518. + p->parent = channel;
  16519. + INIT_LIST_HEAD(&p->bound_lfqids);
  16520. +
  16521. + if (ccg) {
  16522. + cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
  16523. + cq_config.dcpid = channel->dcp_idx;
  16524. + cq_config.ccgid = cpu_to_be16(ccg->idx);
  16525. + if (qman_ceetm_configure_cq(&cq_config)) {
  16526. + pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
  16527. + idx, ccg->idx);
  16528. + list_del(&p->node);
  16529. + kfree(p);
  16530. + return -EINVAL;
  16531. + }
  16532. + }
  16533. + *cq = p;
  16534. + return 0;
  16535. +}
  16536. +EXPORT_SYMBOL(qman_ceetm_cq_claim_B);
  16537. +
  16538. +int qman_ceetm_cq_release(struct qm_ceetm_cq *cq)
  16539. +{
  16540. + if (!list_empty(&cq->bound_lfqids)) {
  16541. + pr_err("The CQ#%d has unreleased LFQID\n", cq->idx);
  16542. + return -EBUSY;
  16543. + }
  16544. + list_del(&cq->node);
  16545. + qman_ceetm_drain_cq(cq);
  16546. + kfree(cq);
  16547. + return 0;
  16548. +}
  16549. +EXPORT_SYMBOL(qman_ceetm_cq_release);
  16550. +
  16551. +int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
  16552. + struct qm_ceetm_weight_code *weight_code)
  16553. +{
  16554. + struct qm_mcc_ceetm_class_scheduler_config config_opts;
  16555. + struct qm_mcr_ceetm_class_scheduler_query query_result;
  16556. + int i;
  16557. +
  16558. + if (cq->idx < 8) {
  16559. + pr_err("Can not set weight for ungrouped class queue\n");
  16560. + return -EINVAL;
  16561. + }
  16562. +
  16563. + if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) {
  16564. + pr_err("Can't query channel#%d's scheduler!\n",
  16565. + cq->parent->idx);
  16566. + return -EINVAL;
  16567. + }
  16568. +
  16569. + config_opts.cqcid = cpu_to_be16(cq->parent->idx);
  16570. + config_opts.dcpid = cq->parent->dcp_idx;
  16571. + config_opts.crem = query_result.crem;
  16572. + config_opts.erem = query_result.erem;
  16573. + config_opts.gpc_combine_flag = query_result.gpc_combine_flag;
  16574. + config_opts.gpc_prio_a = query_result.gpc_prio_a;
  16575. + config_opts.gpc_prio_b = query_result.gpc_prio_b;
  16576. +
  16577. + for (i = 0; i < 8; i++)
  16578. + config_opts.w[i] = query_result.w[i];
  16579. + config_opts.w[cq->idx - 8] = ((weight_code->y << 3) |
  16580. + (weight_code->x & 0x7));
  16581. + return qman_ceetm_configure_class_scheduler(&config_opts);
  16582. +}
  16583. +EXPORT_SYMBOL(qman_ceetm_set_queue_weight);
  16584. +
  16585. +int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
  16586. + struct qm_ceetm_weight_code *weight_code)
  16587. +{
  16588. + struct qm_mcr_ceetm_class_scheduler_query query_result;
  16589. +
  16590. + if (cq->idx < 8) {
  16591. + pr_err("Can not get weight for ungrouped class queue\n");
  16592. + return -EINVAL;
  16593. + }
  16594. +
  16595. + if (qman_ceetm_query_class_scheduler(cq->parent,
  16596. + &query_result)) {
  16597. + pr_err("Can't get the weight code for CQ#%d!\n", cq->idx);
  16598. + return -EINVAL;
  16599. + }
  16600. + weight_code->y = query_result.w[cq->idx - 8] >> 3;
  16601. + weight_code->x = query_result.w[cq->idx - 8] & 0x7;
  16602. +
  16603. + return 0;
  16604. +}
  16605. +EXPORT_SYMBOL(qman_ceetm_get_queue_weight);
  16606. +
  16607. +/* The WBFS code is represented as {x,y}; the effective weight can be calculated as:
  16608. + * effective weight = 2^x / (1 - (y/64))
  16609. + * = 2^(x+6) / (64 - y)
  16610. + */
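+/* Illustrative arithmetic only (derived from the formula above, not from the
+ * hardware spec): the weight code {x = 2, y = 32} gives an effective weight of
+ * 2^(2+6) / (64 - 32) = 256 / 32 = 8, so that class queue would be scheduled
+ * roughly eight times as often as one whose code works out to a weight of 1.
+ */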
  16611. +static void reduce_fraction(u32 *n, u32 *d)
  16612. +{
  16613. + u32 factor = 2;
  16614. + u32 lesser = (*n < *d) ? *n : *d;
  16615. + /* If factor exceeds the square-root of the lesser of *n and *d,
  16616. + * then there's no point continuing. Proof: if there was a factor
  16617. + * bigger than the square root, that would imply there exists
  16618. + * another factor smaller than the square-root with which it
  16619. + * multiplies to give 'lesser' - but that's a contradiction
  16620. + * because the other factor would have already been found and
  16621. + * divided out.
  16622. + */
  16623. + while ((factor * factor) <= lesser) {
  16624. + /* If 'factor' is a factor of *n and *d, divide them both
  16625. + * by 'factor' as many times as possible.
  16626. + */
  16627. + while (!(*n % factor) && !(*d % factor)) {
  16628. + *n /= factor;
  16629. + *d /= factor;
  16630. + lesser /= factor;
  16631. + }
  16632. + if (factor == 2)
  16633. + factor = 3;
  16634. + else
  16635. + factor += 2;
  16636. + }
  16637. +}
  16638. +
  16639. +int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
  16640. + u32 *numerator,
  16641. + u32 *denominator)
  16642. +{
  16643. + *numerator = (u32) 1 << (weight_code->x + 6);
  16644. + *denominator = 64 - weight_code->y;
  16645. + reduce_fraction(numerator, denominator);
  16646. + return 0;
  16647. +}
  16648. +EXPORT_SYMBOL(qman_ceetm_wbfs2ratio);
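+/* Worked example for the pair above (illustrative values, not from the spec):
+ * for {x = 1, y = 16}, qman_ceetm_wbfs2ratio() starts from 2^(1+6) = 128 over
+ * 64 - 16 = 48, and reduce_fraction() cancels the common factor 16 so the
+ * caller sees the ratio 8/3.
+ */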
  16649. +
  16650. +/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive).
  16651. + * So find 'x' by range, and then estimate 'y' using:
  16652. + * 64 - y = 2^(x + 6) / weight
  16653. + * = 2^(x + 6) / (n/d)
  16654. + * = d * 2^(x+6) / n
  16655. + * y = 64 - (d * 2^(x+6) / n)
  16656. + */
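+/* Sketch of the search with made-up numbers: asking for a weight of 5
+ * (numerator 5, denominator 1) stops the loop at x = 2 because 5 < 2^3, and
+ * then y = 64 - (1 * 2^(2+6) / 5) = 64 - 51 = 13 (after rounding), so the
+ * resulting effective weight is 2^8 / (64 - 13) = 256 / 51, roughly 5.02.
+ */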
  16657. +int qman_ceetm_ratio2wbfs(u32 numerator,
  16658. + u32 denominator,
  16659. + struct qm_ceetm_weight_code *weight_code,
  16660. + int rounding)
  16661. +{
  16662. + unsigned int y, x = 0;
  16663. + /* search incrementing 'x' until:
  16664. + * weight < 2^(x+1)
  16665. + * n/d < 2^(x+1)
  16666. + * n < d * 2^(x+1)
  16667. + */
  16668. + while ((x < 8) && (numerator >= (denominator << (x + 1))))
  16669. + x++;
  16670. + if (x >= 8)
  16671. + return -ERANGE;
  16672. + /* because of the subtraction, use '-rounding' */
  16673. + y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding);
  16674. + if (y >= 32)
  16675. + return -ERANGE;
  16676. + weight_code->x = x;
  16677. + weight_code->y = y;
  16678. + return 0;
  16679. +}
  16680. +EXPORT_SYMBOL(qman_ceetm_ratio2wbfs);
  16681. +
  16682. +int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio)
  16683. +{
  16684. + struct qm_ceetm_weight_code weight_code;
  16685. +
  16686. + if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) {
  16687. + pr_err("Cannot get wbfs code for cq %x\n", cq->idx);
  16688. + return -EINVAL;
  16689. + }
  16690. + return qman_ceetm_set_queue_weight(cq, &weight_code);
  16691. +}
  16692. +EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio);
  16693. +
  16694. +int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio)
  16695. +{
  16696. + struct qm_ceetm_weight_code weight_code;
  16697. + u32 n, d;
  16698. +
  16699. + if (qman_ceetm_get_queue_weight(cq, &weight_code)) {
  16700. + pr_err("Cannot query the weight code for cq%x\n", cq->idx);
  16701. + return -EINVAL;
  16702. + }
  16703. +
  16704. + if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) {
  16705. + pr_err("Cannot get the ratio with wbfs code\n");
  16706. + return -EINVAL;
  16707. + }
  16708. +
  16709. + *ratio = (n * (u32)100) / d;
  16710. + return 0;
  16711. +}
  16712. +EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio);
  16713. +
  16714. +int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
  16715. + u64 *frame_count, u64 *byte_count)
  16716. +{
  16717. + struct qm_mcr_ceetm_statistics_query result;
  16718. + u16 cid, command_type;
  16719. + enum qm_dc_portal dcp_idx;
  16720. + int ret;
  16721. +
  16722. + cid = cpu_to_be16((cq->parent->idx << 4) | cq->idx);
  16723. + dcp_idx = cq->parent->dcp_idx;
  16724. + if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
  16725. + command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS;
  16726. + else
  16727. + command_type = CEETM_QUERY_DEQUEUE_STATISTICS;
  16728. +
  16729. + ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
  16730. + if (ret) {
  16731. + pr_err("Can't query the statistics of CQ#%d!\n", cq->idx);
  16732. + return -EINVAL;
  16733. + }
  16734. +
  16735. + *frame_count = be40_to_cpu(result.frm_cnt);
  16736. + *byte_count = be48_to_cpu(result.byte_cnt);
  16737. + return 0;
  16738. +}
  16739. +EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics);
  16740. +
  16741. +int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq)
  16742. +{
  16743. + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr;
  16744. + int ret;
  16745. +
  16746. + do {
  16747. + ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr);
  16748. + if (ret) {
  16749. + pr_err("Failed to pop frame from CQ\n");
  16750. + return -EINVAL;
  16751. + }
  16752. + } while (!(ppxr.stat & 0x2));
  16753. +
  16754. + return 0;
  16755. +}
  16756. +EXPORT_SYMBOL(qman_ceetm_drain_cq);
  16757. +
  16758. +#define CEETM_LFQMT_LFQID_MSB 0xF00000
  16759. +#define CEETM_LFQMT_LFQID_LSB 0x000FFF
  16760. +int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
  16761. + struct qm_ceetm_cq *cq)
  16762. +{
  16763. + struct qm_ceetm_lfq *p;
  16764. + u32 lfqid;
  16765. + int ret = 0;
  16766. + struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
  16767. +
  16768. + if (cq->parent->dcp_idx == qm_dc_portal_fman0) {
  16769. + ret = qman_alloc_ceetm0_lfqid(&lfqid);
  16770. + } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) {
  16771. + ret = qman_alloc_ceetm1_lfqid(&lfqid);
  16772. + } else {
  16773. + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
  16774. + cq->parent->dcp_idx);
  16775. + return -EINVAL;
  16776. + }
  16777. +
  16778. + if (ret) {
  16779. + pr_err("There is no lfqid avalaible for CQ#%d!\n", cq->idx);
  16780. + return -ENODEV;
  16781. + }
  16782. + p = kmalloc(sizeof(*p), GFP_KERNEL);
  16783. + if (!p)
  16784. + return -ENOMEM;
  16785. + p->idx = lfqid;
  16786. + p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB);
  16787. + p->parent = cq->parent;
  16788. + list_add_tail(&p->node, &cq->bound_lfqids);
  16789. +
  16790. + lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB |
  16791. + (cq->parent->dcp_idx << 16) |
  16792. + (lfqid & CEETM_LFQMT_LFQID_LSB));
  16793. + lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx));
  16794. + lfqmt_config.dctidx = cpu_to_be16(p->dctidx);
  16795. + if (qman_ceetm_configure_lfqmt(&lfqmt_config)) {
  16796. + pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n",
  16797. + lfqid, cq->idx);
  16798. + list_del(&p->node);
  16799. + kfree(p);
  16800. + return -EINVAL;
  16801. + }
  16802. + *lfq = p;
  16803. + return 0;
  16804. +}
  16805. +EXPORT_SYMBOL(qman_ceetm_lfq_claim);
  16806. +
  16807. +int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq)
  16808. +{
  16809. + if (lfq->parent->dcp_idx == qm_dc_portal_fman0) {
  16810. + qman_release_ceetm0_lfqid(lfq->idx);
  16811. + } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) {
  16812. + qman_release_ceetm1_lfqid(lfq->idx);
  16813. + } else {
  16814. + pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
  16815. + lfq->parent->dcp_idx);
  16816. + return -EINVAL;
  16817. + }
  16818. + list_del(&lfq->node);
  16819. + kfree(lfq);
  16820. + return 0;
  16821. +}
  16822. +EXPORT_SYMBOL(qman_ceetm_lfq_release);
  16823. +
  16824. +int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a,
  16825. + u32 context_b)
  16826. +{
  16827. + struct qm_mcc_ceetm_dct_config dct_config;
  16828. + lfq->context_a = context_a;
  16829. + lfq->context_b = context_b;
  16830. + dct_config.dctidx = cpu_to_be16(lfq->dctidx);
  16831. + dct_config.dcpid = lfq->parent->dcp_idx;
  16832. + dct_config.context_b = cpu_to_be32(context_b);
  16833. + dct_config.context_a = cpu_to_be64(context_a);
  16834. + return qman_ceetm_configure_dct(&dct_config);
  16835. +}
  16836. +EXPORT_SYMBOL(qman_ceetm_lfq_set_context);
  16837. +
  16838. +int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a,
  16839. + u32 *context_b)
  16840. +{
  16841. + struct qm_mcc_ceetm_dct_query dct_query;
  16842. + struct qm_mcr_ceetm_dct_query query_result;
  16843. +
  16844. + dct_query.dctidx = cpu_to_be16(lfq->dctidx);
  16845. + dct_query.dcpid = lfq->parent->dcp_idx;
  16846. + if (qman_ceetm_query_dct(&dct_query, &query_result)) {
  16847. + pr_err("Can't query LFQID#%d's context!\n", lfq->idx);
  16848. + return -EINVAL;
  16849. + }
  16850. + *context_a = be64_to_cpu(query_result.context_a);
  16851. + *context_b = be32_to_cpu(query_result.context_b);
  16852. + return 0;
  16853. +}
  16854. +EXPORT_SYMBOL(qman_ceetm_lfq_get_context);
  16855. +
  16856. +int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq)
  16857. +{
  16858. + spin_lock_init(&fq->fqlock);
  16859. + fq->fqid = lfq->idx;
  16860. + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
  16861. + if (lfq->ern)
  16862. + fq->cb.ern = lfq->ern;
  16863. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  16864. + if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
  16865. + return -ENOMEM;
  16866. +#endif
  16867. + return 0;
  16868. +}
  16869. +EXPORT_SYMBOL(qman_ceetm_create_fq);
  16870. +
  16871. +int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
  16872. + struct qm_ceetm_channel *channel,
  16873. + unsigned int idx,
  16874. + void (*cscn)(struct qm_ceetm_ccg *,
  16875. + void *cb_ctx,
  16876. + int congested),
  16877. + void *cb_ctx)
  16878. +{
  16879. + struct qm_ceetm_ccg *p;
  16880. +
  16881. + if (idx > 15) {
  16882. + pr_err("The given ccg index is out of range\n");
  16883. + return -EINVAL;
  16884. + }
  16885. +
  16886. + list_for_each_entry(p, &channel->ccgs, node) {
  16887. + if (p->idx == idx) {
  16888. + pr_err("The CCG#%d has been claimed\n", idx);
  16889. + return -EINVAL;
  16890. + }
  16891. + }
  16892. +
  16893. + p = kmalloc(sizeof(*p), GFP_KERNEL);
  16894. + if (!p) {
  16895. + pr_err("Can't allocate memory for CCG#%d!\n", idx);
  16896. + return -ENOMEM;
  16897. + }
  16898. +
  16899. + list_add_tail(&p->node, &channel->ccgs);
  16900. +
  16901. + p->idx = idx;
  16902. + p->parent = channel;
  16903. + p->cb = cscn;
  16904. + p->cb_ctx = cb_ctx;
  16905. + INIT_LIST_HEAD(&p->cb_node);
  16906. +
  16907. + *ccg = p;
  16908. + return 0;
  16909. +}
  16910. +EXPORT_SYMBOL(qman_ceetm_ccg_claim);
  16911. +
  16912. +int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg)
  16913. +{
  16914. + unsigned long irqflags __maybe_unused;
  16915. + struct qm_mcc_ceetm_ccgr_config config_opts;
  16916. + int ret = 0;
  16917. + struct qman_portal *p = get_affine_portal();
  16918. +
  16919. + memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
  16920. + spin_lock_irqsave(&p->ccgr_lock, irqflags);
  16921. + if (!list_empty(&ccg->cb_node))
  16922. + list_del(&ccg->cb_node);
  16923. + config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
  16924. + (ccg->parent->idx << 4) | ccg->idx);
  16925. + config_opts.dcpid = ccg->parent->dcp_idx;
  16926. + config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD);
  16927. + config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p));
  16928. + ret = qman_ceetm_configure_ccgr(&config_opts);
  16929. + spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
  16930. + put_affine_portal();
  16931. +
  16932. + list_del(&ccg->node);
  16933. + kfree(ccg);
  16934. + return ret;
  16935. +}
  16936. +EXPORT_SYMBOL(qman_ceetm_ccg_release);
  16937. +
  16938. +int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask,
  16939. + const struct qm_ceetm_ccg_params *params)
  16940. +{
  16941. + struct qm_mcc_ceetm_ccgr_config config_opts;
  16942. + unsigned long irqflags __maybe_unused;
  16943. + int ret;
  16944. + struct qman_portal *p;
  16945. +
  16946. + if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM))
  16947. + return -EINVAL;
  16948. +
  16949. + p = get_affine_portal();
  16950. +
  16951. + memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
  16952. + spin_lock_irqsave(&p->ccgr_lock, irqflags);
  16953. +
  16954. + config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
  16955. + (ccg->parent->idx << 4) | ccg->idx);
  16956. + config_opts.dcpid = ccg->parent->dcp_idx;
  16957. + config_opts.we_mask = we_mask;
  16958. + if (we_mask & QM_CCGR_WE_CSCN_EN) {
  16959. + config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD;
  16960. + config_opts.cm_config.cscn_tupd = cpu_to_be16(
  16961. + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p));
  16962. + }
  16963. + config_opts.we_mask = cpu_to_be16(config_opts.we_mask);
  16964. + config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
  16965. + config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
  16966. + config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
  16967. + config_opts.cm_config.ctl_td_en = params->td_en;
  16968. + config_opts.cm_config.ctl_td_mode = params->td_mode;
  16969. + config_opts.cm_config.ctl_cscn_en = params->cscn_en;
  16970. + config_opts.cm_config.ctl_mode = params->mode;
  16971. + config_opts.cm_config.oal = params->oal;
  16972. + config_opts.cm_config.cs_thres.hword =
  16973. + cpu_to_be16(params->cs_thres_in.hword);
  16974. + config_opts.cm_config.cs_thres_x.hword =
  16975. + cpu_to_be16(params->cs_thres_out.hword);
  16976. + config_opts.cm_config.td_thres.hword =
  16977. + cpu_to_be16(params->td_thres.hword);
  16978. + config_opts.cm_config.wr_parm_g.word =
  16979. + cpu_to_be32(params->wr_parm_g.word);
  16980. + config_opts.cm_config.wr_parm_y.word =
  16981. + cpu_to_be32(params->wr_parm_y.word);
  16982. + config_opts.cm_config.wr_parm_r.word =
  16983. + cpu_to_be32(params->wr_parm_r.word);
  16984. + ret = qman_ceetm_configure_ccgr(&config_opts);
  16985. + if (ret) {
  16986. + pr_err("Configure CCGR CM failed!\n");
  16987. + goto release_lock;
  16988. + }
  16989. +
  16990. + if (we_mask & QM_CCGR_WE_CSCN_EN)
  16991. + if (list_empty(&ccg->cb_node))
  16992. + list_add(&ccg->cb_node,
  16993. + &p->ccgr_cbs[ccg->parent->dcp_idx]);
  16994. +release_lock:
  16995. + spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
  16996. + put_affine_portal();
  16997. + return ret;
  16998. +}
  16999. +EXPORT_SYMBOL(qman_ceetm_ccg_set);
  17000. +
  17001. +#define CEETM_CCGR_CTL_MASK 0x01
  17002. +int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
  17003. + struct qm_ceetm_ccg_params *params)
  17004. +{
  17005. + struct qm_mcc_ceetm_ccgr_query query_opts;
  17006. + struct qm_mcr_ceetm_ccgr_query query_result;
  17007. +
  17008. + query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
  17009. + (ccg->parent->idx << 4) | ccg->idx);
  17010. + query_opts.dcpid = ccg->parent->dcp_idx;
  17011. +
  17012. + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
  17013. + pr_err("Can't query CCGR#%d\n", ccg->idx);
  17014. + return -EINVAL;
  17015. + }
  17016. +
  17017. + params->wr_parm_r.word =
  17018. + be32_to_cpu(query_result.cm_query.wr_parm_r.word);
  17019. + params->wr_parm_y.word =
  17020. + be32_to_cpu(query_result.cm_query.wr_parm_y.word);
  17021. + params->wr_parm_g.word =
  17022. + be32_to_cpu(query_result.cm_query.wr_parm_g.word);
  17023. + params->td_thres.hword =
  17024. + be16_to_cpu(query_result.cm_query.td_thres.hword);
  17025. + params->cs_thres_out.hword =
  17026. + be16_to_cpu(query_result.cm_query.cs_thres_x.hword);
  17027. + params->cs_thres_in.hword =
  17028. + be16_to_cpu(query_result.cm_query.cs_thres.hword);
  17029. + params->oal = query_result.cm_query.oal;
  17030. + params->wr_en_g = query_result.cm_query.ctl_wr_en_g;
  17031. + params->wr_en_y = query_result.cm_query.ctl_wr_en_y;
  17032. + params->wr_en_r = query_result.cm_query.ctl_wr_en_r;
  17033. + params->td_en = query_result.cm_query.ctl_td_en;
  17034. + params->td_mode = query_result.cm_query.ctl_td_mode;
  17035. + params->cscn_en = query_result.cm_query.ctl_cscn_en;
  17036. + params->mode = query_result.cm_query.ctl_mode;
  17037. +
  17038. + return 0;
  17039. +}
  17040. +EXPORT_SYMBOL(qman_ceetm_ccg_get);
  17041. +
  17042. +int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
  17043. + u64 *frame_count, u64 *byte_count)
  17044. +{
  17045. + struct qm_mcr_ceetm_statistics_query result;
  17046. + u16 cid, command_type;
  17047. + enum qm_dc_portal dcp_idx;
  17048. + int ret;
  17049. +
  17050. + cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx);
  17051. + dcp_idx = ccg->parent->dcp_idx;
  17052. + if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
  17053. + command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS;
  17054. + else
  17055. + command_type = CEETM_QUERY_REJECT_STATISTICS;
  17056. +
  17057. + ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
  17058. + if (ret) {
  17059. + pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx);
  17060. + return -EINVAL;
  17061. + }
  17062. +
  17063. + *frame_count = be40_to_cpu(result.frm_cnt);
  17064. + *byte_count = be48_to_cpu(result.byte_cnt);
  17065. + return 0;
  17066. +}
  17067. +EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics);
  17068. +
  17069. +int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
  17070. + u16 swp_idx,
  17071. + unsigned int *cscn_enabled)
  17072. +{
  17073. + struct qm_mcc_ceetm_ccgr_query query_opts;
  17074. + struct qm_mcr_ceetm_ccgr_query query_result;
  17075. + int i;
  17076. +
  17077. + DPA_ASSERT(swp_idx < 128);
  17078. + query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
  17079. + (ccg->parent->idx << 4) | ccg->idx);
  17080. + query_opts.dcpid = ccg->parent->dcp_idx;
  17081. +
  17082. + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
  17083. + pr_err("Can't query CCGR#%d\n", ccg->idx);
  17084. + return -EINVAL;
  17085. + }
  17086. +
  17087. + i = swp_idx / 32;
  17088. + i = 3 - i;
  17089. + *cscn_enabled = be32_to_cpu(query_result.cm_query.cscn_targ_swp[i]) >>
  17090. + (31 - swp_idx % 32);
  17091. +
  17092. + return 0;
  17093. +}
  17094. +EXPORT_SYMBOL(qman_ceetm_cscn_swp_get);
  17095. +
  17096. +int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
  17097. + u16 dcp_idx,
  17098. + u8 vcgid,
  17099. + unsigned int cscn_enabled,
  17100. + u16 we_mask,
  17101. + const struct qm_ceetm_ccg_params *params)
  17102. +{
  17103. + struct qm_mcc_ceetm_ccgr_config config_opts;
  17104. + int ret;
  17105. +
  17106. + config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
  17107. + (ccg->parent->idx << 4) | ccg->idx);
  17108. + config_opts.dcpid = ccg->parent->dcp_idx;
  17109. + config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD |
  17110. + QM_CCGR_WE_CDV);
  17111. + config_opts.cm_config.cdv = vcgid;
  17112. + config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) |
  17113. + QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx);
  17114. + config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
  17115. + config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
  17116. + config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
  17117. + config_opts.cm_config.ctl_td_en = params->td_en;
  17118. + config_opts.cm_config.ctl_td_mode = params->td_mode;
  17119. + config_opts.cm_config.ctl_cscn_en = params->cscn_en;
  17120. + config_opts.cm_config.ctl_mode = params->mode;
  17121. + config_opts.cm_config.cs_thres.hword =
  17122. + cpu_to_be16(params->cs_thres_in.hword);
  17123. + config_opts.cm_config.cs_thres_x.hword =
  17124. + cpu_to_be16(params->cs_thres_out.hword);
  17125. + config_opts.cm_config.td_thres.hword =
  17126. + cpu_to_be16(params->td_thres.hword);
  17127. + config_opts.cm_config.wr_parm_g.word =
  17128. + cpu_to_be32(params->wr_parm_g.word);
  17129. + config_opts.cm_config.wr_parm_y.word =
  17130. + cpu_to_be32(params->wr_parm_y.word);
  17131. + config_opts.cm_config.wr_parm_r.word =
  17132. + cpu_to_be32(params->wr_parm_r.word);
  17133. +
  17134. + ret = qman_ceetm_configure_ccgr(&config_opts);
  17135. + if (ret) {
  17136. + pr_err("Configure CSCN_TARG_DCP failed!\n");
  17137. + return -EINVAL;
  17138. + }
  17139. + return 0;
  17140. +}
  17141. +EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set);
  17142. +
  17143. +int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
  17144. + u16 dcp_idx,
  17145. + u8 *vcgid,
  17146. + unsigned int *cscn_enabled)
  17147. +{
  17148. + struct qm_mcc_ceetm_ccgr_query query_opts;
  17149. + struct qm_mcr_ceetm_ccgr_query query_result;
  17150. +
  17151. + query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
  17152. + (ccg->parent->idx << 4) | ccg->idx);
  17153. + query_opts.dcpid = ccg->parent->dcp_idx;
  17154. +
  17155. + if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
  17156. + pr_err("Can't query CCGR#%d\n", ccg->idx);
  17157. + return -EINVAL;
  17158. + }
  17159. +
  17160. + *vcgid = query_result.cm_query.cdv;
  17161. + *cscn_enabled = (be16_to_cpu(query_result.cm_query.cscn_targ_dcp) >>
  17162. + dcp_idx) & 0x1;
  17163. + return 0;
  17164. +}
  17165. +EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get);
  17166. +
  17167. +int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state,
  17168. + unsigned int dcp_idx)
  17169. +{
  17170. + struct qm_mc_command *mcc;
  17171. + struct qm_mc_result *mcr;
  17172. + struct qman_portal *p;
  17173. + unsigned long irqflags __maybe_unused;
  17174. + u8 res;
  17175. + int i, j;
  17176. +
  17177. + p = get_affine_portal();
  17178. + PORTAL_IRQ_LOCK(p, irqflags);
  17179. +
  17180. + mcc = qm_mc_start(&p->p);
  17181. + for (i = 0; i < 2; i++) {
  17182. + mcc->ccgr_query.ccgrid =
  17183. + cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i);
  17184. + mcc->ccgr_query.dcpid = dcp_idx;
  17185. + qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
  17186. +
  17187. + while (!(mcr = qm_mc_result(&p->p)))
  17188. + cpu_relax();
  17189. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  17190. + QM_CEETM_VERB_CCGR_QUERY);
  17191. + res = mcr->result;
  17192. + if (res == QM_MCR_RESULT_OK) {
  17193. + for (j = 0; j < 8; j++)
  17194. + mcr->ccgr_query.congestion_state.state.
  17195. + __state[j] =
  17196. + be32_to_cpu(mcr->ccgr_query.
  17197. + congestion_state.state.__state[j]);
  17198. +
  17199. + *(ccg_state + i) =
  17200. + mcr->ccgr_query.congestion_state.state;
  17201. + } else {
  17202. + pr_err("QUERY CEETM CONGESTION STATE failed\n");
  17203. + return -EIO;
  17204. + }
  17205. + }
  17206. + PORTAL_IRQ_UNLOCK(p, irqflags);
  17207. + put_affine_portal();
  17208. + return 0;
  17209. +}
  17210. +
  17211. +int qman_set_wpm(int wpm_enable)
  17212. +{
  17213. + return qm_set_wpm(wpm_enable);
  17214. +}
  17215. +EXPORT_SYMBOL(qman_set_wpm);
  17216. +
  17217. +int qman_get_wpm(int *wpm_enable)
  17218. +{
  17219. + return qm_get_wpm(wpm_enable);
  17220. +}
  17221. +EXPORT_SYMBOL(qman_get_wpm);
  17222. +
  17223. +int qman_shutdown_fq(u32 fqid)
  17224. +{
  17225. + struct qman_portal *p;
  17226. + unsigned long irqflags __maybe_unused;
  17227. + int ret;
  17228. + struct qm_portal *low_p;
  17229. + p = get_affine_portal();
  17230. + PORTAL_IRQ_LOCK(p, irqflags);
  17231. + low_p = &p->p;
  17232. + ret = qm_shutdown_fq(&low_p, 1, fqid);
  17233. + PORTAL_IRQ_UNLOCK(p, irqflags);
  17234. + put_affine_portal();
  17235. + return ret;
  17236. +}
  17237. +
  17238. +const struct qm_portal_config *qman_get_qm_portal_config(
  17239. + struct qman_portal *portal)
  17240. +{
  17241. + return portal->sharing_redirect ? NULL : portal->config;
  17242. +}
  17243. --- /dev/null
  17244. +++ b/drivers/staging/fsl_qbman/qman_low.h
  17245. @@ -0,0 +1,1407 @@
  17246. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  17247. + *
  17248. + * Redistribution and use in source and binary forms, with or without
  17249. + * modification, are permitted provided that the following conditions are met:
  17250. + * * Redistributions of source code must retain the above copyright
  17251. + * notice, this list of conditions and the following disclaimer.
  17252. + * * Redistributions in binary form must reproduce the above copyright
  17253. + * notice, this list of conditions and the following disclaimer in the
  17254. + * documentation and/or other materials provided with the distribution.
  17255. + * * Neither the name of Freescale Semiconductor nor the
  17256. + * names of its contributors may be used to endorse or promote products
  17257. + * derived from this software without specific prior written permission.
  17258. + *
  17259. + *
  17260. + * ALTERNATIVELY, this software may be distributed under the terms of the
  17261. + * GNU General Public License ("GPL") as published by the Free Software
  17262. + * Foundation, either version 2 of that License or (at your option) any
  17263. + * later version.
  17264. + *
  17265. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  17266. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  17267. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  17268. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  17269. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  17270. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  17271. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  17272. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  17273. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  17274. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  17275. + */
  17276. +
  17277. +#include "qman_private.h"
  17278. +
  17279. +/***************************/
  17280. +/* Portal register assists */
  17281. +/***************************/
  17282. +
  17283. +/* Cache-inhibited register offsets */
  17284. +#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
  17285. +
  17286. +#define QM_REG_EQCR_PI_CINH 0x0000
  17287. +#define QM_REG_EQCR_CI_CINH 0x0004
  17288. +#define QM_REG_EQCR_ITR 0x0008
  17289. +#define QM_REG_DQRR_PI_CINH 0x0040
  17290. +#define QM_REG_DQRR_CI_CINH 0x0044
  17291. +#define QM_REG_DQRR_ITR 0x0048
  17292. +#define QM_REG_DQRR_DCAP 0x0050
  17293. +#define QM_REG_DQRR_SDQCR 0x0054
  17294. +#define QM_REG_DQRR_VDQCR 0x0058
  17295. +#define QM_REG_DQRR_PDQCR 0x005c
  17296. +#define QM_REG_MR_PI_CINH 0x0080
  17297. +#define QM_REG_MR_CI_CINH 0x0084
  17298. +#define QM_REG_MR_ITR 0x0088
  17299. +#define QM_REG_CFG 0x0100
  17300. +#define QM_REG_ISR 0x0e00
  17301. +#define QM_REG_IIR 0x0e0c
  17302. +#define QM_REG_ITPR 0x0e14
  17303. +
  17304. +/* Cache-enabled register offsets */
  17305. +#define QM_CL_EQCR 0x0000
  17306. +#define QM_CL_DQRR 0x1000
  17307. +#define QM_CL_MR 0x2000
  17308. +#define QM_CL_EQCR_PI_CENA 0x3000
  17309. +#define QM_CL_EQCR_CI_CENA 0x3100
  17310. +#define QM_CL_DQRR_PI_CENA 0x3200
  17311. +#define QM_CL_DQRR_CI_CENA 0x3300
  17312. +#define QM_CL_MR_PI_CENA 0x3400
  17313. +#define QM_CL_MR_CI_CENA 0x3500
  17314. +#define QM_CL_CR 0x3800
  17315. +#define QM_CL_RR0 0x3900
  17316. +#define QM_CL_RR1 0x3940
  17317. +
  17318. +#endif
  17319. +
  17320. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  17321. +
  17322. +#define QM_REG_EQCR_PI_CINH 0x3000
  17323. +#define QM_REG_EQCR_CI_CINH 0x3040
  17324. +#define QM_REG_EQCR_ITR 0x3080
  17325. +#define QM_REG_DQRR_PI_CINH 0x3100
  17326. +#define QM_REG_DQRR_CI_CINH 0x3140
  17327. +#define QM_REG_DQRR_ITR 0x3180
  17328. +#define QM_REG_DQRR_DCAP 0x31C0
  17329. +#define QM_REG_DQRR_SDQCR 0x3200
  17330. +#define QM_REG_DQRR_VDQCR 0x3240
  17331. +#define QM_REG_DQRR_PDQCR 0x3280
  17332. +#define QM_REG_MR_PI_CINH 0x3300
  17333. +#define QM_REG_MR_CI_CINH 0x3340
  17334. +#define QM_REG_MR_ITR 0x3380
  17335. +#define QM_REG_CFG 0x3500
  17336. +#define QM_REG_ISR 0x3600
  17337. +#define QM_REG_IIR 0x36C0
  17338. +#define QM_REG_ITPR 0x3740
  17339. +
  17340. +/* Cache-enabled register offsets */
  17341. +#define QM_CL_EQCR 0x0000
  17342. +#define QM_CL_DQRR 0x1000
  17343. +#define QM_CL_MR 0x2000
  17344. +#define QM_CL_EQCR_PI_CENA 0x3000
  17345. +#define QM_CL_EQCR_CI_CENA 0x3040
  17346. +#define QM_CL_DQRR_PI_CENA 0x3100
  17347. +#define QM_CL_DQRR_CI_CENA 0x3140
  17348. +#define QM_CL_MR_PI_CENA 0x3300
  17349. +#define QM_CL_MR_CI_CENA 0x3340
  17350. +#define QM_CL_CR 0x3800
  17351. +#define QM_CL_RR0 0x3900
  17352. +#define QM_CL_RR1 0x3940
  17353. +
  17354. +#endif
  17355. +
  17356. +
  17357. +/* BTW, the drivers (and h/w programming model) already obtain the required
  17358. + * synchronisation for portal accesses via lwsync(), hwsync(), and
  17359. + * data-dependencies. Use of barrier()s or other order-preserving primitives
  17360. + * simply degrade performance. Hence the use of the __raw_*() interfaces, which
  17361. + * simply ensure that the compiler treats the portal registers as volatile (i.e.
  17362. + * non-coherent). */
  17363. +
  17364. +/* Cache-inhibited register access. */
  17365. +#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o)))
  17366. +#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
  17367. + (qm)->addr_ci + (o));
  17368. +#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
  17369. +#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
  17370. +
  17371. +/* Cache-enabled (index) register access */
  17372. +#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
  17373. +#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
  17374. +#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o)))
  17375. +#define __qm_cl_out(qm, o, val) \
  17376. + do { \
  17377. + u32 *__tmpclout = (qm)->addr_ce + (o); \
  17378. + __raw_writel(cpu_to_be32(val), __tmpclout); \
  17379. + dcbf(__tmpclout); \
  17380. + } while (0)
  17381. +#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
  17382. +#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
  17383. +#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
  17384. +#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
  17385. +#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
  17386. +#define qm_cl_invalidate(reg)\
  17387. + __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
  17388. +
  17389. +/* Cache-enabled ring access */
  17390. +#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
  17391. +
  17392. +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
  17393. + * analysis, look at using the "extra" bit in the ring index registers to avoid
  17394. + * cyclic issues. */
  17395. +static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
  17396. +{
  17397. + /* 'first' is included, 'last' is excluded */
  17398. + if (first <= last)
  17399. + return last - first;
  17400. + return ringsize + last - first;
  17401. +}
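+/* For instance (purely illustrative): with ringsize 8, first = 6 and last = 2
+ * the distance covers entries 6, 7, 0 and 1, i.e. 8 + 2 - 6 = 4 entries.
+ */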
  17402. +
  17403. +/* Portal modes.
  17404. + * Enum types:
  17405. + * pmode == production mode
  17406. + * cmode == consumption mode
  17407. + * dmode == h/w dequeue mode
  17408. + * Enum values use 3-letter codes. First letter matches the portal mode,
  17409. + * remaining two letters indicate:
  17410. + * ci == cache-inhibited portal register
  17411. + * ce == cache-enabled portal register
  17412. + * vb == in-band valid-bit (cache-enabled)
  17413. + * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only
  17414. + * As for "enum qm_dqrr_dmode", it should be self-explanatory.
  17415. + */
  17416. +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
  17417. + qm_eqcr_pci = 0, /* PI index, cache-inhibited */
  17418. + qm_eqcr_pce = 1, /* PI index, cache-enabled */
  17419. + qm_eqcr_pvb = 2 /* valid-bit */
  17420. +};
  17421. +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
  17422. + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
  17423. + qm_dqrr_dpull = 1 /* PDQCR */
  17424. +};
  17425. +enum qm_dqrr_pmode { /* s/w-only */
  17426. + qm_dqrr_pci, /* reads DQRR_PI_CINH */
  17427. + qm_dqrr_pce, /* reads DQRR_PI_CENA */
  17428. + qm_dqrr_pvb /* reads valid-bit */
  17429. +};
  17430. +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
  17431. + qm_dqrr_cci = 0, /* CI index, cache-inhibited */
  17432. + qm_dqrr_cce = 1, /* CI index, cache-enabled */
  17433. + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */
  17434. +};
  17435. +enum qm_mr_pmode { /* s/w-only */
  17436. + qm_mr_pci, /* reads MR_PI_CINH */
  17437. + qm_mr_pce, /* reads MR_PI_CENA */
  17438. + qm_mr_pvb /* reads valid-bit */
  17439. +};
  17440. +enum qm_mr_cmode { /* matches QCSP_CFG::MM */
  17441. + qm_mr_cci = 0, /* CI index, cache-inhibited */
  17442. + qm_mr_cce = 1 /* CI index, cache-enabled */
  17443. +};
  17444. +
  17445. +
  17446. +/* ------------------------- */
  17447. +/* --- Portal structures --- */
  17448. +
  17449. +#define QM_EQCR_SIZE 8
  17450. +#define QM_DQRR_SIZE 16
  17451. +#define QM_MR_SIZE 8
  17452. +
  17453. +struct qm_eqcr {
  17454. + struct qm_eqcr_entry *ring, *cursor;
  17455. + u8 ci, available, ithresh, vbit;
  17456. +#ifdef CONFIG_FSL_DPA_CHECKING
  17457. + u32 busy;
  17458. + enum qm_eqcr_pmode pmode;
  17459. +#endif
  17460. +};
  17461. +
  17462. +struct qm_dqrr {
  17463. + const struct qm_dqrr_entry *ring, *cursor;
  17464. + u8 pi, ci, fill, ithresh, vbit;
  17465. +#ifdef CONFIG_FSL_DPA_CHECKING
  17466. + enum qm_dqrr_dmode dmode;
  17467. + enum qm_dqrr_pmode pmode;
  17468. + enum qm_dqrr_cmode cmode;
  17469. +#endif
  17470. +};
  17471. +
  17472. +struct qm_mr {
  17473. + const struct qm_mr_entry *ring, *cursor;
  17474. + u8 pi, ci, fill, ithresh, vbit;
  17475. +#ifdef CONFIG_FSL_DPA_CHECKING
  17476. + enum qm_mr_pmode pmode;
  17477. + enum qm_mr_cmode cmode;
  17478. +#endif
  17479. +};
  17480. +
  17481. +struct qm_mc {
  17482. + struct qm_mc_command *cr;
  17483. + struct qm_mc_result *rr;
  17484. + u8 rridx, vbit;
  17485. +#ifdef CONFIG_FSL_DPA_CHECKING
  17486. + enum {
  17487. + /* Can be _mc_start()ed */
  17488. + qman_mc_idle,
  17489. + /* Can be _mc_commit()ed or _mc_abort()ed */
  17490. + qman_mc_user,
  17491. + /* Can only be _mc_retry()ed */
  17492. + qman_mc_hw
  17493. + } state;
  17494. +#endif
  17495. +};
  17496. +
  17497. +#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
  17498. +
  17499. +struct qm_addr {
  17500. + void __iomem *addr_ce; /* cache-enabled */
  17501. + void __iomem *addr_ci; /* cache-inhibited */
  17502. +};
  17503. +
  17504. +struct qm_portal {
  17505. + /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
  17506. + * and including 'mc' fits within a cacheline (yay!). The 'config' part
  17507. + * is setup-only, so isn't a cause for concern. In other words, don't
  17508. + * rearrange this structure on a whim, there be dragons ... */
  17509. + struct qm_addr addr;
  17510. + struct qm_eqcr eqcr;
  17511. + struct qm_dqrr dqrr;
  17512. + struct qm_mr mr;
  17513. + struct qm_mc mc;
  17514. +} QM_PORTAL_ALIGNMENT;
  17515. +
  17516. +
  17517. +/* ---------------- */
  17518. +/* --- EQCR API --- */
  17519. +
  17520. +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
  17521. +#define EQCR_CARRYCLEAR(p) \
  17522. + (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
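+/* With QM_EQCR_SIZE == 8 and 64-byte entries the EQCR spans 512 bytes, so the
+ * "carry bit" cleared above is 8 << 6 == 0x200: assuming the naturally-aligned
+ * ring the cache-enabled mapping provides, a cursor that has just stepped off
+ * the end of the ring is folded back to the ring base.
+ */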
  17523. +
  17524. +/* Bit-wise logic to convert a ring pointer to a ring index */
  17525. +static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
  17526. +{
  17527. + return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
  17528. +}
  17529. +
  17530. +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
  17531. +static inline void EQCR_INC(struct qm_eqcr *eqcr)
  17532. +{
  17533. + /* NB: this is odd-looking, but experiments show that it generates fast
  17534. + * code with essentially no branching overheads. We increment to the
  17535. + * next EQCR pointer and handle overflow and 'vbit'. */
  17536. + struct qm_eqcr_entry *partial = eqcr->cursor + 1;
  17537. + eqcr->cursor = EQCR_CARRYCLEAR(partial);
  17538. + if (partial != eqcr->cursor)
  17539. + eqcr->vbit ^= QM_EQCR_VERB_VBIT;
  17540. +}
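+/* Rough picture of why the toggle matters (descriptive, not taken from the
+ * reference manual): each lap of the 8-entry ring the producer stamps the
+ * opposite valid-bit value into the verb, so QMan can distinguish a freshly
+ * written entry from a stale one left over from the previous lap without
+ * having to consult a producer index register.
+ */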
  17541. +
  17542. +static inline int qm_eqcr_init(struct qm_portal *portal,
  17543. + enum qm_eqcr_pmode pmode,
  17544. + unsigned int eq_stash_thresh,
  17545. + int eq_stash_prio)
  17546. +{
  17547. + /* This use of 'register', as well as all other occurrences, is because
  17548. + * it has been observed to generate much faster code with gcc than is
  17549. + * otherwise the case. */
  17550. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17551. + u32 cfg;
  17552. + u8 pi;
  17553. +
  17554. + eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
  17555. + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
  17556. + qm_cl_invalidate(EQCR_CI);
  17557. + pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
  17558. + eqcr->cursor = eqcr->ring + pi;
  17559. + eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
  17560. + QM_EQCR_VERB_VBIT : 0;
  17561. + eqcr->available = QM_EQCR_SIZE - 1 -
  17562. + qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
  17563. + eqcr->ithresh = qm_in(EQCR_ITR);
  17564. +#ifdef CONFIG_FSL_DPA_CHECKING
  17565. + eqcr->busy = 0;
  17566. + eqcr->pmode = pmode;
  17567. +#endif
  17568. + cfg = (qm_in(CFG) & 0x00ffffff) |
  17569. + (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
  17570. + (eq_stash_prio << 26) | /* QCSP_CFG: EP */
  17571. + ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
  17572. + qm_out(CFG, cfg);
  17573. + return 0;
  17574. +}
  17575. +
  17576. +static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
  17577. +{
  17578. + return (qm_in(CFG) >> 28) & 0x7;
  17579. +}
  17580. +
  17581. +static inline void qm_eqcr_finish(struct qm_portal *portal)
  17582. +{
  17583. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17584. + u8 pi, ci;
  17585. + u32 cfg;
  17586. +
  17587. + /*
  17588. + * Disable EQCI stashing because the QMan only
  17589. + * presents the value it previously stashed to
  17590. + * maintain coherency. Setting the stash threshold
  17591. + * to 1 then 0 ensures that QMan has resynchronized
  17592. + * its internal copy so that the portal is clean
  17593. + * when it is reinitialized in the future
  17594. + */
  17595. + cfg = (qm_in(CFG) & 0x0fffffff) |
  17596. + (1 << 28); /* QCSP_CFG: EST */
  17597. + qm_out(CFG, cfg);
  17598. + cfg &= 0x0fffffff; /* stash threshold = 0 */
  17599. + qm_out(CFG, cfg);
  17600. +
  17601. + pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
  17602. + ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
  17603. +
  17604. + /* Refresh EQCR CI cache value */
  17605. + qm_cl_invalidate(EQCR_CI);
  17606. + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
  17607. +
  17608. + DPA_ASSERT(!eqcr->busy);
  17609. + if (pi != EQCR_PTR2IDX(eqcr->cursor))
  17610. + pr_crit("losing uncommited EQCR entries\n");
  17611. + if (ci != eqcr->ci)
  17612. + pr_crit("missing existing EQCR completions\n");
  17613. + if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
  17614. + pr_crit("EQCR destroyed unquiesced\n");
  17615. +}
  17616. +
  17617. +static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
  17618. + *portal)
  17619. +{
  17620. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17621. + DPA_ASSERT(!eqcr->busy);
  17622. + if (!eqcr->available)
  17623. + return NULL;
  17624. +
  17625. +
  17626. +#ifdef CONFIG_FSL_DPA_CHECKING
  17627. + eqcr->busy = 1;
  17628. +#endif
  17629. + dcbz_64(eqcr->cursor);
  17630. + return eqcr->cursor;
  17631. +}
  17632. +
  17633. +static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
  17634. + *portal)
  17635. +{
  17636. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17637. + u8 diff, old_ci;
  17638. +
  17639. + DPA_ASSERT(!eqcr->busy);
  17640. + if (!eqcr->available) {
  17641. + old_ci = eqcr->ci;
  17642. + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
  17643. + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
  17644. + eqcr->available += diff;
  17645. + if (!diff)
  17646. + return NULL;
  17647. + }
  17648. +#ifdef CONFIG_FSL_DPA_CHECKING
  17649. + eqcr->busy = 1;
  17650. +#endif
  17651. + dcbz_64(eqcr->cursor);
  17652. + return eqcr->cursor;
  17653. +}
  17654. +
  17655. +static inline void qm_eqcr_abort(struct qm_portal *portal)
  17656. +{
  17657. + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
  17658. + DPA_ASSERT(eqcr->busy);
  17659. +#ifdef CONFIG_FSL_DPA_CHECKING
  17660. + eqcr->busy = 0;
  17661. +#endif
  17662. +}
  17663. +
  17664. +static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
  17665. + struct qm_portal *portal, u8 myverb)
  17666. +{
  17667. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17668. + DPA_ASSERT(eqcr->busy);
  17669. + DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
  17670. + if (eqcr->available == 1)
  17671. + return NULL;
  17672. + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
  17673. + dcbf(eqcr->cursor);
  17674. + EQCR_INC(eqcr);
  17675. + eqcr->available--;
  17676. + dcbz_64(eqcr->cursor);
  17677. + return eqcr->cursor;
  17678. +}
  17679. +
  17680. +#define EQCR_COMMIT_CHECKS(eqcr) \
  17681. +do { \
  17682. + DPA_ASSERT(eqcr->busy); \
  17683. + DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
  17684. + DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
  17685. +} while (0)
  17686. +
  17687. +static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
  17688. +{
  17689. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17690. + EQCR_COMMIT_CHECKS(eqcr);
  17691. + DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
  17692. + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
  17693. + EQCR_INC(eqcr);
  17694. + eqcr->available--;
  17695. + dcbf(eqcr->cursor);
  17696. + hwsync();
  17697. + qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
  17698. +#ifdef CONFIG_FSL_DPA_CHECKING
  17699. + eqcr->busy = 0;
  17700. +#endif
  17701. +}
  17702. +
  17703. +static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
  17704. +{
  17705. + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
  17706. + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
  17707. + qm_cl_invalidate(EQCR_PI);
  17708. + qm_cl_touch_rw(EQCR_PI);
  17709. +}
  17710. +
  17711. +static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
  17712. +{
  17713. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17714. + EQCR_COMMIT_CHECKS(eqcr);
  17715. + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
  17716. + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
  17717. + EQCR_INC(eqcr);
  17718. + eqcr->available--;
  17719. + dcbf(eqcr->cursor);
  17720. + lwsync();
  17721. + qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
  17722. +#ifdef CONFIG_FSL_DPA_CHECKING
  17723. + eqcr->busy = 0;
  17724. +#endif
  17725. +}
  17726. +
  17727. +static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
  17728. +{
  17729. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17730. + struct qm_eqcr_entry *eqcursor;
  17731. + EQCR_COMMIT_CHECKS(eqcr);
  17732. + DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
  17733. + lwsync();
  17734. + eqcursor = eqcr->cursor;
  17735. + eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
  17736. + dcbf(eqcursor);
  17737. + EQCR_INC(eqcr);
  17738. + eqcr->available--;
  17739. +#ifdef CONFIG_FSL_DPA_CHECKING
  17740. + eqcr->busy = 0;
  17741. +#endif
  17742. +}
  17743. +
  17744. +static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
  17745. +{
  17746. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17747. + u8 diff, old_ci = eqcr->ci;
  17748. + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
  17749. + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
  17750. + eqcr->available += diff;
  17751. + return diff;
  17752. +}
  17753. +
  17754. +static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
  17755. +{
  17756. + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
  17757. + qm_cl_touch_ro(EQCR_CI);
  17758. +}
  17759. +
  17760. +static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
  17761. +{
  17762. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17763. + u8 diff, old_ci = eqcr->ci;
  17764. + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
  17765. + qm_cl_invalidate(EQCR_CI);
  17766. + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
  17767. + eqcr->available += diff;
  17768. + return diff;
  17769. +}
  17770. +
  17771. +static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
  17772. +{
  17773. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17774. + return eqcr->ithresh;
  17775. +}
  17776. +
  17777. +static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
  17778. +{
  17779. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17780. + eqcr->ithresh = ithresh;
  17781. + qm_out(EQCR_ITR, ithresh);
  17782. +}
  17783. +
  17784. +static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
  17785. +{
  17786. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17787. + return eqcr->available;
  17788. +}
  17789. +
  17790. +static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
  17791. +{
  17792. + register struct qm_eqcr *eqcr = &portal->eqcr;
  17793. + return QM_EQCR_SIZE - 1 - eqcr->available;
  17794. +}
  17795. +
  17796. +
  17797. +/* ---------------- */
  17798. +/* --- DQRR API --- */
  17799. +
  17800. +/* FIXME: many possible improvements;
  17801. + * - look at changing the API to use pointer rather than index parameters now
  17802. + * that 'cursor' is a pointer,
  17803. + * - consider moving other parameters to pointer if it could help (ci)
  17804. + */
  17805. +
  17806. +#define DQRR_CARRYCLEAR(p) \
  17807. + (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
  17808. +
  17809. +static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
  17810. +{
  17811. + return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
  17812. +}
  17813. +
  17814. +static inline const struct qm_dqrr_entry *DQRR_INC(
  17815. + const struct qm_dqrr_entry *e)
  17816. +{
  17817. + return DQRR_CARRYCLEAR(e + 1);
  17818. +}
  17819. +
  17820. +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
  17821. +{
  17822. + qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
  17823. + ((mf & (QM_DQRR_SIZE - 1)) << 20));
  17824. +}
  17825. +
  17826. +static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
  17827. +{
  17828. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17829. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
  17830. + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
  17831. + qm_out(DQRR_CI_CINH, dqrr->ci);
  17832. +}
  17833. +
  17834. +static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
  17835. +{
  17836. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17837. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
  17838. + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
  17839. + qm_cl_out(DQRR_CI, dqrr->ci);
  17840. +}
  17841. +
  17842. +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
  17843. +{
  17844. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  17845. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
  17846. + qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
  17847. + ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
  17848. + dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
  17849. + dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
  17850. +}
  17851. +
  17852. +static inline int qm_dqrr_init(struct qm_portal *portal,
  17853. + const struct qm_portal_config *config,
  17854. + enum qm_dqrr_dmode dmode,
  17855. + __maybe_unused enum qm_dqrr_pmode pmode,
  17856. + enum qm_dqrr_cmode cmode, u8 max_fill)
  17857. +{
  17858. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17859. + u32 cfg;
  17860. +
  17861. + /* Make sure the DQRR will be idle when we enable */
  17862. + qm_out(DQRR_SDQCR, 0);
  17863. + qm_out(DQRR_VDQCR, 0);
  17864. + qm_out(DQRR_PDQCR, 0);
  17865. + dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
  17866. + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
  17867. + dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
  17868. + dqrr->cursor = dqrr->ring + dqrr->ci;
  17869. + dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
  17870. + dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
  17871. + QM_DQRR_VERB_VBIT : 0;
  17872. + dqrr->ithresh = qm_in(DQRR_ITR);
  17873. +
  17874. + /* Free up pending DQRR entries if any as per current DCM */
  17875. + if (dqrr->fill) {
  17876. + enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3;
  17877. +
  17878. +#ifdef CONFIG_FSL_DPA_CHECKING
  17879. + dqrr->cmode = dcm;
  17880. +#endif
  17881. + switch (dcm) {
  17882. + case qm_dqrr_cci:
  17883. + qm_dqrr_cci_consume(portal, dqrr->fill);
  17884. + break;
  17885. + case qm_dqrr_cce:
  17886. + qm_dqrr_cce_consume(portal, dqrr->fill);
  17887. + break;
  17888. + case qm_dqrr_cdc:
  17889. + qm_dqrr_cdc_consume_n(portal, (QM_DQRR_SIZE - 1));
  17890. + break;
  17891. + default:
  17892. + DPA_ASSERT(0);
  17893. + }
  17894. + }
  17895. +
  17896. +#ifdef CONFIG_FSL_DPA_CHECKING
  17897. + dqrr->dmode = dmode;
  17898. + dqrr->pmode = pmode;
  17899. + dqrr->cmode = cmode;
  17900. +#endif
  17901. + /* Invalidate every ring entry before beginning */
  17902. + for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
  17903. + dcbi(qm_cl(dqrr->ring, cfg));
  17904. + cfg = (qm_in(CFG) & 0xff000f00) |
  17905. + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
  17906. + ((dmode & 1) << 18) | /* DP */
  17907. + ((cmode & 3) << 16) | /* DCM */
  17908. + 0xa0 | /* RE+SE */
  17909. + (0 ? 0x40 : 0) | /* Ignore RP */
  17910. + (0 ? 0x10 : 0); /* Ignore SP */
  17911. + qm_out(CFG, cfg);
  17912. + qm_dqrr_set_maxfill(portal, max_fill);
  17913. + return 0;
  17914. +}
  17915. +
  17916. +static inline void qm_dqrr_finish(struct qm_portal *portal)
  17917. +{
  17918. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  17919. +#ifdef CONFIG_FSL_DPA_CHECKING
  17920. + if ((dqrr->cmode != qm_dqrr_cdc) &&
  17921. + (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
  17922. + pr_crit("Ignoring completed DQRR entries\n");
  17923. +#endif
  17924. +}
  17925. +
  17926. +static inline const struct qm_dqrr_entry *qm_dqrr_current(
  17927. + struct qm_portal *portal)
  17928. +{
  17929. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17930. + if (!dqrr->fill)
  17931. + return NULL;
  17932. + return dqrr->cursor;
  17933. +}
  17934. +
  17935. +static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
  17936. +{
  17937. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17938. + return DQRR_PTR2IDX(dqrr->cursor);
  17939. +}
  17940. +
  17941. +static inline u8 qm_dqrr_next(struct qm_portal *portal)
  17942. +{
  17943. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17944. + DPA_ASSERT(dqrr->fill);
  17945. + dqrr->cursor = DQRR_INC(dqrr->cursor);
  17946. + return --dqrr->fill;
  17947. +}
  17948. +
  17949. +static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
  17950. +{
  17951. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17952. + u8 diff, old_pi = dqrr->pi;
  17953. + DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
  17954. + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
  17955. + diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
  17956. + dqrr->fill += diff;
  17957. + return diff;
  17958. +}
  17959. +
  17960. +static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
  17961. +{
  17962. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  17963. + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
  17964. + qm_cl_invalidate(DQRR_PI);
  17965. + qm_cl_touch_ro(DQRR_PI);
  17966. +}
  17967. +
  17968. +static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
  17969. +{
  17970. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17971. + u8 diff, old_pi = dqrr->pi;
  17972. + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
  17973. + dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
  17974. + diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
  17975. + dqrr->fill += diff;
  17976. + return diff;
  17977. +}
  17978. +
  17979. +static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
  17980. +{
  17981. + register struct qm_dqrr *dqrr = &portal->dqrr;
  17982. + const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
  17983. + DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
  17984. +#ifndef CONFIG_FSL_PAMU
  17985. + /*
  17986. + * If PAMU is not available, we need to invalidate the cache.
  17987. + * When PAMU is available, the cache is updated by stashing.
  17988. + */
  17989. + dcbi(res);
  17990. + dcbt_ro(res);
  17991. +#endif
  17992. +
  17993. + /* When accessing 'verb', use __raw_readb() so that the compiler (e.g.
  17994. + * after inlining) cannot optimise away the "excess reads". */
  17995. + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
  17996. + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
  17997. + if (!dqrr->pi)
  17998. + dqrr->vbit ^= QM_DQRR_VERB_VBIT;
  17999. + dqrr->fill++;
  18000. + }
  18001. +}
  18002. +
  18003. +
  18004. +static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
  18005. +{
  18006. + register struct qm_dqrr *dqrr = &portal->dqrr;
  18007. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
  18008. + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
  18009. + qm_out(DQRR_CI_CINH, dqrr->ci);
  18010. +}
  18011. +
  18012. +static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
  18013. +{
  18014. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18015. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
  18016. + qm_cl_invalidate(DQRR_CI);
  18017. + qm_cl_touch_rw(DQRR_CI);
  18018. +}
  18019. +
  18020. +static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
  18021. +{
  18022. + register struct qm_dqrr *dqrr = &portal->dqrr;
  18023. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
  18024. + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
  18025. + qm_cl_out(DQRR_CI, dqrr->ci);
  18026. +}
  18027. +
  18028. +static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
  18029. + int park)
  18030. +{
  18031. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18032. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
  18033. + DPA_ASSERT(idx < QM_DQRR_SIZE);
  18034. + qm_out(DQRR_DCAP, (0 << 8) | /* S */
  18035. + ((park ? 1 : 0) << 6) | /* PK */
  18036. + idx); /* DCAP_CI */
  18037. +}
  18038. +
  18039. +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
  18040. + const struct qm_dqrr_entry *dq,
  18041. + int park)
  18042. +{
  18043. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18044. + u8 idx = DQRR_PTR2IDX(dq);
  18045. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
  18046. + DPA_ASSERT((dqrr->ring + idx) == dq);
  18047. + DPA_ASSERT(idx < QM_DQRR_SIZE);
  18048. + qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
  18049. + ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
  18050. + idx); /* DQRR_DCAP::DCAP_CI */
  18051. +}
  18052. +
  18053. +static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
  18054. +{
  18055. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18056. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
  18057. + return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
  18058. +}
  18059. +
  18060. +static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
  18061. +{
  18062. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18063. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
  18064. + qm_cl_invalidate(DQRR_CI);
  18065. + qm_cl_touch_ro(DQRR_CI);
  18066. +}
  18067. +
  18068. +static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
  18069. +{
  18070. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18071. + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
  18072. + return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
  18073. +}
  18074. +
  18075. +static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
  18076. +{
  18077. + register struct qm_dqrr *dqrr = &portal->dqrr;
  18078. + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
  18079. + return dqrr->ci;
  18080. +}
  18081. +
  18082. +static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
  18083. +{
  18084. + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
  18085. + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
  18086. + qm_out(DQRR_DCAP, (0 << 8) | /* S */
  18087. + (1 << 6) | /* PK */
  18088. + (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
  18089. +}
  18090. +
  18091. +static inline void qm_dqrr_park_current(struct qm_portal *portal)
  18092. +{
  18093. + register struct qm_dqrr *dqrr = &portal->dqrr;
  18094. + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
  18095. + qm_out(DQRR_DCAP, (0 << 8) | /* S */
  18096. + (1 << 6) | /* PK */
  18097. + DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
  18098. +}
  18099. +
  18100. +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
  18101. +{
  18102. + qm_out(DQRR_SDQCR, sdqcr);
  18103. +}
  18104. +
  18105. +static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
  18106. +{
  18107. + return qm_in(DQRR_SDQCR);
  18108. +}
  18109. +
  18110. +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
  18111. +{
  18112. + qm_out(DQRR_VDQCR, vdqcr);
  18113. +}
  18114. +
  18115. +static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
  18116. +{
  18117. + return qm_in(DQRR_VDQCR);
  18118. +}
  18119. +
  18120. +static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
  18121. +{
  18122. + qm_out(DQRR_PDQCR, pdqcr);
  18123. +}
  18124. +
  18125. +static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
  18126. +{
  18127. + return qm_in(DQRR_PDQCR);
  18128. +}
  18129. +
  18130. +static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
  18131. +{
  18132. + register struct qm_dqrr *dqrr = &portal->dqrr;
  18133. + return dqrr->ithresh;
  18134. +}
  18135. +
  18136. +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
  18137. +{
  18138. + qm_out(DQRR_ITR, ithresh);
  18139. +}
  18140. +
  18141. +static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
  18142. +{
  18143. + return (qm_in(CFG) & 0x00f00000) >> 20;
  18144. +}
  18145. +
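For orientation before the MR API, here is a hedged sketch of how the DQRR accessors above combine into a poll/consume loop; it mirrors the loop used later in qm_shutdown_fq() and assumes the portal was initialised with the PVB produce mode and the CDC consume mode (the function name is illustrative only):

static inline void dqrr_drain_once(struct qm_portal *p)
{
	const struct qm_dqrr_entry *dq;

	qm_dqrr_pvb_update(p);		/* pick up newly produced entries */
	dq = qm_dqrr_current(p);
	while (dq) {
		/* ...inspect dq->fqid, dq->fd, dq->stat here... */
		qm_dqrr_cdc_consume_1ptr(p, dq, 0);	/* hand back to h/w */
		qm_dqrr_next(p);			/* advance s/w cursor */
		qm_dqrr_pvb_update(p);
		dq = qm_dqrr_current(p);
	}
}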
  18146. +
  18147. +/* -------------- */
  18148. +/* --- MR API --- */
  18149. +
  18150. +#define MR_CARRYCLEAR(p) \
  18151. + (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
  18152. +
  18153. +static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
  18154. +{
  18155. + return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
  18156. +}
  18157. +
  18158. +static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
  18159. +{
  18160. + return MR_CARRYCLEAR(e + 1);
  18161. +}
  18162. +
  18163. +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
  18164. + enum qm_mr_cmode cmode)
  18165. +{
  18166. + register struct qm_mr *mr = &portal->mr;
  18167. + u32 cfg;
  18168. +
  18169. + mr->ring = portal->addr.addr_ce + QM_CL_MR;
  18170. + mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
  18171. + mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
  18172. + mr->cursor = mr->ring + mr->ci;
  18173. + mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
  18174. + mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
  18175. + mr->ithresh = qm_in(MR_ITR);
  18176. +#ifdef CONFIG_FSL_DPA_CHECKING
  18177. + mr->pmode = pmode;
  18178. + mr->cmode = cmode;
  18179. +#endif
  18180. + cfg = (qm_in(CFG) & 0xfffff0ff) |
  18181. + ((cmode & 1) << 8); /* QCSP_CFG:MM */
  18182. + qm_out(CFG, cfg);
  18183. + return 0;
  18184. +}
  18185. +
  18186. +static inline void qm_mr_finish(struct qm_portal *portal)
  18187. +{
  18188. + register struct qm_mr *mr = &portal->mr;
  18189. + if (mr->ci != MR_PTR2IDX(mr->cursor))
  18190. + pr_crit("Ignoring completed MR entries\n");
  18191. +}
  18192. +
  18193. +static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
  18194. +{
  18195. + register struct qm_mr *mr = &portal->mr;
  18196. + if (!mr->fill)
  18197. + return NULL;
  18198. + return mr->cursor;
  18199. +}
  18200. +
  18201. +static inline u8 qm_mr_cursor(struct qm_portal *portal)
  18202. +{
  18203. + register struct qm_mr *mr = &portal->mr;
  18204. + return MR_PTR2IDX(mr->cursor);
  18205. +}
  18206. +
  18207. +static inline u8 qm_mr_next(struct qm_portal *portal)
  18208. +{
  18209. + register struct qm_mr *mr = &portal->mr;
  18210. + DPA_ASSERT(mr->fill);
  18211. + mr->cursor = MR_INC(mr->cursor);
  18212. + return --mr->fill;
  18213. +}
  18214. +
  18215. +static inline u8 qm_mr_pci_update(struct qm_portal *portal)
  18216. +{
  18217. + register struct qm_mr *mr = &portal->mr;
  18218. + u8 diff, old_pi = mr->pi;
  18219. + DPA_ASSERT(mr->pmode == qm_mr_pci);
  18220. + mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
  18221. + diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
  18222. + mr->fill += diff;
  18223. + return diff;
  18224. +}
  18225. +
  18226. +static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
  18227. +{
  18228. + __maybe_unused register struct qm_mr *mr = &portal->mr;
  18229. + DPA_ASSERT(mr->pmode == qm_mr_pce);
  18230. + qm_cl_invalidate(MR_PI);
  18231. + qm_cl_touch_ro(MR_PI);
  18232. +}
  18233. +
  18234. +static inline u8 qm_mr_pce_update(struct qm_portal *portal)
  18235. +{
  18236. + register struct qm_mr *mr = &portal->mr;
  18237. + u8 diff, old_pi = mr->pi;
  18238. + DPA_ASSERT(mr->pmode == qm_mr_pce);
  18239. + mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
  18240. + diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
  18241. + mr->fill += diff;
  18242. + return diff;
  18243. +}
  18244. +
  18245. +static inline void qm_mr_pvb_update(struct qm_portal *portal)
  18246. +{
  18247. + register struct qm_mr *mr = &portal->mr;
  18248. + const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
  18249. + DPA_ASSERT(mr->pmode == qm_mr_pvb);
  18250. + /* When accessing 'verb', use __raw_readb() so that the compiler (e.g.
  18251. + * after inlining) cannot optimise away the "excess reads". */
  18252. + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
  18253. + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
  18254. + if (!mr->pi)
  18255. + mr->vbit ^= QM_MR_VERB_VBIT;
  18256. + mr->fill++;
  18257. + res = MR_INC(res);
  18258. + }
  18259. + dcbit_ro(res);
  18260. +}
  18261. +
  18262. +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
  18263. +{
  18264. + register struct qm_mr *mr = &portal->mr;
  18265. + DPA_ASSERT(mr->cmode == qm_mr_cci);
  18266. + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
  18267. + qm_out(MR_CI_CINH, mr->ci);
  18268. +}
  18269. +
  18270. +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
  18271. +{
  18272. + register struct qm_mr *mr = &portal->mr;
  18273. + DPA_ASSERT(mr->cmode == qm_mr_cci);
  18274. + mr->ci = MR_PTR2IDX(mr->cursor);
  18275. + qm_out(MR_CI_CINH, mr->ci);
  18276. +}
  18277. +
  18278. +static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
  18279. +{
  18280. + __maybe_unused register struct qm_mr *mr = &portal->mr;
  18281. + DPA_ASSERT(mr->cmode == qm_mr_cce);
  18282. + qm_cl_invalidate(MR_CI);
  18283. + qm_cl_touch_rw(MR_CI);
  18284. +}
  18285. +
  18286. +static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
  18287. +{
  18288. + register struct qm_mr *mr = &portal->mr;
  18289. + DPA_ASSERT(mr->cmode == qm_mr_cce);
  18290. + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
  18291. + qm_cl_out(MR_CI, mr->ci);
  18292. +}
  18293. +
  18294. +static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
  18295. +{
  18296. + register struct qm_mr *mr = &portal->mr;
  18297. + DPA_ASSERT(mr->cmode == qm_mr_cce);
  18298. + mr->ci = MR_PTR2IDX(mr->cursor);
  18299. + qm_cl_out(MR_CI, mr->ci);
  18300. +}
  18301. +
  18302. +static inline u8 qm_mr_get_ci(struct qm_portal *portal)
  18303. +{
  18304. + register struct qm_mr *mr = &portal->mr;
  18305. + return mr->ci;
  18306. +}
  18307. +
  18308. +static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
  18309. +{
  18310. + register struct qm_mr *mr = &portal->mr;
  18311. + return mr->ithresh;
  18312. +}
  18313. +
  18314. +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
  18315. +{
  18316. + qm_out(MR_ITR, ithresh);
  18317. +}
  18318. +
  18319. +
  18320. +/* ------------------------------ */
  18321. +/* --- Management command API --- */
  18322. +
  18323. +static inline int qm_mc_init(struct qm_portal *portal)
  18324. +{
  18325. + register struct qm_mc *mc = &portal->mc;
  18326. + mc->cr = portal->addr.addr_ce + QM_CL_CR;
  18327. + mc->rr = portal->addr.addr_ce + QM_CL_RR0;
  18328. + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
  18329. + QM_MCC_VERB_VBIT) ? 0 : 1;
  18330. + mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
  18331. +#ifdef CONFIG_FSL_DPA_CHECKING
  18332. + mc->state = qman_mc_idle;
  18333. +#endif
  18334. + return 0;
  18335. +}
  18336. +
  18337. +static inline void qm_mc_finish(struct qm_portal *portal)
  18338. +{
  18339. + __maybe_unused register struct qm_mc *mc = &portal->mc;
  18340. + DPA_ASSERT(mc->state == qman_mc_idle);
  18341. +#ifdef CONFIG_FSL_DPA_CHECKING
  18342. + if (mc->state != qman_mc_idle)
  18343. + pr_crit("Losing incomplete MC command\n");
  18344. +#endif
  18345. +}
  18346. +
  18347. +static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
  18348. +{
  18349. + register struct qm_mc *mc = &portal->mc;
  18350. + DPA_ASSERT(mc->state == qman_mc_idle);
  18351. +#ifdef CONFIG_FSL_DPA_CHECKING
  18352. + mc->state = qman_mc_user;
  18353. +#endif
  18354. + dcbz_64(mc->cr);
  18355. + return mc->cr;
  18356. +}
  18357. +
  18358. +static inline void qm_mc_abort(struct qm_portal *portal)
  18359. +{
  18360. + __maybe_unused register struct qm_mc *mc = &portal->mc;
  18361. + DPA_ASSERT(mc->state == qman_mc_user);
  18362. +#ifdef CONFIG_FSL_DPA_CHECKING
  18363. + mc->state = qman_mc_idle;
  18364. +#endif
  18365. +}
  18366. +
  18367. +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
  18368. +{
  18369. + register struct qm_mc *mc = &portal->mc;
  18370. + struct qm_mc_result *rr = mc->rr + mc->rridx;
  18371. + DPA_ASSERT(mc->state == qman_mc_user);
  18372. + lwsync();
  18373. + mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
  18374. + dcbf(mc->cr);
  18375. + dcbit_ro(rr);
  18376. +#ifdef CONFIG_FSL_DPA_CHECKING
  18377. + mc->state = qman_mc_hw;
  18378. +#endif
  18379. +}
  18380. +
  18381. +static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
  18382. +{
  18383. + register struct qm_mc *mc = &portal->mc;
  18384. + struct qm_mc_result *rr = mc->rr + mc->rridx;
  18385. + DPA_ASSERT(mc->state == qman_mc_hw);
  18386. + /* The inactive response register's verb byte always returns zero until
  18387. + * its command is submitted and completed. This includes the valid-bit,
  18388. + * in case you were wondering... */
  18389. + if (!__raw_readb(&rr->verb)) {
  18390. + dcbit_ro(rr);
  18391. + return NULL;
  18392. + }
  18393. + mc->rridx ^= 1;
  18394. + mc->vbit ^= QM_MCC_VERB_VBIT;
  18395. +#ifdef CONFIG_FSL_DPA_CHECKING
  18396. + mc->state = qman_mc_idle;
  18397. +#endif
  18398. + return rr;
  18399. +}
  18400. +
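The management-command API above is always driven with the same start/commit/poll-result sequence; qm_shutdown_fq() below is the in-file user. As a hedged, stand-alone illustration (the helper name is an assumption), querying a frame queue's non-programmable state looks like this:

static inline u8 query_fq_np_state(struct qm_portal *p, u32 fqid)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;

	mcc = qm_mc_start(p);			/* claim and zero the CR */
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(p, QM_MCC_VERB_QUERYFQ_NP); /* write verb + valid bit */
	while (!(mcr = qm_mc_result(p)))	/* poll the RR for completion */
		cpu_relax();
	return mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
}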
  18401. +
  18402. +/* ------------------------------------- */
  18403. +/* --- Portal interrupt register API --- */
  18404. +
  18405. +static inline int qm_isr_init(__always_unused struct qm_portal *portal)
  18406. +{
  18407. + return 0;
  18408. +}
  18409. +
  18410. +static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
  18411. +{
  18412. +}
  18413. +
  18414. +static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
  18415. +{
  18416. + qm_out(ITPR, iperiod);
  18417. +}
  18418. +
  18419. +static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
  18420. +{
  18421. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  18422. + return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
  18423. +#else
  18424. + return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
  18425. +#endif
  18426. +}
  18427. +
  18428. +static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
  18429. + u32 val)
  18430. +{
  18431. +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
  18432. + __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
  18433. +#else
  18434. + __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
  18435. +#endif
  18436. +}
  18437. +
  18438. +/* Cleanup FQs */
  18439. +static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
  18440. + u32 fqid)
  18441. +{
  18442. +
  18443. + struct qm_mc_command *mcc;
  18444. + struct qm_mc_result *mcr;
  18445. + u8 state;
  18446. + int orl_empty, fq_empty, i, drain = 0;
  18447. + u32 result;
  18448. + u32 channel, wq;
  18449. + u16 dest_wq;
  18450. +
  18451. + /* Determine the state of the FQID */
  18452. + mcc = qm_mc_start(portal[0]);
  18453. + mcc->queryfq_np.fqid = cpu_to_be32(fqid);
  18454. + qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
  18455. + while (!(mcr = qm_mc_result(portal[0])))
  18456. + cpu_relax();
  18457. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
  18458. + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
  18459. + if (state == QM_MCR_NP_STATE_OOS)
  18460. + return 0; /* Already OOS, no need to do any more checks */
  18461. +
  18462. + /* Query which channel the FQ is using */
  18463. + mcc = qm_mc_start(portal[0]);
  18464. + mcc->queryfq.fqid = cpu_to_be32(fqid);
  18465. + qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
  18466. + while (!(mcr = qm_mc_result(portal[0])))
  18467. + cpu_relax();
  18468. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
  18469. +
  18470. + /* Need to store these since the MCR gets reused */
  18471. + dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
  18472. + wq = dest_wq & 0x7;
  18473. + channel = dest_wq>>3;
  18474. +
  18475. + switch (state) {
  18476. + case QM_MCR_NP_STATE_TEN_SCHED:
  18477. + case QM_MCR_NP_STATE_TRU_SCHED:
  18478. + case QM_MCR_NP_STATE_ACTIVE:
  18479. + case QM_MCR_NP_STATE_PARKED:
  18480. + orl_empty = 0;
  18481. + mcc = qm_mc_start(portal[0]);
  18482. + mcc->alterfq.fqid = cpu_to_be32(fqid);
  18483. + qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
  18484. + while (!(mcr = qm_mc_result(portal[0])))
  18485. + cpu_relax();
  18486. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  18487. + QM_MCR_VERB_ALTER_RETIRE);
  18488. + result = mcr->result; /* Make a copy as we reuse MCR below */
  18489. +
  18490. + if (result == QM_MCR_RESULT_PENDING) {
  18491. + /* Need to wait for the FQRN in the message ring, which
  18492. + will only occur once the FQ has been drained. In order
  18493. + for the FQ to drain, the portal needs to be set to
  18494. + dequeue from the channel the FQ is scheduled on. */
  18495. + const struct qm_mr_entry *msg;
  18496. + const struct qm_dqrr_entry *dqrr = NULL;
  18497. + int found_fqrn = 0;
  18498. + u16 dequeue_wq = 0;
  18499. +
  18500. + /* Flag that we need to drain FQ */
  18501. + drain = 1;
  18502. +
  18503. + if (channel >= qm_channel_pool1 &&
  18504. + channel < (qm_channel_pool1 + 15)) {
  18505. + /* Pool channel, enable the bit in the portal */
  18506. + dequeue_wq = (channel -
  18507. + qm_channel_pool1 + 1)<<4 | wq;
  18508. + } else if (channel < qm_channel_pool1) {
  18509. + /* Dedicated channel */
  18510. + dequeue_wq = wq;
  18511. + } else {
  18512. + pr_info("Cannot recover FQ 0x%x, it is "
  18513. + "scheduled on channel 0x%x",
  18514. + fqid, channel);
  18515. + return -EBUSY;
  18516. + }
  18517. + /* Set the sdqcr to drain this channel */
  18518. + if (channel < qm_channel_pool1)
  18519. + for (i = 0; i < portal_count; i++)
  18520. + qm_dqrr_sdqcr_set(portal[i],
  18521. + QM_SDQCR_TYPE_ACTIVE |
  18522. + QM_SDQCR_CHANNELS_DEDICATED);
  18523. + else
  18524. + for (i = 0; i < portal_count; i++)
  18525. + qm_dqrr_sdqcr_set(
  18526. + portal[i],
  18527. + QM_SDQCR_TYPE_ACTIVE |
  18528. + QM_SDQCR_CHANNELS_POOL_CONV
  18529. + (channel));
  18530. + while (!found_fqrn) {
  18531. + /* Keep draining DQRR while checking the MR*/
  18532. + for (i = 0; i < portal_count; i++) {
  18533. + qm_dqrr_pvb_update(portal[i]);
  18534. + dqrr = qm_dqrr_current(portal[i]);
  18535. + while (dqrr) {
  18536. + qm_dqrr_cdc_consume_1ptr(
  18537. + portal[i], dqrr, 0);
  18538. + qm_dqrr_pvb_update(portal[i]);
  18539. + qm_dqrr_next(portal[i]);
  18540. + dqrr = qm_dqrr_current(
  18541. + portal[i]);
  18542. + }
  18543. + /* Process message ring too */
  18544. + qm_mr_pvb_update(portal[i]);
  18545. + msg = qm_mr_current(portal[i]);
  18546. + while (msg) {
  18547. + if ((msg->verb &
  18548. + QM_MR_VERB_TYPE_MASK)
  18549. + == QM_MR_VERB_FQRN)
  18550. + found_fqrn = 1;
  18551. + qm_mr_next(portal[i]);
  18552. + qm_mr_cci_consume_to_current(
  18553. + portal[i]);
  18554. + qm_mr_pvb_update(portal[i]);
  18555. + msg = qm_mr_current(portal[i]);
  18556. + }
  18557. + cpu_relax();
  18558. + }
  18559. + }
  18560. + }
  18561. + if (result != QM_MCR_RESULT_OK &&
  18562. + result != QM_MCR_RESULT_PENDING) {
  18563. + /* error */
  18564. + pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
  18565. + fqid, result);
  18566. + return -1;
  18567. + }
  18568. + if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
  18569. + /* ORL had no entries, no need to wait until the
  18570. + ERNs come in */
  18571. + orl_empty = 1;
  18572. + }
  18573. + /* Retirement succeeded, check to see if FQ needs
  18574. + to be drained */
  18575. + if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
  18576. + /* FQ is Not Empty, drain using volatile DQ commands */
  18577. + fq_empty = 0;
  18578. + do {
  18579. + const struct qm_dqrr_entry *dqrr = NULL;
  18580. + u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
  18581. + qm_dqrr_vdqcr_set(portal[0], vdqcr);
  18582. +
  18583. + /* Wait for a dequeue to occur */
  18584. + while (dqrr == NULL) {
  18585. + qm_dqrr_pvb_update(portal[0]);
  18586. + dqrr = qm_dqrr_current(portal[0]);
  18587. + if (!dqrr)
  18588. + cpu_relax();
  18589. + }
  18590. + /* Process the dequeues, making sure to
  18591. + empty the ring completely */
  18592. + while (dqrr) {
  18593. + if (be32_to_cpu(dqrr->fqid) == fqid &&
  18594. + dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
  18595. + fq_empty = 1;
  18596. + qm_dqrr_cdc_consume_1ptr(portal[0],
  18597. + dqrr, 0);
  18598. + qm_dqrr_pvb_update(portal[0]);
  18599. + qm_dqrr_next(portal[0]);
  18600. + dqrr = qm_dqrr_current(portal[0]);
  18601. + }
  18602. + } while (fq_empty == 0);
  18603. + }
  18604. + for (i = 0; i < portal_count; i++)
  18605. + qm_dqrr_sdqcr_set(portal[i], 0);
  18606. +
  18607. + /* Wait for the ORL to have been completely drained */
  18608. + while (orl_empty == 0) {
  18609. + const struct qm_mr_entry *msg;
  18610. + qm_mr_pvb_update(portal[0]);
  18611. + msg = qm_mr_current(portal[0]);
  18612. + while (msg) {
  18613. + if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
  18614. + QM_MR_VERB_FQRL)
  18615. + orl_empty = 1;
  18616. + qm_mr_next(portal[0]);
  18617. + qm_mr_cci_consume_to_current(portal[0]);
  18618. + qm_mr_pvb_update(portal[0]);
  18619. + msg = qm_mr_current(portal[0]);
  18620. + }
  18621. + cpu_relax();
  18622. + }
  18623. + mcc = qm_mc_start(portal[0]);
  18624. + mcc->alterfq.fqid = cpu_to_be32(fqid);
  18625. + qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
  18626. + while (!(mcr = qm_mc_result(portal[0])))
  18627. + cpu_relax();
  18628. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  18629. + QM_MCR_VERB_ALTER_OOS);
  18630. + if (mcr->result != QM_MCR_RESULT_OK) {
  18631. + pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
  18632. + fqid, mcr->result);
  18633. + return -1;
  18634. + }
  18635. + return 0;
  18636. + case QM_MCR_NP_STATE_RETIRED:
  18637. + /* Send OOS Command */
  18638. + mcc = qm_mc_start(portal[0]);
  18639. + mcc->alterfq.fqid = cpu_to_be32(fqid);
  18640. + qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
  18641. + while (!(mcr = qm_mc_result(portal[0])))
  18642. + cpu_relax();
  18643. + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
  18644. + QM_MCR_VERB_ALTER_OOS);
  18645. + if (mcr->result) {
  18646. + pr_err("OOS Failed on FQID 0x%x\n", fqid);
  18647. + return -1;
  18648. + }
  18649. + return 0;
  18650. + }
  18651. + return -1;
  18652. +}
  18653. --- /dev/null
  18654. +++ b/drivers/staging/fsl_qbman/qman_private.h
  18655. @@ -0,0 +1,398 @@
  18656. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  18657. + *
  18658. + * Redistribution and use in source and binary forms, with or without
  18659. + * modification, are permitted provided that the following conditions are met:
  18660. + * * Redistributions of source code must retain the above copyright
  18661. + * notice, this list of conditions and the following disclaimer.
  18662. + * * Redistributions in binary form must reproduce the above copyright
  18663. + * notice, this list of conditions and the following disclaimer in the
  18664. + * documentation and/or other materials provided with the distribution.
  18665. + * * Neither the name of Freescale Semiconductor nor the
  18666. + * names of its contributors may be used to endorse or promote products
  18667. + * derived from this software without specific prior written permission.
  18668. + *
  18669. + *
  18670. + * ALTERNATIVELY, this software may be distributed under the terms of the
  18671. + * GNU General Public License ("GPL") as published by the Free Software
  18672. + * Foundation, either version 2 of that License or (at your option) any
  18673. + * later version.
  18674. + *
  18675. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  18676. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  18677. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18678. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  18679. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  18680. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  18681. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  18682. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  18683. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  18684. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  18685. + */
  18686. +
  18687. +#include "dpa_sys.h"
  18688. +#include <linux/fsl_qman.h>
  18689. +#include <linux/iommu.h>
  18690. +
  18691. +#if defined(CONFIG_FSL_PAMU)
  18692. +#include <asm/fsl_pamu_stash.h>
  18693. +#endif
  18694. +
  18695. +#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
  18696. +#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
  18697. +#endif
  18698. +
  18699. +#define QBMAN_ANY_PORTAL_IDX 0xffffffff
  18700. + /* ----------------- */
  18701. + /* Congestion Groups */
  18702. + /* ----------------- */
  18703. +/* This wrapper represents a bit-array for the state of the 256 Qman congestion
  18704. + * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
  18705. + * those that don't concern us. We harness the structure and accessor details
  18706. + * already used in the management command to query congestion groups. */
  18707. +struct qman_cgrs {
  18708. + struct __qm_mcr_querycongestion q;
  18709. +};
  18710. +static inline void qman_cgrs_init(struct qman_cgrs *c)
  18711. +{
  18712. + memset(c, 0, sizeof(*c));
  18713. +}
  18714. +static inline void qman_cgrs_fill(struct qman_cgrs *c)
  18715. +{
  18716. + memset(c, 0xff, sizeof(*c));
  18717. +}
  18718. +static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
  18719. +{
  18720. + return QM_MCR_QUERYCONGESTION(&c->q, num);
  18721. +}
  18722. +static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
  18723. +{
  18724. + c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
  18725. +}
  18726. +static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
  18727. +{
  18728. + c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
  18729. +}
  18730. +static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
  18731. +{
  18732. + while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
  18733. + ;
  18734. + return num;
  18735. +}
  18736. +static inline void qman_cgrs_cp(struct qman_cgrs *dest,
  18737. + const struct qman_cgrs *src)
  18738. +{
  18739. + *dest = *src;
  18740. +}
  18741. +static inline void qman_cgrs_and(struct qman_cgrs *dest,
  18742. + const struct qman_cgrs *a, const struct qman_cgrs *b)
  18743. +{
  18744. + int ret;
  18745. + u32 *_d = dest->q.__state;
  18746. + const u32 *_a = a->q.__state;
  18747. + const u32 *_b = b->q.__state;
  18748. + for (ret = 0; ret < 8; ret++)
  18749. + *(_d++) = *(_a++) & *(_b++);
  18750. +}
  18751. +static inline void qman_cgrs_xor(struct qman_cgrs *dest,
  18752. + const struct qman_cgrs *a, const struct qman_cgrs *b)
  18753. +{
  18754. + int ret;
  18755. + u32 *_d = dest->q.__state;
  18756. + const u32 *_a = a->q.__state;
  18757. + const u32 *_b = b->q.__state;
  18758. + for (ret = 0; ret < 8; ret++)
  18759. + *(_d++) = *(_a++) ^ *(_b++);
  18760. +}
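A hedged usage sketch (not part of the patch): the accessors above are intended to be combined, typically by masking the queried congestion state against the groups a caller cares about and then walking the set bits with qman_cgrs_next():

static inline void cgrs_for_each_set(const struct qman_cgrs *state,
				     const struct qman_cgrs *mask)
{
	struct qman_cgrs both;
	int i;

	qman_cgrs_and(&both, state, mask);
	for (i = qman_cgrs_next(&both, -1); i < __CGR_NUM;
	     i = qman_cgrs_next(&both, i)) {
		/* ...handle congestion group 'i' here... */
	}
}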
  18761. +
  18762. + /* ----------------------- */
  18763. + /* CEETM Congestion Groups */
  18764. + /* ----------------------- */
  18765. +/* This wrapper represents a bit-array for the state of the 512 Qman CEETM
  18766. + * congestion groups.
  18767. + */
  18768. +struct qman_ccgrs {
  18769. + struct __qm_mcr_querycongestion q[2];
  18770. +};
  18771. +static inline void qman_ccgrs_init(struct qman_ccgrs *c)
  18772. +{
  18773. + memset(c, 0, sizeof(*c));
  18774. +}
  18775. +static inline void qman_ccgrs_fill(struct qman_ccgrs *c)
  18776. +{
  18777. + memset(c, 0xff, sizeof(*c));
  18778. +}
  18779. +static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num)
  18780. +{
  18781. + if (num < __CGR_NUM)
  18782. + return QM_MCR_QUERYCONGESTION(&c->q[0], num);
  18783. + else
  18784. + return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM));
  18785. +}
  18786. +static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num)
  18787. +{
  18788. + while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num))
  18789. + ;
  18790. + return num;
  18791. +}
  18792. +static inline void qman_ccgrs_cp(struct qman_ccgrs *dest,
  18793. + const struct qman_ccgrs *src)
  18794. +{
  18795. + *dest = *src;
  18796. +}
  18797. +static inline void qman_ccgrs_and(struct qman_ccgrs *dest,
  18798. + const struct qman_ccgrs *a, const struct qman_ccgrs *b)
  18799. +{
  18800. + int ret, i;
  18801. + u32 *_d;
  18802. + const u32 *_a, *_b;
  18803. + for (i = 0; i < 2; i++) {
  18804. + _d = dest->q[i].__state;
  18805. + _a = a->q[i].__state;
  18806. + _b = b->q[i].__state;
  18807. + for (ret = 0; ret < 8; ret++)
  18808. + *(_d++) = *(_a++) & *(_b++);
  18809. + }
  18810. +}
  18811. +static inline void qman_ccgrs_xor(struct qman_ccgrs *dest,
  18812. + const struct qman_ccgrs *a, const struct qman_ccgrs *b)
  18813. +{
  18814. + int ret, i;
  18815. + u32 *_d;
  18816. + const u32 *_a, *_b;
  18817. + for (i = 0; i < 2; i++) {
  18818. + _d = dest->q[i].__state;
  18819. + _a = a->q[i].__state;
  18820. + _b = b->q[i].__state;
  18821. + for (ret = 0; ret < 8; ret++)
  18822. + *(_d++) = *(_a++) ^ *(_b++);
  18823. + }
  18824. +}
  18825. +
  18826. +/* used by CCSR and portal interrupt code */
  18827. +enum qm_isr_reg {
  18828. + qm_isr_status = 0,
  18829. + qm_isr_enable = 1,
  18830. + qm_isr_disable = 2,
  18831. + qm_isr_inhibit = 3
  18832. +};
  18833. +
  18834. +struct qm_portal_config {
  18835. + /* Corenet portal addresses;
  18836. + * [0]==cache-enabled, [1]==cache-inhibited. */
  18837. + __iomem void *addr_virt[2];
  18838. + struct resource addr_phys[2];
  18839. + struct device dev;
  18840. + struct iommu_domain *iommu_domain;
  18841. + /* Allow these to be joined in lists */
  18842. + struct list_head list;
  18843. + /* User-visible portal configuration settings */
  18844. + struct qman_portal_config public_cfg;
  18845. + /* power management saved data */
  18846. + u32 saved_isdr;
  18847. +};
  18848. +
  18849. +/* Revision info (for errata and feature handling) */
  18850. +#define QMAN_REV11 0x0101
  18851. +#define QMAN_REV12 0x0102
  18852. +#define QMAN_REV20 0x0200
  18853. +#define QMAN_REV30 0x0300
  18854. +#define QMAN_REV31 0x0301
  18855. +#define QMAN_REV32 0x0302
  18856. +
  18857. +/* QMan REV_2 register contains the Cfg option */
  18858. +#define QMAN_REV_CFG_0 0x0
  18859. +#define QMAN_REV_CFG_1 0x1
  18860. +#define QMAN_REV_CFG_2 0x2
  18861. +#define QMAN_REV_CFG_3 0x3
  18862. +
  18863. +extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
  18864. +extern u8 qman_ip_cfg;
  18865. +extern u32 qman_clk;
  18866. +extern u16 qman_portal_max;
  18867. +
  18868. +#ifdef CONFIG_FSL_QMAN_CONFIG
  18869. +/* Hooks from qman_driver.c to qman_config.c */
  18870. +int qman_init_ccsr(struct device_node *node);
  18871. +void qman_liodn_fixup(u16 channel);
  18872. +int qman_set_sdest(u16 channel, unsigned int cpu_idx);
  18873. +size_t get_qman_fqd_size(void);
  18874. +#else
  18875. +static inline size_t get_qman_fqd_size(void)
  18876. +{
  18877. + return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
  18878. +}
  18879. +#endif
  18880. +
  18881. +int qm_set_wpm(int wpm);
  18882. +int qm_get_wpm(int *wpm);
  18883. +
  18884. +/* Hooks from qman_driver.c in to qman_high.c */
  18885. +struct qman_portal *qman_create_portal(
  18886. + struct qman_portal *portal,
  18887. + const struct qm_portal_config *config,
  18888. + const struct qman_cgrs *cgrs);
  18889. +
  18890. +struct qman_portal *qman_create_affine_portal(
  18891. + const struct qm_portal_config *config,
  18892. + const struct qman_cgrs *cgrs);
  18893. +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
  18894. + int cpu);
  18895. +const struct qm_portal_config *qman_destroy_affine_portal(void);
  18896. +void qman_destroy_portal(struct qman_portal *qm);
  18897. +
  18898. +/* Hooks from fsl_usdpaa.c to qman_driver.c */
  18899. +struct qm_portal_config *qm_get_unused_portal(void);
  18900. +struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
  18901. +
  18902. +void qm_put_unused_portal(struct qm_portal_config *pcfg);
  18903. +void qm_set_liodns(struct qm_portal_config *pcfg);
  18904. +
  18905. +/* This CGR feature is supported by h/w and required by unit-tests and the
  18906. + * debugfs hooks, so it is implemented in the driver. However, it allows an explicit
  18907. + * corruption of h/w fields by s/w that are usually incorruptible (because the
  18908. + * counters are usually maintained entirely within h/w). As such, we declare
  18909. + * this API internally. */
  18910. +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
  18911. + struct qm_mcr_cgrtestwrite *result);
  18912. +
  18913. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  18914. +/* If the fq object pointer is larger than the size of the context_b field,
  18915. + * then a lookup table is required. */
  18916. +int qman_setup_fq_lookup_table(size_t num_entries);
  18917. +#endif
  18918. +
  18919. +
  18920. +/*************************************************/
  18921. +/* QMan s/w corenet portal, low-level i/face */
  18922. +/*************************************************/
  18923. +
  18924. +/* Note: most functions are only used by the high-level interface, so are
  18925. + * inlined from qman_low.h. The stuff below is for use by other parts of the
  18926. + * driver. */
  18927. +
  18928. +/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
  18929. + * dequeue TYPE. Choose TOKEN (8-bit).
  18930. + * If SOURCE == CHANNELS,
  18931. + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
  18932. + * You can choose DEDICATED_PRECEDENCE if the portal channel should have
  18933. + * priority.
  18934. + * If SOURCE == SPECIFICWQ,
  18935. + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
  18936. + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
  18937. + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
  18938. + * same value.
  18939. + */
  18940. +#define QM_SDQCR_SOURCE_CHANNELS 0x0
  18941. +#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
  18942. +#define QM_SDQCR_COUNT_EXACT1 0x0
  18943. +#define QM_SDQCR_COUNT_UPTO3 0x20000000
  18944. +#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
  18945. +#define QM_SDQCR_TYPE_MASK 0x03000000
  18946. +#define QM_SDQCR_TYPE_NULL 0x0
  18947. +#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
  18948. +#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
  18949. +#define QM_SDQCR_TYPE_ACTIVE 0x03000000
  18950. +#define QM_SDQCR_TOKEN_MASK 0x00ff0000
  18951. +#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
  18952. +#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
  18953. +#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
  18954. +#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
  18955. +#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
  18956. +#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
  18957. +#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
  18958. +
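By way of a hedged example (the channel, token and count values are chosen purely for illustration, and QM_SDQCR_CHANNELS_POOL() is defined alongside these macros elsewhere in the driver), an SDQCR that dequeues active frames from the dedicated channel and pool channel 3, up to three at a time, could be composed like this:

static void example_set_sdqcr(struct qm_portal *portal)
{
	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
		    QM_SDQCR_COUNT_UPTO3 |
		    QM_SDQCR_TYPE_ACTIVE |
		    QM_SDQCR_TOKEN_SET(0xab) |
		    QM_SDQCR_CHANNELS_DEDICATED |
		    QM_SDQCR_CHANNELS_POOL(3);

	qm_dqrr_sdqcr_set(portal, sdqcr);
}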
  18959. +/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
  18960. +#define QM_VDQCR_FQID_MASK 0x00ffffff
  18961. +#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
  18962. +
  18963. +/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
  18964. + * If MODE==SCHEDULED
  18965. + * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
  18966. + * If CHANNELS,
  18967. + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
  18968. + * You can choose DEDICATED_PRECEDENCE if the portal channel should have
  18969. + * priority.
  18970. + * If SPECIFICWQ,
  18971. + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
  18972. + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
  18973. + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
  18974. + * same value.
  18975. + * If MODE==UNSCHEDULED
  18976. + * Choose FQID().
  18977. + */
  18978. +#define QM_PDQCR_MODE_SCHEDULED 0x0
  18979. +#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
  18980. +#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
  18981. +#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
  18982. +#define QM_PDQCR_COUNT_EXACT1 0x0
  18983. +#define QM_PDQCR_COUNT_UPTO3 0x20000000
  18984. +#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
  18985. +#define QM_PDQCR_TYPE_MASK 0x03000000
  18986. +#define QM_PDQCR_TYPE_NULL 0x0
  18987. +#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
  18988. +#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
  18989. +#define QM_PDQCR_TYPE_ACTIVE 0x03000000
  18990. +#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
  18991. +#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
  18992. +#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
  18993. +#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
  18994. +#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
  18995. +#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
  18996. +#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
  18997. +
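Similarly, a hedged PDQCR example: an unscheduled dequeue of up to three frames from a single FQ (the helper name and parameters are illustrative assumptions):

static void example_set_pdqcr(struct qm_portal *portal, u32 fqid)
{
	u32 pdqcr = QM_PDQCR_MODE_UNSCHEDULED |
		    QM_PDQCR_COUNT_UPTO3 |
		    QM_PDQCR_FQID(fqid);

	qm_dqrr_pdqcr_set(portal, pdqcr);
}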
  18998. +/* Used by all portal interrupt registers except 'inhibit'
  18999. + * Channels with frame availability
  19000. + */
  19001. +#define QM_PIRQ_DQAVAIL 0x0000ffff
  19002. +
  19003. +/* The DQAVAIL interrupt fields break down into these bits; */
  19004. +#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
  19005. +#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
  19006. +#define QM_DQAVAIL_MASK 0xffff
  19007. +/* This mask contains all the "irqsource" bits visible to API users */
  19008. +#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
  19009. +
  19010. +/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
  19011. + * the disable register" rather than "disable the ability to write". */
  19012. +#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
  19013. +#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
  19014. +#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
  19015. +#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
  19016. +#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
  19017. +#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
  19018. +/* TODO: unfortunate name-clash here, reword? */
  19019. +#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
  19020. +#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
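As a hedged usage note for the wrappers above (QM_PIRQ_DQRI comes from the public fsl_qman.h interface, referenced a few lines down in QM_PIRQ_VISIBLE), acknowledging a pending DQRR interrupt source might look like:

static void example_ack_dqri(struct qm_portal *portal)
{
	u32 is = qm_isr_status_read(portal);

	if (is & QM_PIRQ_DQRI)
		qm_isr_status_clear(portal, QM_PIRQ_DQRI);
}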
  19021. +
  19022. +#ifdef CONFIG_FSL_QMAN_CONFIG
  19023. +int qman_have_ccsr(void);
  19024. +#else
  19025. +#define qman_have_ccsr 0
  19026. +#endif
  19027. +
  19028. +__init int qman_init(void);
  19029. +__init int qman_resource_init(void);
  19030. +
  19031. +/* CEETM related */
  19032. +#define QMAN_CEETM_MAX 2
  19033. +extern u8 num_ceetms;
  19034. +extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
  19035. +int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
  19036. +int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
  19037. +int qman_ceetm_set_prescaler(enum qm_dc_portal portal);
  19038. +int qman_ceetm_get_prescaler(u16 *pres);
  19039. +int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
  19040. + struct qm_mcr_ceetm_cq_query *cq_query);
  19041. +int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
  19042. + struct qm_mcr_ceetm_ccgr_query *response);
  19043. +int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
  19044. +
  19045. +extern void *affine_portals[NR_CPUS];
  19046. +const struct qm_portal_config *qman_get_qm_portal_config(
  19047. + struct qman_portal *portal);
  19048. +
  19049. +/* power management */
  19050. +#ifdef CONFIG_SUSPEND
  19051. +void suspend_unused_qportal(void);
  19052. +void resume_unused_qportal(void);
  19053. +#endif
  19054. --- /dev/null
  19055. +++ b/drivers/staging/fsl_qbman/qman_test.c
  19056. @@ -0,0 +1,57 @@
  19057. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  19058. + *
  19059. + * Redistribution and use in source and binary forms, with or without
  19060. + * modification, are permitted provided that the following conditions are met:
  19061. + * * Redistributions of source code must retain the above copyright
  19062. + * notice, this list of conditions and the following disclaimer.
  19063. + * * Redistributions in binary form must reproduce the above copyright
  19064. + * notice, this list of conditions and the following disclaimer in the
  19065. + * documentation and/or other materials provided with the distribution.
  19066. + * * Neither the name of Freescale Semiconductor nor the
  19067. + * names of its contributors may be used to endorse or promote products
  19068. + * derived from this software without specific prior written permission.
  19069. + *
  19070. + *
  19071. + * ALTERNATIVELY, this software may be distributed under the terms of the
  19072. + * GNU General Public License ("GPL") as published by the Free Software
  19073. + * Foundation, either version 2 of that License or (at your option) any
  19074. + * later version.
  19075. + *
  19076. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  19077. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  19078. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19079. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  19080. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  19081. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19082. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  19083. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  19084. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  19085. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  19086. + */
  19087. +
  19088. +#include "qman_test.h"
  19089. +
  19090. +MODULE_AUTHOR("Geoff Thorpe");
  19091. +MODULE_LICENSE("Dual BSD/GPL");
  19092. +MODULE_DESCRIPTION("Qman testing");
  19093. +
  19094. +static int test_init(void)
  19095. +{
  19096. + int loop = 1;
  19097. + while (loop--) {
  19098. +#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
  19099. + qman_test_hotpotato();
  19100. +#endif
  19101. +#ifdef CONFIG_FSL_QMAN_TEST_HIGH
  19102. + qman_test_high();
  19103. +#endif
  19104. + }
  19105. + return 0;
  19106. +}
  19107. +
  19108. +static void test_exit(void)
  19109. +{
  19110. +}
  19111. +
  19112. +module_init(test_init);
  19113. +module_exit(test_exit);
  19114. --- /dev/null
  19115. +++ b/drivers/staging/fsl_qbman/qman_test.h
  19116. @@ -0,0 +1,45 @@
  19117. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  19118. + *
  19119. + * Redistribution and use in source and binary forms, with or without
  19120. + * modification, are permitted provided that the following conditions are met:
  19121. + * * Redistributions of source code must retain the above copyright
  19122. + * notice, this list of conditions and the following disclaimer.
  19123. + * * Redistributions in binary form must reproduce the above copyright
  19124. + * notice, this list of conditions and the following disclaimer in the
  19125. + * documentation and/or other materials provided with the distribution.
  19126. + * * Neither the name of Freescale Semiconductor nor the
  19127. + * names of its contributors may be used to endorse or promote products
  19128. + * derived from this software without specific prior written permission.
  19129. + *
  19130. + *
  19131. + * ALTERNATIVELY, this software may be distributed under the terms of the
  19132. + * GNU General Public License ("GPL") as published by the Free Software
  19133. + * Foundation, either version 2 of that License or (at your option) any
  19134. + * later version.
  19135. + *
  19136. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  19137. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  19138. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19139. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  19140. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  19141. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19142. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  19143. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  19144. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  19145. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  19146. + */
  19147. +
  19148. +#include <linux/kernel.h>
  19149. +#include <linux/errno.h>
  19150. +#include <linux/io.h>
  19151. +#include <linux/slab.h>
  19152. +#include <linux/module.h>
  19153. +#include <linux/interrupt.h>
  19154. +#include <linux/delay.h>
  19155. +#include <linux/sched.h>
  19156. +
  19157. +#include <linux/fsl_qman.h>
  19158. +
  19159. +void qman_test_hotpotato(void);
  19160. +void qman_test_high(void);
  19161. +
  19162. --- /dev/null
  19163. +++ b/drivers/staging/fsl_qbman/qman_test_high.c
  19164. @@ -0,0 +1,216 @@
  19165. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  19166. + *
  19167. + * Redistribution and use in source and binary forms, with or without
  19168. + * modification, are permitted provided that the following conditions are met:
  19169. + * * Redistributions of source code must retain the above copyright
  19170. + * notice, this list of conditions and the following disclaimer.
  19171. + * * Redistributions in binary form must reproduce the above copyright
  19172. + * notice, this list of conditions and the following disclaimer in the
  19173. + * documentation and/or other materials provided with the distribution.
  19174. + * * Neither the name of Freescale Semiconductor nor the
  19175. + * names of its contributors may be used to endorse or promote products
  19176. + * derived from this software without specific prior written permission.
  19177. + *
  19178. + *
  19179. + * ALTERNATIVELY, this software may be distributed under the terms of the
  19180. + * GNU General Public License ("GPL") as published by the Free Software
  19181. + * Foundation, either version 2 of that License or (at your option) any
  19182. + * later version.
  19183. + *
  19184. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  19185. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  19186. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19187. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  19188. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  19189. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19190. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  19191. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  19192. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  19193. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  19194. + */
  19195. +
  19196. +#include "qman_test.h"
  19197. +
  19198. +/*************/
  19199. +/* constants */
  19200. +/*************/
  19201. +
  19202. +#define CGR_ID 27
  19203. +#define POOL_ID 2
  19204. +#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
  19205. +#define NUM_ENQUEUES 10
  19206. +#define NUM_PARTIAL 4
  19207. +#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
  19208. + QM_SDQCR_TYPE_PRIO_QOS | \
  19209. + QM_SDQCR_TOKEN_SET(0x98) | \
  19210. + QM_SDQCR_CHANNELS_DEDICATED | \
  19211. + QM_SDQCR_CHANNELS_POOL(POOL_ID))
  19212. +#define PORTAL_OPAQUE ((void *)0xf00dbeef)
  19213. +#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
  19214. +
  19215. +/*************************************/
  19216. +/* Predeclarations (eg. for fq_base) */
  19217. +/*************************************/
  19218. +
  19219. +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
  19220. + struct qman_fq *,
  19221. + const struct qm_dqrr_entry *);
  19222. +static void cb_ern(struct qman_portal *, struct qman_fq *,
  19223. + const struct qm_mr_entry *);
  19224. +static void cb_fqs(struct qman_portal *, struct qman_fq *,
  19225. + const struct qm_mr_entry *);
  19226. +
  19227. +/***************/
  19228. +/* global vars */
  19229. +/***************/
  19230. +
  19231. +static struct qm_fd fd, fd_dq;
  19232. +static struct qman_fq fq_base = {
  19233. + .cb.dqrr = cb_dqrr,
  19234. + .cb.ern = cb_ern,
  19235. + .cb.fqs = cb_fqs
  19236. +};
  19237. +static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
  19238. +static int retire_complete, sdqcr_complete;
  19239. +
  19240. +/**********************/
  19241. +/* internal functions */
  19242. +/**********************/
  19243. +
  19244. +/* Helpers for initialising and "incrementing" a frame descriptor */
  19245. +static void fd_init(struct qm_fd *__fd)
  19246. +{
  19247. + qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
  19248. + __fd->format = qm_fd_contig_big;
  19249. + __fd->length29 = 0x0000ffff;
  19250. + __fd->cmd = 0xfeedf00d;
  19251. +}
  19252. +
  19253. +static void fd_inc(struct qm_fd *__fd)
  19254. +{
  19255. + u64 t = qm_fd_addr_get64(__fd);
  19256. + int z = t >> 40;
  19257. + t <<= 1;
  19258. + if (z)
  19259. + t |= 1;
  19260. + qm_fd_addr_set64(__fd, t);
  19261. + __fd->length29--;
  19262. + __fd->cmd++;
  19263. +}
  19264. +
  19265. +/* The only part of the 'fd' we can't memcmp() is the ppid */
  19266. +static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
  19267. +{
  19268. + int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
  19269. + if (!r)
  19270. + r = a->format - b->format;
  19271. + if (!r)
  19272. + r = a->opaque - b->opaque;
  19273. + if (!r)
  19274. + r = a->cmd - b->cmd;
  19275. + return r;
  19276. +}
  19277. +
  19278. +/********/
  19279. +/* test */
  19280. +/********/
  19281. +
  19282. +static void do_enqueues(struct qman_fq *fq)
  19283. +{
  19284. + unsigned int loop;
  19285. + for (loop = 0; loop < NUM_ENQUEUES; loop++) {
  19286. + if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
  19287. + (((loop + 1) == NUM_ENQUEUES) ?
  19288. + QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
  19289. + panic("qman_enqueue() failed\n");
  19290. + fd_inc(&fd);
  19291. + }
  19292. +}
  19293. +
  19294. +void qman_test_high(void)
  19295. +{
  19296. + unsigned int flags;
  19297. + int res;
  19298. + struct qman_fq *fq = &fq_base;
  19299. +
  19300. + pr_info("qman_test_high starting\n");
  19301. + fd_init(&fd);
  19302. + fd_init(&fd_dq);
  19303. +
  19304. + /* Initialise (parked) FQ */
  19305. + if (qman_create_fq(0, FQ_FLAGS, fq))
  19306. + panic("qman_create_fq() failed\n");
  19307. + if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
  19308. + panic("qman_init_fq() failed\n");
  19309. +
  19310. + /* Do enqueues + VDQCR, twice. (Parked FQ) */
  19311. + do_enqueues(fq);
  19312. + pr_info("VDQCR (till-empty);\n");
  19313. + if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
  19314. + QM_VDQCR_NUMFRAMES_TILLEMPTY))
  19315. + panic("qman_volatile_dequeue() failed\n");
  19316. + do_enqueues(fq);
  19317. + pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
  19318. + if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
  19319. + QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
  19320. + panic("qman_volatile_dequeue() failed\n");
  19321. + pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
  19322. + NUM_ENQUEUES);
  19323. + if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
  19324. + QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
  19325. + panic("qman_volatile_dequeue() failed\n");
  19326. +
  19327. + do_enqueues(fq);
  19328. + pr_info("scheduled dequeue (till-empty)\n");
  19329. + if (qman_schedule_fq(fq))
  19330. + panic("qman_schedule_fq() failed\n");
  19331. + wait_event(waitqueue, sdqcr_complete);
  19332. +
  19333. + /* Retire and OOS the FQ */
  19334. + res = qman_retire_fq(fq, &flags);
  19335. + if (res < 0)
  19336. + panic("qman_retire_fq() failed\n");
  19337. + wait_event(waitqueue, retire_complete);
  19338. + if (flags & QMAN_FQ_STATE_BLOCKOOS)
  19339. + panic("leaking frames\n");
  19340. + if (qman_oos_fq(fq))
  19341. + panic("qman_oos_fq() failed\n");
  19342. + qman_destroy_fq(fq, 0);
  19343. + pr_info("qman_test_high finished\n");
  19344. +}
  19345. +
  19346. +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
  19347. + struct qman_fq *fq,
  19348. + const struct qm_dqrr_entry *dq)
  19349. +{
  19350. + if (fd_cmp(&fd_dq, &dq->fd)) {
  19351. + pr_err("BADNESS: dequeued frame doesn't match;\n");
  19352. + pr_err("Expected 0x%llx, got 0x%llx\n",
  19353. + (unsigned long long)fd_dq.length29,
  19354. + (unsigned long long)dq->fd.length29);
  19355. + BUG();
  19356. + }
  19357. + fd_inc(&fd_dq);
  19358. + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
  19359. + sdqcr_complete = 1;
  19360. + wake_up(&waitqueue);
  19361. + }
  19362. + return qman_cb_dqrr_consume;
  19363. +}
  19364. +
  19365. +static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
  19366. + const struct qm_mr_entry *msg)
  19367. +{
  19368. + panic("cb_ern() unimplemented");
  19369. +}
  19370. +
  19371. +static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
  19372. + const struct qm_mr_entry *msg)
  19373. +{
  19374. + u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
  19375. + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
  19376. + panic("unexpected FQS message");
  19377. + pr_info("Retirement message received\n");
  19378. + retire_complete = 1;
  19379. + wake_up(&waitqueue);
  19380. +}
  19381. --- /dev/null
  19382. +++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
  19383. @@ -0,0 +1,499 @@
  19384. +/* Copyright 2009-2012 Freescale Semiconductor, Inc.
  19385. + *
  19386. + * Redistribution and use in source and binary forms, with or without
  19387. + * modification, are permitted provided that the following conditions are met:
  19388. + * * Redistributions of source code must retain the above copyright
  19389. + * notice, this list of conditions and the following disclaimer.
  19390. + * * Redistributions in binary form must reproduce the above copyright
  19391. + * notice, this list of conditions and the following disclaimer in the
  19392. + * documentation and/or other materials provided with the distribution.
  19393. + * * Neither the name of Freescale Semiconductor nor the
  19394. + * names of its contributors may be used to endorse or promote products
  19395. + * derived from this software without specific prior written permission.
  19396. + *
  19397. + *
  19398. + * ALTERNATIVELY, this software may be distributed under the terms of the
  19399. + * GNU General Public License ("GPL") as published by the Free Software
  19400. + * Foundation, either version 2 of that License or (at your option) any
  19401. + * later version.
  19402. + *
  19403. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  19404. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  19405. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19406. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  19407. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  19408. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19409. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  19410. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  19411. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  19412. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  19413. + */
  19414. +
  19415. +#include <linux/kthread.h>
  19416. +#include <linux/platform_device.h>
  19417. +#include <linux/dma-mapping.h>
  19418. +#include "qman_test.h"
  19419. +
  19420. +/* Algorithm:
  19421. + *
  19422. + * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
  19423. + * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
  19424. + * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
  19425. + * shuttle a "hot potato" frame around them such that every forwarding action
  19426. + * moves it from one cpu to another. (The use of more than one handler per cpu
  19427. + * is to allow enough handlers/FQs to truly test the significance of caching -
  19428. + * ie. when cache-expiries are occurring.)
  19429. + *
  19430. + * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
  19431. + * first and last words of the frame data will undergo a transformation step on
  19432. + * each forwarding action. To achieve this, each handler will be assigned a
  19433. + * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
  19434. + * received by a handler, the mixer of the expected sender is XOR'd into all
  19435. + * words of the entire frame, which is then validated against the original
  19436. + * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
  19437. + * the current handler. Apart from validating that the frame is taking the
  19438. + * expected path, this also provides some quasi-realistic overheads to each
  19439. + * forwarding action - dereferencing *all* the frame data, computation, and
  19440. + * conditional branching. There is a "special" handler designated to act as the
  19441. + * instigator of the test by creating and enqueuing the "hot potato" frame, and
  19442. + * to determine when the test has completed by counting HP_LOOPS iterations.
  19443. + *
  19444. + * Init phases:
  19445. + *
  19446. + * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
  19447. + * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
  19448. + * handlers and link-list them (but do no other handler setup).
  19449. + *
  19450. + * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
  19451. + * hp_cpu's 'iterator' to point to its first handler. With each loop,
  19452. + * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
  19453. + * and advance the iterator for the next loop. This includes a final fixup,
  19454. + * which connects the last handler to the first (and which is why phase 2
  19455. + * and 3 are separate).
  19456. + *
  19457. + * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
  19458. + * hp_cpu's 'iterator' to point to its first handler. With each loop,
  19459. + * initialise FQ objects and advance the iterator for the next loop.
  19460. + * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
  19461. + * initialisation targets the correct cpu.
  19462. + */
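The mixer scheme described above can be checked in isolation. The stand-alone sketch below is illustrative only (ordinary user-space C, not part of the driver); it uses the same LFSR polynomial as do_lfsr(), and the handler/mixer values are made up. It shows the invariant the test relies on: a frame on the wire is always "original LFSR word XOR sender's tx_mixer", so the receiver can undo it with its own rx_mixer, validate, and re-mix before forwarding.

    #include <assert.h>
    #include <stdint.h>

    static uint32_t lfsr(uint32_t prev)       /* same polynomial as do_lfsr() */
    {
            return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
    }

    int main(void)
    {
            uint32_t word = 0xabbaf00d;       /* HP_FIRST_WORD */
            uint32_t tx_a = 0xdeadbeef;       /* hypothetical handler A tx_mixer */
            uint32_t frame = word ^ tx_a;     /* what A puts on the wire */

            /* Handler B: its rx_mixer equals A's tx_mixer, so un-mixing
             * recovers the original LFSR word for validation. */
            assert((frame ^ tx_a) == word);

            /* B then re-mixes with its own tx_mixer before forwarding */
            uint32_t tx_b = lfsr(tx_a);
            frame = (frame ^ tx_a) ^ tx_b;
            assert((frame ^ tx_b) == word);   /* the next receiver can undo it too */
            return 0;
    }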
  19463. +
  19464. +/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
  19465. + * the fn from irq context, which is too restrictive). */
  19466. +struct bstrap {
  19467. + void (*fn)(void);
  19468. + atomic_t started;
  19469. +};
  19470. +static int bstrap_fn(void *__bstrap)
  19471. +{
  19472. + struct bstrap *bstrap = __bstrap;
  19473. + atomic_inc(&bstrap->started);
  19474. + bstrap->fn();
  19475. + while (!kthread_should_stop())
  19476. + msleep(1);
  19477. + return 0;
  19478. +}
  19479. +static int on_all_cpus(void (*fn)(void))
  19480. +{
  19481. + int cpu;
  19482. + for_each_cpu(cpu, cpu_online_mask) {
  19483. + struct bstrap bstrap = {
  19484. + .fn = fn,
  19485. + .started = ATOMIC_INIT(0)
  19486. + };
  19487. + struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
  19488. + "hotpotato%d", cpu);
  19489. + int ret;
  19490. + if (IS_ERR(k))
  19491. + return -ENOMEM;
  19492. + kthread_bind(k, cpu);
  19493. + wake_up_process(k);
  19494. + /* If we call kthread_stop() before the "wake up" has had an
  19495. + * effect, then the thread may exit with -EINTR without ever
  19496. + * running the function. So poll until it's started before
  19497. + * requesting it to stop. */
  19498. + while (!atomic_read(&bstrap.started))
  19499. + msleep(10);
  19500. + ret = kthread_stop(k);
  19501. + if (ret)
  19502. + return ret;
  19503. + }
  19504. + return 0;
  19505. +}
  19506. +
  19507. +struct hp_handler {
  19508. +
  19509. + /* The following data is stashed when 'rx' is dequeued; */
  19510. + /* -------------- */
  19511. + /* The Rx FQ, dequeues of which will stash the entire hp_handler */
  19512. + struct qman_fq rx;
  19513. + /* The Tx FQ we should forward to */
  19514. + struct qman_fq tx;
  19515. + /* The value we XOR post-dequeue, prior to validating */
  19516. + u32 rx_mixer;
  19517. + /* The value we XOR pre-enqueue, after validating */
  19518. + u32 tx_mixer;
  19519. + /* what the hotpotato address should be on dequeue */
  19520. + dma_addr_t addr;
  19521. + u32 *frame_ptr;
  19522. +
  19523. + /* The following data isn't (necessarily) stashed on dequeue; */
  19524. + /* -------------- */
  19525. + u32 fqid_rx, fqid_tx;
  19526. + /* list node for linking us into 'hp_cpu' */
  19527. + struct list_head node;
  19528. + /* Just to check ... */
  19529. + unsigned int processor_id;
  19530. +} ____cacheline_aligned;
  19531. +
  19532. +struct hp_cpu {
  19533. + /* identify the cpu we run on; */
  19534. + unsigned int processor_id;
  19535. + /* root node for the per-cpu list of handlers */
  19536. + struct list_head handlers;
  19537. + /* list node for linking us into 'hp_cpu_list' */
  19538. + struct list_head node;
  19539. + /* when repeatedly scanning 'hp_cpu_list', each time linking the n'th
  19540. + * handlers together, this is used as per-cpu iterator state */
  19541. + struct hp_handler *iterator;
  19542. +};
  19543. +
  19544. +/* Each cpu has one of these */
  19545. +static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
  19546. +
  19547. +/* links together the hp_cpu structs, in first-come first-serve order. */
  19548. +static LIST_HEAD(hp_cpu_list);
  19549. +static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
  19550. +
  19551. +static unsigned int hp_cpu_list_length;
  19552. +
  19553. +/* the "special" handler, that starts and terminates the test. */
  19554. +static struct hp_handler *special_handler;
  19555. +static int loop_counter;
  19556. +
  19557. +/* handlers are allocated out of this, so they're properly aligned. */
  19558. +static struct kmem_cache *hp_handler_slab;
  19559. +
  19560. +/* this is the frame data */
  19561. +static void *__frame_ptr;
  19562. +static u32 *frame_ptr;
  19563. +static dma_addr_t frame_dma;
  19564. +
  19565. +/* the main function waits on this */
  19566. +static DECLARE_WAIT_QUEUE_HEAD(queue);
  19567. +
  19568. +#define HP_PER_CPU 2
  19569. +#define HP_LOOPS 8
  19570. +/* 320 bytes (HP_NUM_WORDS * 4) of frame data - spans several cachelines */
  19571. +#define HP_NUM_WORDS 80
  19572. +/* First word of the LFSR-based frame data */
  19573. +#define HP_FIRST_WORD 0xabbaf00d
  19574. +
  19575. +static inline u32 do_lfsr(u32 prev)
  19576. +{
  19577. + return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
  19578. +}
  19579. +
  19580. +static void allocate_frame_data(void)
  19581. +{
  19582. + u32 lfsr = HP_FIRST_WORD;
  19583. + int loop;
  19584. + struct platform_device *pdev = platform_device_alloc("foobar", -1);
  19585. + if (!pdev)
  19586. + panic("platform_device_alloc() failed");
  19587. + if (platform_device_add(pdev))
  19588. + panic("platform_device_add() failed");
  19589. + __frame_ptr = kmalloc(4 * HP_NUM_WORDS + 64, GFP_KERNEL); /* room for the 64-byte round-up below */
  19590. + if (!__frame_ptr)
  19591. + panic("kmalloc() failed");
  19592. + frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
  19593. + ~(unsigned long)63);
  19594. + for (loop = 0; loop < HP_NUM_WORDS; loop++) {
  19595. + frame_ptr[loop] = lfsr;
  19596. + lfsr = do_lfsr(lfsr);
  19597. + }
  19598. + frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
  19599. + DMA_BIDIRECTIONAL);
  19600. + platform_device_del(pdev);
  19601. + platform_device_put(pdev);
  19602. +}
  19603. +
  19604. +static void deallocate_frame_data(void)
  19605. +{
  19606. + kfree(__frame_ptr);
  19607. +}
  19608. +
  19609. +static inline void process_frame_data(struct hp_handler *handler,
  19610. + const struct qm_fd *fd)
  19611. +{
  19612. + u32 *p = handler->frame_ptr;
  19613. + u32 lfsr = HP_FIRST_WORD;
  19614. + int loop;
  19615. + if (qm_fd_addr_get64(fd) != handler->addr)
  19616. + panic("bad frame address");
  19617. + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
  19618. + *p ^= handler->rx_mixer;
  19619. + if (*p != lfsr)
  19620. + panic("corrupt frame data");
  19621. + *p ^= handler->tx_mixer;
  19622. + lfsr = do_lfsr(lfsr);
  19623. + }
  19624. +}
  19625. +
  19626. +static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
  19627. + struct qman_fq *fq,
  19628. + const struct qm_dqrr_entry *dqrr)
  19629. +{
  19630. + struct hp_handler *handler = (struct hp_handler *)fq;
  19631. +
  19632. + process_frame_data(handler, &dqrr->fd);
  19633. + if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
  19634. + panic("qman_enqueue() failed");
  19635. + return qman_cb_dqrr_consume;
  19636. +}
  19637. +
  19638. +static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
  19639. + struct qman_fq *fq,
  19640. + const struct qm_dqrr_entry *dqrr)
  19641. +{
  19642. + struct hp_handler *handler = (struct hp_handler *)fq;
  19643. +
  19644. + process_frame_data(handler, &dqrr->fd);
  19645. + if (++loop_counter < HP_LOOPS) {
  19646. + if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
  19647. + panic("qman_enqueue() failed");
  19648. + } else {
  19649. + pr_info("Received final (%dth) frame\n", loop_counter);
  19650. + wake_up(&queue);
  19651. + }
  19652. + return qman_cb_dqrr_consume;
  19653. +}
  19654. +
  19655. +static void create_per_cpu_handlers(void)
  19656. +{
  19657. + struct hp_handler *handler;
  19658. + int loop;
  19659. + struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
  19660. +
  19661. + hp_cpu->processor_id = smp_processor_id();
  19662. + spin_lock(&hp_lock);
  19663. + list_add_tail(&hp_cpu->node, &hp_cpu_list);
  19664. + hp_cpu_list_length++;
  19665. + spin_unlock(&hp_lock);
  19666. + INIT_LIST_HEAD(&hp_cpu->handlers);
  19667. + for (loop = 0; loop < HP_PER_CPU; loop++) {
  19668. + handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
  19669. + if (!handler)
  19670. + panic("kmem_cache_alloc() failed");
  19671. + handler->processor_id = hp_cpu->processor_id;
  19672. + handler->addr = frame_dma;
  19673. + handler->frame_ptr = frame_ptr;
  19674. + list_add_tail(&handler->node, &hp_cpu->handlers);
  19675. + }
  19676. + put_cpu_var(hp_cpus);
  19677. +}
  19678. +
  19679. +static void destroy_per_cpu_handlers(void)
  19680. +{
  19681. + struct list_head *loop, *tmp;
  19682. + struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
  19683. +
  19684. + spin_lock(&hp_lock);
  19685. + list_del(&hp_cpu->node);
  19686. + spin_unlock(&hp_lock);
  19687. + list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
  19688. + u32 flags;
  19689. + struct hp_handler *handler = list_entry(loop, struct hp_handler,
  19690. + node);
  19691. + if (qman_retire_fq(&handler->rx, &flags))
  19692. + panic("qman_retire_fq(rx) failed");
  19693. + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
  19694. + if (qman_oos_fq(&handler->rx))
  19695. + panic("qman_oos_fq(rx) failed");
  19696. + qman_destroy_fq(&handler->rx, 0);
  19697. + qman_destroy_fq(&handler->tx, 0);
  19698. + qman_release_fqid(handler->fqid_rx);
  19699. + list_del(&handler->node);
  19700. + kmem_cache_free(hp_handler_slab, handler);
  19701. + }
  19702. + put_cpu_var(hp_cpus);
  19703. +}
  19704. +
  19705. +static inline u8 num_cachelines(u32 offset)
  19706. +{
  19707. + u8 res = (offset + (L1_CACHE_BYTES - 1))
  19708. + / (L1_CACHE_BYTES);
  19709. + if (res > 3)
  19710. + return 3;
  19711. + return res;
  19712. +}
  19713. +#define STASH_DATA_CL \
  19714. + num_cachelines(HP_NUM_WORDS * 4)
  19715. +#define STASH_CTX_CL \
  19716. + num_cachelines(offsetof(struct hp_handler, fqid_rx))
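To make the stashing arithmetic concrete: assuming L1_CACHE_BYTES is 64, the frame data is HP_NUM_WORDS * 4 = 320 bytes, so num_cachelines() computes (320 + 63) / 64 = 5 and clamps it to 3, giving STASH_DATA_CL = 3. STASH_CTX_CL is derived the same way from offsetof(struct hp_handler, fqid_rx), i.e. only the fields declared above the "not stashed on dequeue" divider in the struct are covered by context stashing.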
  19717. +
  19718. +static void init_handler(void *__handler)
  19719. +{
  19720. + struct qm_mcc_initfq opts;
  19721. + struct hp_handler *handler = __handler;
  19722. + BUG_ON(handler->processor_id != smp_processor_id());
  19723. + /* Set up rx */
  19724. + memset(&handler->rx, 0, sizeof(handler->rx));
  19725. + if (handler == special_handler)
  19726. + handler->rx.cb.dqrr = special_dqrr;
  19727. + else
  19728. + handler->rx.cb.dqrr = normal_dqrr;
  19729. + if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
  19730. + panic("qman_create_fq(rx) failed");
  19731. + memset(&opts, 0, sizeof(opts));
  19732. + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
  19733. + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
  19734. + opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
  19735. + opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
  19736. + if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
  19737. + QMAN_INITFQ_FLAG_LOCAL, &opts))
  19738. + panic("qman_init_fq(rx) failed");
  19739. + /* Set up tx */
  19740. + memset(&handler->tx, 0, sizeof(handler->tx));
  19741. + if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
  19742. + &handler->tx))
  19743. + panic("qman_create_fq(tx) failed");
  19744. +}
  19745. +
  19746. +static void init_phase2(void)
  19747. +{
  19748. + int loop;
  19749. + u32 fqid = 0;
  19750. + u32 lfsr = 0xdeadbeef;
  19751. + struct hp_cpu *hp_cpu;
  19752. + struct hp_handler *handler;
  19753. +
  19754. + for (loop = 0; loop < HP_PER_CPU; loop++) {
  19755. + list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
  19756. + int ret;
  19757. + if (!loop)
  19758. + hp_cpu->iterator = list_first_entry(
  19759. + &hp_cpu->handlers,
  19760. + struct hp_handler, node);
  19761. + else
  19762. + hp_cpu->iterator = list_entry(
  19763. + hp_cpu->iterator->node.next,
  19764. + struct hp_handler, node);
  19765. + /* Rx FQID is the previous handler's Tx FQID */
  19766. + hp_cpu->iterator->fqid_rx = fqid;
  19767. + /* Allocate new FQID for Tx */
  19768. + ret = qman_alloc_fqid(&fqid);
  19769. + if (ret)
  19770. + panic("qman_alloc_fqid() failed");
  19771. + hp_cpu->iterator->fqid_tx = fqid;
  19772. + /* Rx mixer is the previous handler's Tx mixer */
  19773. + hp_cpu->iterator->rx_mixer = lfsr;
  19774. + /* Get new mixer for Tx */
  19775. + lfsr = do_lfsr(lfsr);
  19776. + hp_cpu->iterator->tx_mixer = lfsr;
  19777. + }
  19778. + }
  19779. + /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
  19780. + hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
  19781. + handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
  19782. + BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
  19783. + handler->fqid_rx = fqid;
  19784. + handler->rx_mixer = lfsr;
  19785. + /* and tag it as our "special" handler */
  19786. + special_handler = handler;
  19787. +}
  19788. +
  19789. +static void init_phase3(void)
  19790. +{
  19791. + int loop;
  19792. + struct hp_cpu *hp_cpu;
  19793. +
  19794. + for (loop = 0; loop < HP_PER_CPU; loop++) {
  19795. + list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
  19796. + if (!loop)
  19797. + hp_cpu->iterator = list_first_entry(
  19798. + &hp_cpu->handlers,
  19799. + struct hp_handler, node);
  19800. + else
  19801. + hp_cpu->iterator = list_entry(
  19802. + hp_cpu->iterator->node.next,
  19803. + struct hp_handler, node);
  19804. + preempt_disable();
  19805. + if (hp_cpu->processor_id == smp_processor_id())
  19806. + init_handler(hp_cpu->iterator);
  19807. + else
  19808. + smp_call_function_single(hp_cpu->processor_id,
  19809. + init_handler, hp_cpu->iterator, 1);
  19810. + preempt_enable();
  19811. + }
  19812. + }
  19813. +}
  19814. +
  19815. +static void send_first_frame(void *ignore)
  19816. +{
  19817. + u32 *p = special_handler->frame_ptr;
  19818. + u32 lfsr = HP_FIRST_WORD;
  19819. + int loop;
  19820. + struct qm_fd fd;
  19821. +
  19822. + BUG_ON(special_handler->processor_id != smp_processor_id());
  19823. + memset(&fd, 0, sizeof(fd));
  19824. + qm_fd_addr_set64(&fd, special_handler->addr);
  19825. + fd.format = qm_fd_contig_big;
  19826. + fd.length29 = HP_NUM_WORDS * 4;
  19827. + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
  19828. + if (*p != lfsr)
  19829. + panic("corrupt frame data");
  19830. + *p ^= special_handler->tx_mixer;
  19831. + lfsr = do_lfsr(lfsr);
  19832. + }
  19833. + pr_info("Sending first frame\n");
  19834. + if (qman_enqueue(&special_handler->tx, &fd, 0))
  19835. + panic("qman_enqueue() failed");
  19836. +}
  19837. +
  19838. +void qman_test_hotpotato(void)
  19839. +{
  19840. + if (cpumask_weight(cpu_online_mask) < 2) {
  19841. + pr_info("qman_test_hotpotato, skip - only 1 CPU\n");
  19842. + return;
  19843. + }
  19844. +
  19845. + pr_info("qman_test_hotpotato starting\n");
  19846. +
  19847. + hp_cpu_list_length = 0;
  19848. + loop_counter = 0;
  19849. + hp_handler_slab = kmem_cache_create("hp_handler_slab",
  19850. + sizeof(struct hp_handler), L1_CACHE_BYTES,
  19851. + SLAB_HWCACHE_ALIGN, NULL);
  19852. + if (!hp_handler_slab)
  19853. + panic("kmem_cache_create() failed");
  19854. +
  19855. + allocate_frame_data();
  19856. +
  19857. + /* Init phase 1 */
  19858. + pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
  19859. + if (on_all_cpus(create_per_cpu_handlers))
  19860. + panic("on_each_cpu() failed");
  19861. + pr_info("Number of cpus: %d, total of %d handlers\n",
  19862. + hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
  19863. +
  19864. + init_phase2();
  19865. +
  19866. + init_phase3();
  19867. +
  19868. + preempt_disable();
  19869. + if (special_handler->processor_id == smp_processor_id())
  19870. + send_first_frame(NULL);
  19871. + else
  19872. + smp_call_function_single(special_handler->processor_id,
  19873. + send_first_frame, NULL, 1);
  19874. + preempt_enable();
  19875. +
  19876. + wait_event(queue, loop_counter == HP_LOOPS);
  19877. + deallocate_frame_data();
  19878. + if (on_all_cpus(destroy_per_cpu_handlers))
  19879. + panic("on_each_cpu() failed");
  19880. + kmem_cache_destroy(hp_handler_slab);
  19881. + pr_info("qman_test_hotpotato finished\n");
  19882. +}
  19883. --- /dev/null
  19884. +++ b/drivers/staging/fsl_qbman/qman_utility.c
  19885. @@ -0,0 +1,129 @@
  19886. +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
  19887. + *
  19888. + * Redistribution and use in source and binary forms, with or without
  19889. + * modification, are permitted provided that the following conditions are met:
  19890. + * * Redistributions of source code must retain the above copyright
  19891. + * notice, this list of conditions and the following disclaimer.
  19892. + * * Redistributions in binary form must reproduce the above copyright
  19893. + * notice, this list of conditions and the following disclaimer in the
  19894. + * documentation and/or other materials provided with the distribution.
  19895. + * * Neither the name of Freescale Semiconductor nor the
  19896. + * names of its contributors may be used to endorse or promote products
  19897. + * derived from this software without specific prior written permission.
  19898. + *
  19899. + *
  19900. + * ALTERNATIVELY, this software may be distributed under the terms of the
  19901. + * GNU General Public License ("GPL") as published by the Free Software
  19902. + * Foundation, either version 2 of that License or (at your option) any
  19903. + * later version.
  19904. + *
  19905. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  19906. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  19907. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19908. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  19909. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  19910. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19911. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  19912. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  19913. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  19914. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  19915. + */
  19916. +
  19917. +#include "qman_private.h"
  19918. +
  19919. +/* ----------------- */
  19920. +/* --- FQID Pool --- */
  19921. +
  19922. +struct qman_fqid_pool {
  19923. + /* Base and size of the FQID range */
  19924. + u32 fqid_base;
  19925. + u32 total;
  19926. + /* Number of FQIDs currently "allocated" */
  19927. + u32 used;
  19928. + /* Allocation optimisation. When 'used<total', it is the index of an
  19929. + * available FQID. Otherwise there are no available FQIDs, and this
  19930. + * will be set when the next deallocation occurs. */
  19931. + u32 next;
  19932. + /* A bit-field representation of the FQID range. */
  19933. + unsigned long *bits;
  19934. +};
  19935. +
  19936. +#define QLONG_BYTES sizeof(unsigned long)
  19937. +#define QLONG_BITS (QLONG_BYTES * 8)
  19938. +/* Number of 'longs' required for the given number of bits */
  19939. +#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
  19940. +/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
  19941. +#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
  19942. +/* And in bits */
  19943. +#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
  19944. +
  19945. +struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
  19946. +{
  19947. + struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
  19948. + unsigned int i;
  19949. +
  19950. + BUG_ON(!num);
  19951. + if (!pool)
  19952. + return NULL;
  19953. + pool->fqid_base = fqid_start;
  19954. + pool->total = num;
  19955. + pool->used = 0;
  19956. + pool->next = 0;
  19957. + pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
  19958. + if (!pool->bits) {
  19959. + kfree(pool);
  19960. + return NULL;
  19961. + }
  19962. + /* If num is not an even multiple of QLONG_BITS (or even 8, for
  19963. + * byte-oriented searching) then we fill the trailing bits with 1, to
  19964. + * make them look allocated (permanently). */
  19965. + for (i = num; i < QNUM_BITS(num); i++)
  19966. + set_bit(i, pool->bits);
  19967. + return pool;
  19968. +}
  19969. +EXPORT_SYMBOL(qman_fqid_pool_create);
  19970. +
  19971. +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
  19972. +{
  19973. + int ret = pool->used;
  19974. + kfree(pool->bits);
  19975. + kfree(pool);
  19976. + return ret;
  19977. +}
  19978. +EXPORT_SYMBOL(qman_fqid_pool_destroy);
  19979. +
  19980. +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
  19981. +{
  19982. + int ret;
  19983. + if (pool->used == pool->total)
  19984. + return -ENOMEM;
  19985. + *fqid = pool->fqid_base + pool->next;
  19986. + ret = test_and_set_bit(pool->next, pool->bits);
  19987. + BUG_ON(ret);
  19988. + if (++pool->used == pool->total)
  19989. + return 0;
  19990. + pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
  19991. + if (pool->next >= pool->total)
  19992. + pool->next = find_first_zero_bit(pool->bits, pool->total);
  19993. + BUG_ON(pool->next >= pool->total);
  19994. + return 0;
  19995. +}
  19996. +EXPORT_SYMBOL(qman_fqid_pool_alloc);
  19997. +
  19998. +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
  19999. +{
  20000. + int ret;
  20001. +
  20002. + fqid -= pool->fqid_base;
  20003. + ret = test_and_clear_bit(fqid, pool->bits);
  20004. + BUG_ON(!ret);
  20005. + if (pool->used-- == pool->total)
  20006. + pool->next = fqid;
  20007. +}
  20008. +EXPORT_SYMBOL(qman_fqid_pool_free);
  20009. +
  20010. +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
  20011. +{
  20012. + return pool->used;
  20013. +}
  20014. +EXPORT_SYMBOL(qman_fqid_pool_used);
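A minimal usage sketch of this pool API (the range 0x100/32 and the function name are just examples; the prototypes are assumed to come from qman_private.h as in this file):

    static int fqid_pool_example(void)
    {
            struct qman_fqid_pool *pool;
            u32 fqid;

            /* manage the (arbitrary) 32-FQID range starting at 0x100 */
            pool = qman_fqid_pool_create(0x100, 32);
            if (!pool)
                    return -ENOMEM;
            if (!qman_fqid_pool_alloc(pool, &fqid))         /* 0 on success */
                    pr_info("got FQID 0x%x, %u in use\n", fqid,
                            qman_fqid_pool_used(pool));
            qman_fqid_pool_free(pool, fqid);
            /* destroy reports how many FQIDs were still allocated */
            return qman_fqid_pool_destroy(pool) ? -EBUSY : 0;
    }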
  20015. --- /dev/null
  20016. +++ b/include/linux/fsl_bman.h
  20017. @@ -0,0 +1,532 @@
  20018. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  20019. + *
  20020. + * Redistribution and use in source and binary forms, with or without
  20021. + * modification, are permitted provided that the following conditions are met:
  20022. + * * Redistributions of source code must retain the above copyright
  20023. + * notice, this list of conditions and the following disclaimer.
  20024. + * * Redistributions in binary form must reproduce the above copyright
  20025. + * notice, this list of conditions and the following disclaimer in the
  20026. + * documentation and/or other materials provided with the distribution.
  20027. + * * Neither the name of Freescale Semiconductor nor the
  20028. + * names of its contributors may be used to endorse or promote products
  20029. + * derived from this software without specific prior written permission.
  20030. + *
  20031. + *
  20032. + * ALTERNATIVELY, this software may be distributed under the terms of the
  20033. + * GNU General Public License ("GPL") as published by the Free Software
  20034. + * Foundation, either version 2 of that License or (at your option) any
  20035. + * later version.
  20036. + *
  20037. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  20038. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  20039. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  20040. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  20041. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  20042. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  20043. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  20044. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  20045. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  20046. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  20047. + */
  20048. +
  20049. +#ifndef FSL_BMAN_H
  20050. +#define FSL_BMAN_H
  20051. +
  20052. +#ifdef __cplusplus
  20053. +extern "C" {
  20054. +#endif
  20055. +
  20056. +/* Last updated for v00.79 of the BG */
  20057. +
  20058. +/* Portal processing (interrupt) sources */
  20059. +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
  20060. +#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
  20061. +
  20062. +/* This wrapper represents a bit-array for the depletion state of the 64 Bman
  20063. + * buffer pools. */
  20064. +struct bman_depletion {
  20065. + u32 __state[2];
  20066. +};
  20067. +#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
  20068. +#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
  20069. +#define __bmdep_word(x) ((x) >> 5)
  20070. +#define __bmdep_shift(x) ((x) & 0x1f)
  20071. +#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
  20072. +static inline void bman_depletion_init(struct bman_depletion *c)
  20073. +{
  20074. + c->__state[0] = c->__state[1] = 0;
  20075. +}
  20076. +static inline void bman_depletion_fill(struct bman_depletion *c)
  20077. +{
  20078. + c->__state[0] = c->__state[1] = ~0;
  20079. +}
  20080. +static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
  20081. +{
  20082. + return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
  20083. +}
  20084. +static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
  20085. +{
  20086. + c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
  20087. +}
  20088. +static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
  20089. +{
  20090. + c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
  20091. +}
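For example, a caller composing a mask of "interesting" pools (as used later in struct bman_portal_config) might do something like this sketch; the bpid values are arbitrary:

    static void depletion_mask_example(void)
    {
            struct bman_depletion mask;

            bman_depletion_init(&mask);             /* all 64 bits clear */
            bman_depletion_set(&mask, 3);           /* bpids are 0..63 */
            bman_depletion_set(&mask, 42);
            if (bman_depletion_get(&mask, 42))
                    pr_info("bpid 42 is selected\n");
            bman_depletion_unset(&mask, 3);
    }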
  20092. +
  20093. +/* ------------------------------------------------------- */
  20094. +/* --- Bman data structures (and associated constants) --- */
  20095. +
  20096. +/* Represents s/w corenet portal mapped data structures */
  20097. +struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
  20098. +struct bm_mc_command; /* MC (Management Command) command */
  20099. +struct bm_mc_result; /* MC result */
  20100. +
  20101. +/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
  20102. + * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
  20103. + * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
  20104. +struct bm_buffer {
  20105. + union {
  20106. + struct {
  20107. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20108. + u8 __reserved1;
  20109. + u8 bpid;
  20110. + u16 hi; /* High 16-bits of 48-bit address */
  20111. + u32 lo; /* Low 32-bits of 48-bit address */
  20112. +#else
  20113. + u32 lo;
  20114. + u16 hi;
  20115. + u8 bpid;
  20116. + u8 __reserved;
  20117. +#endif
  20118. + };
  20119. + struct {
  20120. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20121. + u64 __notaddress:16;
  20122. + u64 addr:48;
  20123. +#else
  20124. + u64 addr:48;
  20125. + u64 __notaddress:16;
  20126. +#endif
  20127. + };
  20128. + u64 opaque;
  20129. + };
  20130. +} __aligned(8);
  20131. +static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
  20132. +{
  20133. + return buf->addr;
  20134. +}
  20135. +static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
  20136. +{
  20137. + return (dma_addr_t)buf->addr;
  20138. +}
  20139. +/* Macro, so we compile better if 'v' isn't always 64-bit */
  20140. +#define bm_buffer_set64(buf, v) \
  20141. + do { \
  20142. + struct bm_buffer *__buf931 = (buf); \
  20143. + __buf931->hi = upper_32_bits(v); \
  20144. + __buf931->lo = lower_32_bits(v); \
  20145. + } while (0)
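As an illustration (the bpid and the function name are made up, and the dma_addr_t is assumed to come from the caller's own mapping), a release-side user typically fills the descriptor like so, with the accessors recovering the 48-bit address afterwards:

    static void bm_buffer_example(dma_addr_t addr)
    {
            struct bm_buffer buf;

            memset(&buf, 0, sizeof(buf));
            bm_buffer_set64(&buf, addr);    /* stores the address via hi/lo */
            buf.bpid = 7;                   /* only meaningful for the per-buffer-bpid forms */
            pr_debug("buffer address 0x%llx\n", bm_buffer_get64(&buf));
    }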
  20146. +
  20147. +/* See 1.5.3.5.4: "Release Command" */
  20148. +struct bm_rcr_entry {
  20149. + union {
  20150. + struct {
  20151. + u8 __dont_write_directly__verb;
  20152. + u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
  20153. + u8 __reserved1[62];
  20154. + };
  20155. + struct bm_buffer bufs[8];
  20156. + };
  20157. +} __packed;
  20158. +#define BM_RCR_VERB_VBIT 0x80
  20159. +#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
  20160. +#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
  20161. +#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
  20162. +#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
  20163. +
  20164. +/* See 1.5.3.1: "Acquire Command" */
  20165. +/* See 1.5.3.2: "Query Command" */
  20166. +struct bm_mcc_acquire {
  20167. + u8 bpid;
  20168. + u8 __reserved1[62];
  20169. +} __packed;
  20170. +struct bm_mcc_query {
  20171. + u8 __reserved2[63];
  20172. +} __packed;
  20173. +struct bm_mc_command {
  20174. + u8 __dont_write_directly__verb;
  20175. + union {
  20176. + struct bm_mcc_acquire acquire;
  20177. + struct bm_mcc_query query;
  20178. + };
  20179. +} __packed;
  20180. +#define BM_MCC_VERB_VBIT 0x80
  20181. +#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
  20182. +#define BM_MCC_VERB_CMD_ACQUIRE 0x10
  20183. +#define BM_MCC_VERB_CMD_QUERY 0x40
  20184. +#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
  20185. +
  20186. +/* See 1.5.3.3: "Acquire Response" */
  20187. +/* See 1.5.3.4: "Query Response" */
  20188. +struct bm_pool_state {
  20189. + u8 __reserved1[32];
  20190. + /* "availability state" and "depletion state" */
  20191. + struct {
  20192. + u8 __reserved1[8];
  20193. + /* Access using bman_depletion_***() */
  20194. + struct bman_depletion state;
  20195. + } as, ds;
  20196. +};
  20197. +struct bm_mc_result {
  20198. + union {
  20199. + struct {
  20200. + u8 verb;
  20201. + u8 __reserved1[63];
  20202. + };
  20203. + union {
  20204. + struct {
  20205. + u8 __reserved1;
  20206. + u8 bpid;
  20207. + u8 __reserved2[62];
  20208. + };
  20209. + struct bm_buffer bufs[8];
  20210. + } acquire;
  20211. + struct bm_pool_state query;
  20212. + };
  20213. +} __packed;
  20214. +#define BM_MCR_VERB_VBIT 0x80
  20215. +#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
  20216. +#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
  20217. +#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
  20218. +#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
  20219. +#define BM_MCR_VERB_CMD_ERR_ECC 0x70
  20220. +#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
  20221. +/* Determine the "availability state" of pool 'p' from a query result 'r' */
  20222. +#define BM_MCR_QUERY_AVAILABILITY(r, p) \
  20223. + bman_depletion_get(&r->query.as.state, p)
  20224. +/* Determine the "depletion state" of pool 'p' from a query result 'r' */
  20225. +#define BM_MCR_QUERY_DEPLETION(r, p) \
  20226. + bman_depletion_get(&r->query.ds.state, p)
  20227. +
  20228. +/*******************************************************************/
  20229. +/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
  20230. +/*******************************************************************/
  20231. +
  20232. + /* Portal and Buffer Pools */
  20233. + /* ----------------------- */
  20234. +/* Represents a managed portal */
  20235. +struct bman_portal;
  20236. +
  20237. +/* This object type represents Bman buffer pools. */
  20238. +struct bman_pool;
  20239. +
  20240. +struct bman_portal_config {
  20241. + /* This is used for any "core-affine" portals, ie. default portals
  20242. + * associated to the corresponding cpu. -1 implies that there is no core
  20243. + * affinity configured. */
  20244. + int cpu;
  20245. + /* portal interrupt line */
  20246. + int irq;
  20247. + /* the unique index of this portal */
  20248. + u32 index;
  20249. + /* Is this portal shared? (If so, it has coarser locking and demuxes
  20250. + * processing on behalf of other CPUs.) */
  20251. + int is_shared;
  20252. + /* These are the buffer pool IDs that may be used via this portal. */
  20253. + struct bman_depletion mask;
  20254. +};
  20255. +
  20256. +/* This callback type is used when handling pool depletion entry/exit. The
  20257. + * 'cb_ctx' value is the opaque value associated with the pool object in
  20258. + * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
  20259. + * depletion-exit. */
  20260. +typedef void (*bman_cb_depletion)(struct bman_portal *bm,
  20261. + struct bman_pool *pool, void *cb_ctx, int depleted);
  20262. +
  20263. +/* This struct specifies parameters for a bman_pool object. */
  20264. +struct bman_pool_params {
  20265. + /* index of the buffer pool to encapsulate (0-63), ignored if
  20266. + * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
  20267. + u32 bpid;
  20268. + /* bit-mask of BMAN_POOL_FLAG_*** options */
  20269. + u32 flags;
  20270. + /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
  20271. + bman_cb_depletion cb;
  20272. + /* opaque user value passed as a parameter to 'cb' */
  20273. + void *cb_ctx;
  20274. + /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
  20275. + * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
  20276. + * when run in the control plane (which controls Bman CCSR). This array
  20277. + * matches the definition of bm_pool_set(). */
  20278. + u32 thresholds[4];
  20279. +};
  20280. +
  20281. +/* Flags to bman_new_pool() */
  20282. +#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
  20283. +#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
  20284. +#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
  20285. +#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
  20286. +#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
  20287. +#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
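For illustration, wiring up the depletion callback described above might look like the sketch below (my_depletion_cb and the flag choice are examples only; bman_new_pool() is declared further down):

    static void my_depletion_cb(struct bman_portal *bm, struct bman_pool *pool,
                                void *cb_ctx, int depleted)
    {
            /* 'depleted' is non-zero on depletion-entry, zero on exit */
            pr_info("pool %s depletion\n", depleted ? "entered" : "exited");
    }

    static const struct bman_pool_params depletion_params = {
            .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_DEPLETION,
            .cb = my_depletion_cb,
            .cb_ctx = NULL,
    };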
  20288. +
  20289. +/* Flags to bman_release() */
  20290. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  20291. +#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
  20292. +#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
  20293. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  20294. +#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
  20295. +#endif
  20296. +#endif
  20297. +#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
  20298. +
  20299. +/* Flags to bman_acquire() */
  20300. +#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
  20301. +
  20302. + /* Portal Management */
  20303. + /* ----------------- */
  20304. +/**
  20305. + * bman_get_portal_config - get portal configuration settings
  20306. + *
  20307. + * This returns a read-only view of the current cpu's affine portal settings.
  20308. + */
  20309. +const struct bman_portal_config *bman_get_portal_config(void);
  20310. +
  20311. +/**
  20312. + * bman_irqsource_get - return the portal work that is interrupt-driven
  20313. + *
  20314. + * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
  20315. + * enabled for interrupt handling on the current cpu's affine portal. These
  20316. + * sources will trigger the portal interrupt and the interrupt handler (or a
  20317. + * tasklet/bottom-half it defers to) will perform the corresponding processing
  20318. + * work. The bman_poll_***() functions will only process sources that are not in
  20319. + * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
  20320. + * this always returns zero.
  20321. + */
  20322. +u32 bman_irqsource_get(void);
  20323. +
  20324. +/**
  20325. + * bman_irqsource_add - add processing sources to be interrupt-driven
  20326. + * @bits: bitmask of BM_PIRQ_**I processing sources
  20327. + *
  20328. + * Adds processing sources that should be interrupt-driven (rather than
  20329. + * processed via bman_poll_***() functions). Returns zero for success, or
  20330. + * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
  20331. +int bman_irqsource_add(u32 bits);
  20332. +
  20333. +/**
  20334. + * bman_irqsource_remove - remove processing sources from being interrupt-driven
  20335. + * @bits: bitmask of BM_PIRQ_**I processing sources
  20336. + *
  20337. + * Removes processing sources from being interrupt-driven, so that they will
  20338. + * instead be processed via bman_poll_***() functions. Returns zero for success,
  20339. + * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
  20340. +int bman_irqsource_remove(u32 bits);
  20341. +
  20342. +/**
  20343. + * bman_affine_cpus - return a mask of cpus that have affine portals
  20344. + */
  20345. +const cpumask_t *bman_affine_cpus(void);
  20346. +
  20347. +/**
  20348. + * bman_poll_slow - process anything that isn't interrupt-driven.
  20349. + *
  20350. + * This function does any portal processing that isn't interrupt-driven. If the
  20351. + * current CPU is sharing a portal hosted on another CPU, this function will
  20352. + * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
  20353. + * indicating what interrupt sources were actually processed by the call.
  20354. + *
  20355. + * NB, unlike the legacy wrapper bman_poll(), this function will
  20356. + * deterministically check for the presence of portal processing work and do it,
  20357. + * which implies some latency even if there's nothing to do. The bman_poll()
  20358. + * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
  20359. + * checking for (and doing) portal processing infrequently. Ie. such that
  20360. + * qman_poll() and bman_poll() can be called from core-processing loops. Use
  20361. + * bman_poll_slow() when you yourself are deciding when to incur the overhead of
  20362. + * processing.
  20363. + */
  20364. +u32 bman_poll_slow(void);
  20365. +
  20366. +/**
  20367. + * bman_poll - process anything that isn't interrupt-driven.
  20368. + *
  20369. + * Dispatcher logic on a cpu can use this to trigger any maintenance of the
  20370. + * affine portal. This function does whatever processing is not triggered by
  20371. + * interrupts. This is a legacy wrapper that can be used in core-processing
  20372. + * loops but mitigates the performance overhead of portal processing by
  20373. + * adaptively bypassing true portal processing most of the time. (Processing is
  20374. + * done once every 10 calls if the previous processing revealed that work needed
  20375. + * to be done, or once every 1000 calls if the previous processing revealed no
  20376. + * work needed doing.) If you wish to control this yourself, call
  20377. + * bman_poll_slow() instead, which always checks for portal processing work.
  20378. + */
  20379. +void bman_poll(void);
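A sketch of the typical split, with depletion notifications left interrupt-driven and everything else picked up from a run-to-completion loop; the kthread context and the loop condition are assumptions about the caller, not part of this API:

    static void portal_dispatch_loop(void)
    {
            /* let depletion state changes arrive via the portal interrupt ... */
            if (bman_irqsource_add(BM_PIRQ_BSCN))
                    pr_err("affine portal is hosted on another cpu\n");
            /* ... and poll for the rest from the fast path */
            while (!kthread_should_stop()) {
                    bman_poll();
                    /* application work goes here */
            }
    }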
  20380. +
  20381. +/**
  20382. + * bman_rcr_is_empty - Determine if portal's RCR is empty
  20383. + *
  20384. + * For use in situations where a cpu-affine caller needs to determine when all
  20385. + * releases for the local portal have been processed by Bman but can't use the
  20386. + * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
  20387. + * The function forces tracking of RCR consumption (which normally doesn't
  20388. + * happen until release processing needs to find space to put new release
  20389. + * commands), and returns zero if the ring still has unprocessed entries,
  20390. + * non-zero if it is empty.
  20391. + */
  20392. +int bman_rcr_is_empty(void);
  20393. +
  20394. +/**
  20395. + * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
  20396. + * @result: is set by the API to the base BPID of the allocated range
  20397. + * @count: the number of BPIDs required
  20398. + * @align: required alignment of the allocated range
  20399. + * @partial: non-zero if the API can return fewer than @count BPIDs
  20400. + *
  20401. + * Returns the number of buffer pools allocated, or a negative error code. If
  20402. + * @partial is non-zero, the allocation request may return a smaller range of
  20403. + * BPs than requested (though alignment will be as requested). If @partial is
  20404. + * zero, the return value will either be 'count' or negative.
  20405. + */
  20406. +int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
  20407. +static inline int bman_alloc_bpid(u32 *result)
  20408. +{
  20409. + int ret = bman_alloc_bpid_range(result, 1, 0, 0);
  20410. + return (ret > 0) ? 0 : ret;
  20411. +}
  20412. +
  20413. +/**
  20414. + * bman_release_bpid_range - Release the specified range of buffer pool IDs
  20415. + * @bpid: the base BPID of the range to deallocate
  20416. + * @count: the number of BPIDs in the range
  20417. + *
  20418. + * This function can also be used to seed the allocator with ranges of BPIDs
  20419. + * that it can subsequently allocate from.
  20420. + */
  20421. +void bman_release_bpid_range(u32 bpid, unsigned int count);
  20422. +static inline void bman_release_bpid(u32 bpid)
  20423. +{
  20424. + bman_release_bpid_range(bpid, 1);
  20425. +}
  20426. +
  20427. +int bman_reserve_bpid_range(u32 bpid, unsigned int count);
  20428. +static inline int bman_reserve_bpid(u32 bpid)
  20429. +{
  20430. + return bman_reserve_bpid_range(bpid, 1);
  20431. +}
  20432. +
  20433. +void bman_seed_bpid_range(u32 bpid, unsigned int count);
  20434. +
  20435. +
  20436. +int bman_shutdown_pool(u32 bpid);
  20437. +
  20438. + /* Pool management */
  20439. + /* --------------- */
  20440. +/**
  20441. + * bman_new_pool - Allocates a Buffer Pool object
  20442. + * @params: parameters specifying the buffer pool ID and behaviour
  20443. + *
  20444. + * Creates a pool object for the given @params. A portal and the depletion
  20445. + * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
  20446. + * is set. NB, the fields from @params are copied into the new pool object, so
  20447. + * the structure provided by the caller can be released or reused after the
  20448. + * function returns.
  20449. + */
  20450. +struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
  20451. +
  20452. +/**
  20453. + * bman_free_pool - Deallocates a Buffer Pool object
  20454. + * @pool: the pool object to release
  20455. + *
  20456. + */
  20457. +void bman_free_pool(struct bman_pool *pool);
  20458. +
  20459. +/**
  20460. + * bman_get_params - Returns a pool object's parameters.
  20461. + * @pool: the pool object
  20462. + *
  20463. + * The returned pointer refers to state within the pool object so must not be
  20464. + * modified and can no longer be read once the pool object is destroyed.
  20465. + */
  20466. +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
  20467. +
  20468. +/**
  20469. + * bman_release - Release buffer(s) to the buffer pool
  20470. + * @pool: the buffer pool object to release to
  20471. + * @bufs: an array of buffers to release
  20472. + * @num: the number of buffers in @bufs (1-8)
  20473. + * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
  20474. + *
  20475. + * Adds the given buffers to RCR entries. If the affine portal was created with the
  20476. + * "COMPACT" flag, then it will be using a compaction algorithm to improve
  20477. + * utilisation of RCR. As such, these buffers may join an existing ring entry
  20478. + * and/or it may not be issued right away so as to allow future releases to join
  20479. + * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
  20480. + * behaviour by committing the RCR entry (or entries) right away. If the RCR
  20481. + * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
  20482. + * is selected, in which case it will sleep waiting for space to become
  20483. + * available in RCR. If the function receives a signal before such time (and
  20484. + * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
  20485. + * it returns zero.
  20486. + */
  20487. +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
  20488. + u32 flags);
  20489. +
  20490. +/**
  20491. + * bman_acquire - Acquire buffer(s) from a buffer pool
  20492. + * @pool: the buffer pool object to acquire from
  20493. + * @bufs: array for storing the acquired buffers
  20494. + * @num: the number of buffers desired (@bufs is at least this big)
  20495. + *
  20496. + * Issues an "Acquire" command via the portal's management command interface.
  20497. + * The return value will be the number of buffers obtained from the pool, or a
  20498. + * negative error code if a h/w error or pool starvation was encountered. In
  20499. + * the latter case, the content of @bufs is undefined.
  20500. + */
  20501. +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
  20502. + u32 flags);
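Taken together, a minimal single-buffer round trip through this API looks roughly like the sketch below (error handling elided; the function name is hypothetical and the dma_addr_t is assumed to come from the caller's own mapping):

    static int pool_roundtrip_example(dma_addr_t addr)
    {
            struct bman_pool_params params = {
                    .flags = BMAN_POOL_FLAG_DYNAMIC_BPID,  /* let the driver pick a bpid */
            };
            struct bman_pool *pool;
            struct bm_buffer buf;
            int ret;

            pool = bman_new_pool(&params);
            if (!pool)
                    return -ENODEV;

            bm_buffer_set64(&buf, addr);
            ret = bman_release(pool, &buf, 1, BMAN_RELEASE_FLAG_NOW);
            if (!ret)
                    ret = bman_acquire(pool, &buf, 1, 0);  /* #buffers or -ve error */
            bman_free_pool(pool);
            return ret < 0 ? ret : 0;
    }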
  20503. +
  20504. +/**
  20505. + * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
  20506. + * @pool: the buffer pool object the stockpile belongs
  20507. + * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
  20508. + *
  20509. + * Adds stockpile buffers to RCR entries until the stockpile is empty.
  20510. + * The return value will be a negative error code if a h/w error occurred.
  20511. + * If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
  20512. + * -EAGAIN will be returned.
  20513. + */
  20514. +int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
  20515. +
  20516. +/**
  20517. + * bman_query_pools - Query all buffer pool states
  20518. + * @state: storage for the queried availability and depletion states
  20519. + */
  20520. +int bman_query_pools(struct bm_pool_state *state);
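For instance, checking the depletion state of one (arbitrary) pool ID via this query might look like the following sketch:

    static bool bpid_is_depleted(u8 bpid)
    {
            struct bm_pool_state state;

            if (bman_query_pools(&state))
                    return false;           /* query failed; assume not depleted */
            return bman_depletion_get(&state.ds.state, bpid) != 0;
    }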
  20521. +
  20522. +#ifdef CONFIG_FSL_BMAN_CONFIG
  20523. +/**
  20524. + * bman_query_free_buffers - Query how many free buffers are in buffer pool
  20525. + * @pool: the buffer pool object to query
  20526. + *
  20527. + * Return the number of the free buffers
  20528. + */
  20529. +u32 bman_query_free_buffers(struct bman_pool *pool);
  20530. +
  20531. +/**
  20532. + * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
  20533. + * @pool: the buffer pool object to which the thresholds will be set
  20534. + * @thresholds: the new thresholds
  20535. + */
  20536. +int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
  20537. +#endif
  20538. +
  20539. +/**
  20540. + * The bman_p_***() variant below may be called even when the cpu to which the
  20541. + * portal is affine is not yet online.
  20542. + * @bman_portal specifies which portal the API will use.
  20543. + */
  20544. +int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
  20545. +#ifdef __cplusplus
  20546. +}
  20547. +#endif
  20548. +
  20549. +#endif /* FSL_BMAN_H */
  20550. --- /dev/null
  20551. +++ b/include/linux/fsl_qman.h
  20552. @@ -0,0 +1,3889 @@
  20553. +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
  20554. + *
  20555. + * Redistribution and use in source and binary forms, with or without
  20556. + * modification, are permitted provided that the following conditions are met:
  20557. + * * Redistributions of source code must retain the above copyright
  20558. + * notice, this list of conditions and the following disclaimer.
  20559. + * * Redistributions in binary form must reproduce the above copyright
  20560. + * notice, this list of conditions and the following disclaimer in the
  20561. + * documentation and/or other materials provided with the distribution.
  20562. + * * Neither the name of Freescale Semiconductor nor the
  20563. + * names of its contributors may be used to endorse or promote products
  20564. + * derived from this software without specific prior written permission.
  20565. + *
  20566. + *
  20567. + * ALTERNATIVELY, this software may be distributed under the terms of the
  20568. + * GNU General Public License ("GPL") as published by the Free Software
  20569. + * Foundation, either version 2 of that License or (at your option) any
  20570. + * later version.
  20571. + *
  20572. + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  20573. + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  20574. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  20575. + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  20576. + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  20577. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  20578. + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  20579. + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  20580. + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  20581. + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  20582. + */
  20583. +
  20584. +#ifndef FSL_QMAN_H
  20585. +#define FSL_QMAN_H
  20586. +
  20587. +#ifdef __cplusplus
  20588. +extern "C" {
  20589. +#endif
  20590. +
  20591. +/* Last updated for v00.800 of the BG */
  20592. +
  20593. +/* Hardware constants */
  20594. +#define QM_CHANNEL_SWPORTAL0 0
  20595. +#define QMAN_CHANNEL_POOL1 0x21
  20596. +#define QMAN_CHANNEL_CAAM 0x80
  20597. +#define QMAN_CHANNEL_PME 0xa0
  20598. +#define QMAN_CHANNEL_POOL1_REV3 0x401
  20599. +#define QMAN_CHANNEL_CAAM_REV3 0x840
  20600. +#define QMAN_CHANNEL_PME_REV3 0x860
  20601. +#define QMAN_CHANNEL_DCE 0x8a0
  20602. +#define QMAN_CHANNEL_DCE_QMANREV312 0x880
  20603. +extern u16 qm_channel_pool1;
  20604. +extern u16 qm_channel_caam;
  20605. +extern u16 qm_channel_pme;
  20606. +extern u16 qm_channel_dce;
  20607. +enum qm_dc_portal {
  20608. + qm_dc_portal_fman0 = 0,
  20609. + qm_dc_portal_fman1 = 1,
  20610. + qm_dc_portal_caam = 2,
  20611. + qm_dc_portal_pme = 3,
  20612. + qm_dc_portal_rman = 4,
  20613. + qm_dc_portal_dce = 5
  20614. +};
  20615. +
  20616. +/* Portal processing (interrupt) sources */
  20617. +#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
  20618. +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
  20619. +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
  20620. +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
  20621. +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
  20622. +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
  20623. +/* This mask contains all the interrupt sources that need handling except DQRI,
  20624. + * ie. that if present should trigger slow-path processing. */
  20625. +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
  20626. + QM_PIRQ_MRI | QM_PIRQ_CCSCI)
  20627. +
  20628. +/* --- Clock speed --- */
  20629. +/* A qman driver instance may or may not know the current qman clock speed.
  20630. + * However, certain CEETM calculations may not be possible if this is not known.
  20631. + * The 'set' function will only succeed (return zero) if the driver did not
  20632. + * already know the clock speed. Likewise, the 'get' function will only succeed
  20633. + * if the driver does know the clock speed (either because it knew when booting,
  20634. + * or was told via 'set'). In cases where software is running on a driver
  20635. + * instance that does not know the clock speed (eg. on a hypervised data-plane),
  20636. + * and the user can obtain the current qman clock speed by other means (eg. from
  20637. + * a message sent from the control-plane), then the 'set' function can be used
  20638. + * to enable rate-calculations in a driver where it would otherwise not be
  20639. + * possible. */
  20640. +int qm_get_clock(u64 *clock_hz);
  20641. +int qm_set_clock(u64 clock_hz);
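As a sketch of the set/get behaviour described above, a hypervised data-plane instance that learns the clock rate from its control-plane could record it as follows (example_learn_clock() is a hypothetical platform-specific helper; the printouts are illustrative):

	u64 hz;

	/* 'set' only succeeds if the driver did not already know the speed */
	if (qm_set_clock(example_learn_clock()) == 0)
		pr_info("qman clock rate recorded\n");

	/* thereafter, 'get' reports the now-known speed */
	if (qm_get_clock(&hz) == 0)
		pr_info("qman running at %llu Hz\n", (unsigned long long)hz);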
  20642. +
  20643. +/* For qman_static_dequeue_*** APIs */
  20644. +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
  20645. +/* for n in [1,15] */
  20646. +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
  20647. +/* for conversion from n of qm_channel */
  20648. +static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
  20649. +{
  20650. + return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
  20651. +}
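A quick illustration of the conversion above: both lines below select pool channel 3 for static dequeue, assuming qm_channel_pool1 holds the first pool channel's value as declared earlier.

	u32 sdqcr_a = QM_SDQCR_CHANNELS_POOL(3);
	u32 sdqcr_b = QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1 + 2);
	/* sdqcr_a == sdqcr_b == 0x00001000 */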
  20652. +
  20653. +/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
  20654. + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
  20655. + * FQID(n) to fill in the frame queue ID. */
  20656. +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
  20657. +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
  20658. +#define QM_VDQCR_EXACT 0x40000000
  20659. +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
  20660. +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
  20661. +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
  20662. +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
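Putting the pieces together, a volatile-dequeue command word for up to 8 frames might be composed as in the sketch below; the 24-bit frame queue ID is filled in with the FQID(n) helper referred to in the comment above.

	u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR |	/* one PRECEDENCE is required */
		    QM_VDQCR_NUMFRAMES_SET(8);	/* or NUMFRAMES_TILLEMPTY */
	/* QM_VDQCR_EXACT is optional and may be OR'd in as well */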
  20663. +
  20664. +
  20665. +/* ------------------------------------------------------- */
  20666. +/* --- Qman data structures (and associated constants) --- */
  20667. +
  20668. +/* Represents s/w corenet portal mapped data structures */
  20669. +struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
  20670. +struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
  20671. +struct qm_mr_entry; /* MR (Message Ring) entries */
  20672. +struct qm_mc_command; /* MC (Management Command) command */
  20673. +struct qm_mc_result; /* MC result */
  20674. +
  20675. +/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
  20676. +#define QM_FD_FORMAT_SG 0x4
  20677. +#define QM_FD_FORMAT_LONG 0x2
  20678. +#define QM_FD_FORMAT_COMPOUND 0x1
  20679. +enum qm_fd_format {
  20680. + /* 'contig' implies a contiguous buffer, whereas 'sg' implies a
  20681. + * scatter-gather table. 'big' implies a 29-bit length with no offset
  20682. + * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
  20683. + * implies a s/g-like table, where each entry itself represents a frame
  20684. + * (contiguous or scatter-gather) and the 29-bit "length" is
  20685. + * interpreted purely for congestion calculations, ie. a "congestion
  20686. + * weight". */
  20687. + qm_fd_contig = 0,
  20688. + qm_fd_contig_big = QM_FD_FORMAT_LONG,
  20689. + qm_fd_sg = QM_FD_FORMAT_SG,
  20690. + qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
  20691. + qm_fd_compound = QM_FD_FORMAT_COMPOUND
  20692. +};
  20693. +
  20694. +/* Capitalised versions are un-typed but can be used in static expressions */
  20695. +#define QM_FD_CONTIG 0
  20696. +#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
  20697. +#define QM_FD_SG QM_FD_FORMAT_SG
  20698. +#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
  20699. +#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
  20700. +
  20701. +/* See 1.5.1.1: "Frame Descriptor (FD)" */
  20702. +struct qm_fd {
  20703. + union {
  20704. + struct {
  20705. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20706. + u8 dd:2; /* dynamic debug */
  20707. + u8 liodn_offset:6;
  20708. + u8 bpid:8; /* Buffer Pool ID */
  20709. + u8 eliodn_offset:4;
  20710. + u8 __reserved:4;
  20711. + u8 addr_hi; /* high 8-bits of 40-bit address */
  20712. + u32 addr_lo; /* low 32-bits of 40-bit address */
  20713. +#else
  20714. + u8 liodn_offset:6;
  20715. + u8 dd:2; /* dynamic debug */
  20716. + u8 bpid:8; /* Buffer Pool ID */
  20717. + u8 __reserved:4;
  20718. + u8 eliodn_offset:4;
  20719. + u8 addr_hi; /* high 8-bits of 40-bit address */
  20720. + u32 addr_lo; /* low 32-bits of 40-bit address */
  20721. +#endif
  20722. + };
  20723. + struct {
  20724. + u64 __notaddress:24;
  20725. + /* More efficient address accessor */
  20726. + u64 addr:40;
  20727. + };
  20728. + u64 opaque_addr;
  20729. + };
  20730. + /* The 'format' field indicates the interpretation of the remaining 29
  20731. + * bits of the 32-bit word. For packing reasons, it is duplicated in the
  20732. + * other union elements. Note, union'd structs are difficult to use with
  20733. + * static initialisation under gcc, in which case use the "opaque" form
  20734. + * with one of the macros. */
  20735. + union {
  20736. + /* For easier/faster copying of this part of the fd (eg. from a
  20737. + * DQRR entry to an EQCR entry) copy 'opaque' */
  20738. + u32 opaque;
  20739. + /* If 'format' is _contig or _sg, 20b length and 9b offset */
  20740. + struct {
  20741. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20742. + enum qm_fd_format format:3;
  20743. + u16 offset:9;
  20744. + u32 length20:20;
  20745. +#else
  20746. + u32 length20:20;
  20747. + u16 offset:9;
  20748. + enum qm_fd_format format:3;
  20749. +#endif
  20750. + };
  20751. + /* If 'format' is _contig_big or _sg_big, 29b length */
  20752. + struct {
  20753. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20754. + enum qm_fd_format _format1:3;
  20755. + u32 length29:29;
  20756. +#else
  20757. + u32 length29:29;
  20758. + enum qm_fd_format _format1:3;
  20759. +#endif
  20760. + };
  20761. + /* If 'format' is _compound, 29b "congestion weight" */
  20762. + struct {
  20763. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20764. + enum qm_fd_format _format2:3;
  20765. + u32 cong_weight:29;
  20766. +#else
  20767. + u32 cong_weight:29;
  20768. + enum qm_fd_format _format2:3;
  20769. +#endif
  20770. + };
  20771. + };
  20772. + union {
  20773. + u32 cmd;
  20774. + u32 status;
  20775. + };
  20776. +} __aligned(8);
  20777. +#define QM_FD_DD_NULL 0x00
  20778. +#define QM_FD_PID_MASK 0x3f
  20779. +static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
  20780. +{
  20781. + return fd->addr;
  20782. +}
  20783. +
  20784. +static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
  20785. +{
  20786. + return (dma_addr_t)fd->addr;
  20787. +}
  20788. +/* Macro, so we compile better if 'v' isn't always 64-bit */
  20789. +#define qm_fd_addr_set64(fd, v) \
  20790. + do { \
  20791. + struct qm_fd *__fd931 = (fd); \
  20792. + __fd931->addr = v; \
  20793. + } while (0)
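To tie the accessors together, a caller preparing a single contiguous frame might fill a descriptor roughly as follows (a sketch: buf_phys, headroom, frame_len and bp_id are hypothetical values supplied by the caller):

	struct qm_fd fd;

	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, buf_phys);	/* 40-bit buffer address */
	fd.format = qm_fd_contig;		/* contiguous, 20-bit length form */
	fd.offset = headroom;			/* 9-bit offset into the buffer */
	fd.length20 = frame_len;		/* 20-bit frame length */
	fd.bpid = bp_id;			/* pool the buffer belongs to */
	fd.cmd = 0;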
  20794. +
  20795. +/* For static initialisation of FDs (which is complicated by the use of unions
  20796. + * in "struct qm_fd"), use the following macros. Note that;
  20797. + * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation
  20798. + * use-case,
  20799. + * - use capitalised QM_FD_*** formats for static initialisation.
  20800. + */
  20801. +#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
  20802. + { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
  20803. + { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
  20804. + { cmd } }
  20805. +#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
  20806. + { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
  20807. + { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
  20808. + { cmd } }
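For instance, a file-scope "null" descriptor built with the capitalised format constants might look like this (illustrative only):

	static const struct qm_fd example_null_fd =
		QM_FD_FMT_20(0, 0, 0, QM_FD_CONTIG, 0, 0);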
  20809. +
  20810. +/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
  20811. +#define QM_SG_OFFSET_MASK 0x1FFF
  20812. +struct qm_sg_entry {
  20813. + union {
  20814. + struct {
  20815. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20816. + u8 __reserved1[3];
  20817. + u8 addr_hi; /* high 8-bits of 40-bit address */
  20818. + u32 addr_lo; /* low 32-bits of 40-bit address */
  20819. +#else
  20820. + u32 addr_lo; /* low 32-bits of 40-bit address */
  20821. + u8 addr_hi; /* high 8-bits of 40-bit address */
  20822. + u8 __reserved1[3];
  20823. +#endif
  20824. + };
  20825. + struct {
  20826. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20827. + u64 __notaddress:24;
  20828. + u64 addr:40;
  20829. +#else
  20830. + u64 addr:40;
  20831. + u64 __notaddress:24;
  20832. +#endif
  20833. + };
  20834. + u64 opaque;
  20835. + };
  20836. + union {
  20837. + struct {
  20838. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20839. + u32 extension:1; /* Extension bit */
  20840. + u32 final:1; /* Final bit */
  20841. + u32 length:30;
  20842. +#else
  20843. + u32 length:30;
  20844. + u32 final:1; /* Final bit */
  20845. + u32 extension:1; /* Extension bit */
  20846. +#endif
  20847. + };
  20848. + u32 sgt_efl;
  20849. + };
  20850. + u8 __reserved2;
  20851. + u8 bpid;
  20852. + union {
  20853. + struct {
  20854. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20855. + u16 __reserved3:3;
  20856. + u16 offset:13;
  20857. +#else
  20858. + u16 offset:13;
  20859. + u16 __reserved3:3;
  20860. +#endif
  20861. + };
  20862. + u16 opaque_offset;
  20863. + };
  20864. +} __packed;
  20865. +union qm_sg_efl {
  20866. + struct {
  20867. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  20868. + u32 extension:1; /* Extension bit */
  20869. + u32 final:1; /* Final bit */
  20870. + u32 length:30;
  20871. +#else
  20872. + u32 length:30;
  20873. + u32 final:1; /* Final bit */
  20874. + u32 extension:1; /* Extension bit */
  20875. +#endif
  20876. + };
  20877. + u32 efl;
  20878. +};
  20879. +static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
  20880. +{
  20881. + return be64_to_cpu(sg->opaque);
  20882. +}
  20883. +static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
  20884. +{
  20885. + return (dma_addr_t)be64_to_cpu(sg->opaque);
  20886. +}
  20887. +static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg)
  20888. +{
  20889. + union qm_sg_efl u;
  20890. +
  20891. + u.efl = be32_to_cpu(sg->sgt_efl);
  20892. + return u.extension;
  20893. +}
  20894. +static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg)
  20895. +{
  20896. + union qm_sg_efl u;
  20897. +
  20898. + u.efl = be32_to_cpu(sg->sgt_efl);
  20899. + return u.final;
  20900. +}
  20901. +static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg)
  20902. +{
  20903. + union qm_sg_efl u;
  20904. +
  20905. + u.efl = be32_to_cpu(sg->sgt_efl);
  20906. + return u.length;
  20907. +}
  20908. +static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg)
  20909. +{
  20910. + return sg->bpid;
  20911. +}
  20912. +static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg)
  20913. +{
  20914. + u32 opaque_offset = be16_to_cpu(sg->opaque_offset);
  20915. +
  20916. + return opaque_offset & 0x1fff;
  20917. +}
  20918. +
  20919. +/* Macro, so we compile better if 'v' isn't always 64-bit */
  20920. +#define qm_sg_entry_set64(sg, v) \
  20921. + do { \
  20922. + struct qm_sg_entry *__sg931 = (sg); \
  20923. + __sg931->opaque = cpu_to_be64(v); \
  20924. + } while (0)
  20925. +#define qm_sg_entry_set_ext(sg, v) \
  20926. + do { \
  20927. + union qm_sg_efl __u932; \
  20928. + __u932.efl = be32_to_cpu((sg)->sgt_efl); \
  20929. + __u932.extension = v; \
  20930. + (sg)->sgt_efl = cpu_to_be32(__u932.efl); \
  20931. + } while (0)
  20932. +#define qm_sg_entry_set_final(sg, v) \
  20933. + do { \
  20934. + union qm_sg_efl __u933; \
  20935. + __u933.efl = be32_to_cpu((sg)->sgt_efl); \
  20936. + __u933.final = v; \
  20937. + (sg)->sgt_efl = cpu_to_be32(__u933.efl); \
  20938. + } while (0)
  20939. +#define qm_sg_entry_set_len(sg, v) \
  20940. + do { \
  20941. + union qm_sg_efl __u934; \
  20942. + __u934.efl = be32_to_cpu((sg)->sgt_efl); \
  20943. + __u934.length = v; \
  20944. + (sg)->sgt_efl = cpu_to_be32(__u934.efl); \
  20945. + } while (0)
  20946. +#define qm_sg_entry_set_bpid(sg, v) \
  20947. + do { \
  20948. + struct qm_sg_entry *__u935 = (sg); \
  20949. + __u935->bpid = v; \
  20950. + } while (0)
  20951. +#define qm_sg_entry_set_offset(sg, v) \
  20952. + do { \
  20953. + struct qm_sg_entry *__u936 = (sg); \
  20954. + __u936->opaque_offset = cpu_to_be16(v); \
  20955. + } while (0)
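As an example of the byte-order-safe setters above, a two-segment scatter-gather table could be populated as sketched below (seg0_phys, seg1_phys, seg_len and bp_id are hypothetical caller-supplied values):

	struct qm_sg_entry sgt[2];

	memset(sgt, 0, sizeof(sgt));
	qm_sg_entry_set64(&sgt[0], seg0_phys);
	qm_sg_entry_set_len(&sgt[0], seg_len);
	qm_sg_entry_set_offset(&sgt[0], 0);
	qm_sg_entry_set_bpid(&sgt[0], bp_id);

	qm_sg_entry_set64(&sgt[1], seg1_phys);
	qm_sg_entry_set_len(&sgt[1], seg_len);
	qm_sg_entry_set_bpid(&sgt[1], bp_id);
	qm_sg_entry_set_final(&sgt[1], 1);	/* last entry in the table */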
  20956. +
  20957. +/* See 1.5.8.1: "Enqueue Command" */
  20958. +struct qm_eqcr_entry {
  20959. + u8 __dont_write_directly__verb;
  20960. + u8 dca;
  20961. + u16 seqnum;
  20962. + u32 orp; /* 24-bit */
  20963. + u32 fqid; /* 24-bit */
  20964. + u32 tag;
  20965. + struct qm_fd fd;
  20966. + u8 __reserved3[32];
  20967. +} __packed;
  20968. +#define QM_EQCR_VERB_VBIT 0x80
  20969. +#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
  20970. +#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
  20971. +#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
  20972. +#define QM_EQCR_VERB_COLOUR_GREEN 0x00
  20973. +#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
  20974. +#define QM_EQCR_VERB_COLOUR_RED 0x10
  20975. +#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
  20976. +#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
  20977. +#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
  20978. +#define QM_EQCR_DCA_ENABLE 0x80
  20979. +#define QM_EQCR_DCA_PARK 0x40
  20980. +#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
  20981. +#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
  20982. +#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
  20983. +#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
  20984. +#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
  20985. +
  20986. +/* See 1.5.8.2: "Frame Dequeue Response" */
  20987. +struct qm_dqrr_entry {
  20988. + u8 verb;
  20989. + u8 stat;
  20990. + u16 seqnum; /* 15-bit */
  20991. + u8 tok;
  20992. + u8 __reserved2[3];
  20993. + u32 fqid; /* 24-bit */
  20994. + u32 contextB;
  20995. + struct qm_fd fd;
  20996. + u8 __reserved4[32];
  20997. +};
  20998. +#define QM_DQRR_VERB_VBIT 0x80
  20999. +#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
  21000. +#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
  21001. +#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
  21002. +#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
  21003. +#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
  21004. +#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
  21005. +#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
  21006. +#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
  21007. +
  21008. +/* See 1.5.8.3: "ERN Message Response" */
  21009. +/* See 1.5.8.4: "FQ State Change Notification" */
  21010. +struct qm_mr_entry {
  21011. + u8 verb;
  21012. + union {
  21013. + struct {
  21014. + u8 dca;
  21015. + u16 seqnum;
  21016. + u8 rc; /* Rejection Code */
  21017. + u32 orp:24;
  21018. + u32 fqid; /* 24-bit */
  21019. + u32 tag;
  21020. + struct qm_fd fd;
  21021. + } __packed ern;
  21022. + struct {
  21023. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21024. + u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
  21025. + u8 __reserved1:3;
  21026. + enum qm_dc_portal portal:3;
  21027. +#else
  21028. + enum qm_dc_portal portal:3;
  21029. + u8 __reserved1:3;
  21030. + u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
  21031. +#endif
  21032. + u16 __reserved2;
  21033. + u8 rc; /* Rejection Code */
  21034. + u32 __reserved3:24;
  21035. + u32 fqid; /* 24-bit */
  21036. + u32 tag;
  21037. + struct qm_fd fd;
  21038. + } __packed dcern;
  21039. + struct {
  21040. + u8 fqs; /* Frame Queue Status */
  21041. + u8 __reserved1[6];
  21042. + u32 fqid; /* 24-bit */
  21043. + u32 contextB;
  21044. + u8 __reserved2[16];
  21045. + } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
  21046. + };
  21047. + u8 __reserved2[32];
  21048. +} __packed;
  21049. +#define QM_MR_VERB_VBIT 0x80
  21050. +/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
  21051. + * originating from direct-connect portals ("dcern") use 0x20 as a verb which
  21052. + * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
  21053. + * the other MR types by noting if the 0x20 bit is unset. */
  21054. +#define QM_MR_VERB_TYPE_MASK 0x27
  21055. +#define QM_MR_VERB_DC_ERN 0x20
  21056. +#define QM_MR_VERB_FQRN 0x21
  21057. +#define QM_MR_VERB_FQRNI 0x22
  21058. +#define QM_MR_VERB_FQRL 0x23
  21059. +#define QM_MR_VERB_FQPN 0x24
  21060. +#define QM_MR_RC_MASK 0xf0 /* contains one of; */
  21061. +#define QM_MR_RC_CGR_TAILDROP 0x00
  21062. +#define QM_MR_RC_WRED 0x10
  21063. +#define QM_MR_RC_ERROR 0x20
  21064. +#define QM_MR_RC_ORPWINDOW_EARLY 0x30
  21065. +#define QM_MR_RC_ORPWINDOW_LATE 0x40
  21066. +#define QM_MR_RC_FQ_TAILDROP 0x50
  21067. +#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
  21068. +#define QM_MR_RC_ORP_ZERO 0x70
  21069. +#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
  21070. +#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
  21071. +#define QM_MR_DCERN_COLOUR_GREEN 0x00
  21072. +#define QM_MR_DCERN_COLOUR_YELLOW 0x01
  21073. +#define QM_MR_DCERN_COLOUR_RED 0x02
  21074. +#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
  21075. +
  21076. +/* An identical structure of FQD fields is present in the "Init FQ" command and
  21077. + * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
  21078. + * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
  21079. + * latter has two inlines to assist with converting to/from the mant+exp
  21080. + * representation. */
  21081. +struct qm_fqd_stashing {
  21082. + /* See QM_STASHING_EXCL_<...> */
  21083. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21084. + u8 exclusive;
  21085. + u8 __reserved1:2;
  21086. + /* Numbers of cachelines */
  21087. + u8 annotation_cl:2;
  21088. + u8 data_cl:2;
  21089. + u8 context_cl:2;
  21090. +#else
  21091. + u8 context_cl:2;
  21092. + u8 data_cl:2;
  21093. + u8 annotation_cl:2;
  21094. + u8 __reserved1:2;
  21095. + u8 exclusive;
  21096. +#endif
  21097. +} __packed;
  21098. +struct qm_fqd_taildrop {
  21099. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21100. + u16 __reserved1:3;
  21101. + u16 mant:8;
  21102. + u16 exp:5;
  21103. +#else
  21104. + u16 exp:5;
  21105. + u16 mant:8;
  21106. + u16 __reserved1:3;
  21107. +#endif
  21108. +} __packed;
  21109. +struct qm_fqd_oac {
  21110. + /* See QM_OAC_<...> */
  21111. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21112. + u8 oac:2; /* "Overhead Accounting Control" */
  21113. + u8 __reserved1:6;
  21114. +#else
  21115. + u8 __reserved1:6;
  21116. + u8 oac:2; /* "Overhead Accounting Control" */
  21117. +#endif
  21118. + /* Two's-complement value (-128 to +127) */
  21119. + signed char oal; /* "Overhead Accounting Length" */
  21120. +} __packed;
  21121. +struct qm_fqd {
  21122. + union {
  21123. + u8 orpc;
  21124. + struct {
  21125. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21126. + u8 __reserved1:2;
  21127. + u8 orprws:3;
  21128. + u8 oa:1;
  21129. + u8 olws:2;
  21130. +#else
  21131. + u8 olws:2;
  21132. + u8 oa:1;
  21133. + u8 orprws:3;
  21134. + u8 __reserved1:2;
  21135. +#endif
  21136. + } __packed;
  21137. + };
  21138. + u8 cgid;
  21139. + u16 fq_ctrl; /* See QM_FQCTRL_<...> */
  21140. + union {
  21141. + u16 dest_wq;
  21142. + struct {
  21143. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21144. + u16 channel:13; /* qm_channel */
  21145. + u16 wq:3;
  21146. +#else
  21147. + u16 wq:3;
  21148. + u16 channel:13; /* qm_channel */
  21149. +#endif
  21150. + } __packed dest;
  21151. + };
  21152. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21153. + u16 __reserved2:1;
  21154. + u16 ics_cred:15;
  21155. +#else
  21156. + u16 ics_cred:15;
  21157. + u16 __reserved2:1;
  21158. +#endif
  21159. + /* For "Initialize Frame Queue" commands, the write-enable mask
  21160. + * determines whether 'td' or 'oac_init' is observed. For query
  21161. + * commands, this field is always 'td', and 'oac_query' (below) reflects
  21162. + * the Overhead ACcounting values. */
  21163. + union {
  21164. + struct qm_fqd_taildrop td;
  21165. + struct qm_fqd_oac oac_init;
  21166. + };
  21167. + u32 context_b;
  21168. + union {
  21169. + /* Treat it as 64-bit opaque */
  21170. + u64 opaque;
  21171. + struct {
  21172. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21173. + u32 hi;
  21174. + u32 lo;
  21175. +#else
  21176. + u32 lo;
  21177. + u32 hi;
  21178. +#endif
  21179. + };
  21180. + /* Treat it as s/w portal stashing config */
  21181. + /* See 1.5.6.7.1: "FQD Context_A field used for [...] */
  21182. + struct {
  21183. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21184. + struct qm_fqd_stashing stashing;
  21185. + /* 48-bit address of FQ context to
  21186. + * stash, must be cacheline-aligned */
  21187. + u16 context_hi;
  21188. + u32 context_lo;
  21189. +#else
  21190. + u32 context_lo;
  21191. + u16 context_hi;
  21192. + struct qm_fqd_stashing stashing;
  21193. +#endif
  21194. + } __packed;
  21195. + } context_a;
  21196. + struct qm_fqd_oac oac_query;
  21197. +} __packed;
  21198. +/* 64-bit converters for context_hi/lo */
  21199. +static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
  21200. +{
  21201. + return ((u64)fqd->context_a.context_hi << 32) |
  21202. + (u64)fqd->context_a.context_lo;
  21203. +}
  21204. +static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
  21205. +{
  21206. + return (dma_addr_t)qm_fqd_stashing_get64(fqd);
  21207. +}
  21208. +static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
  21209. +{
  21210. + return ((u64)fqd->context_a.hi << 32) |
  21211. + (u64)fqd->context_a.lo;
  21212. +}
  21213. +/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
  21214. +#define qm_fqd_stashing_set64(fqd, v) \
  21215. + do { \
  21216. + struct qm_fqd *__fqd931 = (fqd); \
  21217. + __fqd931->context_a.context_hi = upper_32_bits(v); \
  21218. + __fqd931->context_a.context_lo = lower_32_bits(v); \
  21219. + } while (0)
  21220. +#define qm_fqd_context_a_set64(fqd, v) \
  21221. + do { \
  21222. + struct qm_fqd *__fqd931 = (fqd); \
  21223. + __fqd931->context_a.hi = upper_32_bits(v); \
  21224. + __fqd931->context_a.lo = lower_32_bits(v); \
  21225. + } while (0)
  21226. +/* convert a threshold value into mant+exp representation */
  21227. +static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
  21228. + int roundup)
  21229. +{
  21230. + u32 e = 0;
  21231. + int oddbit = 0;
  21232. + if (val > 0xe0000000)
  21233. + return -ERANGE;
  21234. + while (val > 0xff) {
  21235. + oddbit = val & 1;
  21236. + val >>= 1;
  21237. + e++;
  21238. + if (roundup && oddbit)
  21239. + val++;
  21240. + }
  21241. + td->exp = e;
  21242. + td->mant = val;
  21243. + return 0;
  21244. +}
  21245. +/* and the other direction */
  21246. +static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
  21247. +{
  21248. + return (u32)td->mant << td->exp;
  21249. +}
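A worked example of the mant+exp conversion: a requested threshold of 4096 is shifted down five times before it fits in the 8-bit mantissa, giving mant=128 and exp=5, and the reverse conversion recovers 128 << 5 = 4096 exactly.

	struct qm_fqd_taildrop td;

	qm_fqd_taildrop_set(&td, 4096, 0);	/* td.mant == 128, td.exp == 5 */
	/* qm_fqd_taildrop_get(&td) == 4096 */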
  21250. +
  21251. +/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
  21252. +/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
  21253. +#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
  21254. +#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
  21255. +#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
  21256. +#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
  21257. +#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
  21258. +#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
  21259. +#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
  21260. +#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
  21261. +#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
  21262. +#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
  21263. +#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
  21264. +
  21265. +/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
  21266. +/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
  21267. +#define QM_STASHING_EXCL_ANNOTATION 0x04
  21268. +#define QM_STASHING_EXCL_DATA 0x02
  21269. +#define QM_STASHING_EXCL_CTX 0x01
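Combining these constants with the context_a helpers above, a sketch of enabling data and context stashing in an FQD might read as follows (ctx_phys is a hypothetical cacheline-aligned physical address):

	struct qm_fqd fqd;

	memset(&fqd, 0, sizeof(fqd));
	fqd.context_a.stashing.exclusive =
		QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX;
	fqd.context_a.stashing.data_cl = 1;	/* one cacheline of frame data */
	fqd.context_a.stashing.context_cl = 1;	/* one cacheline of FQ context */
	qm_fqd_stashing_set64(&fqd, ctx_phys);
	fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING;	/* see 'fq_ctrl' flags above */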
  21270. +
  21271. +/* See 1.5.5.3: "Intra Class Scheduling" */
  21272. +/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
  21273. +#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
  21274. +#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
  21275. +
  21276. +/* See 1.5.8.4: "FQ State Change Notification" */
  21277. +/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
  21278. + * and associated commands/responses. The WRED parameters are calculated from
  21279. + * these fields as follows;
  21280. + * MaxTH = MA * (2 ^ Mn)
  21281. + * Slope = SA / (2 ^ Sn)
  21282. + * MaxP = 4 * (Pn + 1)
  21283. + */
  21284. +struct qm_cgr_wr_parm {
  21285. + union {
  21286. + u32 word;
  21287. + struct {
  21288. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21289. + u32 MA:8;
  21290. + u32 Mn:5;
  21291. + u32 SA:7; /* must be between 64-127 */
  21292. + u32 Sn:6;
  21293. + u32 Pn:6;
  21294. +#else
  21295. + u32 Pn:6;
  21296. + u32 Sn:6;
  21297. + u32 SA:7; /* must be between 64-127 */
  21298. + u32 Mn:5;
  21299. + u32 MA:8;
  21300. +#endif
  21301. + } __packed;
  21302. + };
  21303. +} __packed;
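A worked example of the WRED formulas above, with illustrative field values:

	struct qm_cgr_wr_parm wr;

	wr.word = 0;
	wr.MA = 64;	/* MaxTH mantissa */
	wr.Mn = 10;	/* MaxTH = 64 * 2^10 = 65536 */
	wr.SA = 64;	/* slope mantissa, must be 64..127 */
	wr.Sn = 6;	/* Slope = 64 / 2^6 = 1 */
	wr.Pn = 3;	/* MaxP  = 4 * (3 + 1) = 16 */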
  21304. +/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
  21305. + * management commands, this is padded to a 16-bit structure field, so that's
  21306. + * how we represent it here. The congestion state threshold is calculated from
  21307. + * these fields as follows;
  21308. + * CS threshold = TA * (2 ^ Tn)
  21309. + */
  21310. +struct qm_cgr_cs_thres {
  21311. + union {
  21312. + u16 hword;
  21313. + struct {
  21314. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21315. + u16 __reserved:3;
  21316. + u16 TA:8;
  21317. + u16 Tn:5;
  21318. +#else
  21319. + u16 Tn:5;
  21320. + u16 TA:8;
  21321. + u16 __reserved:3;
  21322. +#endif
  21323. + } __packed;
  21324. + };
  21325. +} __packed;
  21326. +/* This identical structure of CGR fields is present in the "Init/Modify CGR"
  21327. + * commands and the "Query CGR" result. It's suctioned out here into its own
  21328. + * struct. */
  21329. +struct __qm_mc_cgr {
  21330. + struct qm_cgr_wr_parm wr_parm_g;
  21331. + struct qm_cgr_wr_parm wr_parm_y;
  21332. + struct qm_cgr_wr_parm wr_parm_r;
  21333. + u8 wr_en_g; /* boolean, use QM_CGR_EN */
  21334. + u8 wr_en_y; /* boolean, use QM_CGR_EN */
  21335. + u8 wr_en_r; /* boolean, use QM_CGR_EN */
  21336. + u8 cscn_en; /* boolean, use QM_CGR_EN */
  21337. + union {
  21338. + struct {
  21339. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21340. + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
  21341. + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
  21342. +#else
  21343. + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
  21344. + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
  21345. +#endif
  21346. + };
  21347. + u32 cscn_targ; /* use QM_CGR_TARG_* */
  21348. + };
  21349. + u8 cstd_en; /* boolean, use QM_CGR_EN */
  21350. + u8 cs; /* boolean, only used in query response */
  21351. + union {
  21352. + /* use qm_cgr_cs_thres_set64() */
  21353. + struct qm_cgr_cs_thres cs_thres;
  21354. + u16 __cs_thres;
  21355. + };
  21356. + u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
  21357. +} __packed;
  21358. +#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
  21359. +#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
  21360. +#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
  21361. +#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
  21362. +#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
  21363. +#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
  21364. +/* Convert CGR thresholds to/from "cs_thres" format */
  21365. +static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
  21366. +{
  21367. + return (u64)th->TA << th->Tn;
  21368. +}
  21369. +static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
  21370. + int roundup)
  21371. +{
  21372. + u32 e = 0;
  21373. + int oddbit = 0;
  21374. + while (val > 0xff) {
  21375. + oddbit = val & 1;
  21376. + val >>= 1;
  21377. + e++;
  21378. + if (roundup && oddbit)
  21379. + val++;
  21380. + }
  21381. + th->Tn = e;
  21382. + th->TA = val;
  21383. + return 0;
  21384. +}
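A worked example of the threshold encoding: a 64 KiB congestion-state threshold (65536) is shifted down nine times before fitting the 8-bit mantissa, giving TA=128 and Tn=9, so qm_cgr_cs_thres_get64() returns 128 << 9 = 65536.

	struct qm_cgr_cs_thres th;

	qm_cgr_cs_thres_set64(&th, 65536, 0);	/* th.TA == 128, th.Tn == 9 */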
  21385. +
  21386. +/* See 1.5.8.5.1: "Initialize FQ" */
  21387. +/* See 1.5.8.5.2: "Query FQ" */
  21388. +/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
  21389. +/* See 1.5.8.5.4: "Alter FQ State Commands " */
  21390. +/* See 1.5.8.6.1: "Initialize/Modify CGR" */
  21391. +/* See 1.5.8.6.2: "CGR Test Write" */
  21392. +/* See 1.5.8.6.3: "Query CGR" */
  21393. +/* See 1.5.8.6.4: "Query Congestion Group State" */
  21394. +struct qm_mcc_initfq {
  21395. + u8 __reserved1;
  21396. + u16 we_mask; /* Write Enable Mask */
  21397. + u32 fqid; /* 24-bit */
  21398. + u16 count; /* Initialises 'count+1' FQDs */
  21399. + struct qm_fqd fqd; /* the FQD fields go here */
  21400. + u8 __reserved3[30];
  21401. +} __packed;
  21402. +struct qm_mcc_queryfq {
  21403. + u8 __reserved1[3];
  21404. + u32 fqid; /* 24-bit */
  21405. + u8 __reserved2[56];
  21406. +} __packed;
  21407. +struct qm_mcc_queryfq_np {
  21408. + u8 __reserved1[3];
  21409. + u32 fqid; /* 24-bit */
  21410. + u8 __reserved2[56];
  21411. +} __packed;
  21412. +struct qm_mcc_alterfq {
  21413. + u8 __reserved1[3];
  21414. + u32 fqid; /* 24-bit */
  21415. + u8 __reserved2;
  21416. + u8 count; /* number of consecutive FQID */
  21417. + u8 __reserved3[10];
  21418. + u32 context_b; /* frame queue context b */
  21419. + u8 __reserved4[40];
  21420. +} __packed;
  21421. +struct qm_mcc_initcgr {
  21422. + u8 __reserved1;
  21423. + u16 we_mask; /* Write Enable Mask */
  21424. + struct __qm_mc_cgr cgr; /* CGR fields */
  21425. + u8 __reserved2[2];
  21426. + u8 cgid;
  21427. + u8 __reserved4[32];
  21428. +} __packed;
  21429. +struct qm_mcc_cgrtestwrite {
  21430. + u8 __reserved1[2];
  21431. + u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
  21432. + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
  21433. + u8 __reserved2[23];
  21434. + u8 cgid;
  21435. + u8 __reserved3[32];
  21436. +} __packed;
  21437. +struct qm_mcc_querycgr {
  21438. + u8 __reserved1[30];
  21439. + u8 cgid;
  21440. + u8 __reserved2[32];
  21441. +} __packed;
  21442. +struct qm_mcc_querycongestion {
  21443. + u8 __reserved[63];
  21444. +} __packed;
  21445. +struct qm_mcc_querywq {
  21446. + u8 __reserved;
  21447. + /* select channel if verb != QUERYWQ_DEDICATED */
  21448. + union {
  21449. + u16 channel_wq; /* ignores wq (3 lsbits) */
  21450. + struct {
  21451. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21452. + u16 id:13; /* qm_channel */
  21453. + u16 __reserved1:3;
  21454. +#else
  21455. + u16 __reserved1:3;
  21456. + u16 id:13; /* qm_channel */
  21457. +#endif
  21458. + } __packed channel;
  21459. + };
  21460. + u8 __reserved2[60];
  21461. +} __packed;
  21462. +
  21463. +struct qm_mcc_ceetm_lfqmt_config {
  21464. + u8 __reserved1[4];
  21465. + u32 lfqid:24;
  21466. + u8 __reserved2[2];
  21467. + u16 cqid;
  21468. + u8 __reserved3[2];
  21469. + u16 dctidx;
  21470. + u8 __reserved4[48];
  21471. +} __packed;
  21472. +
  21473. +struct qm_mcc_ceetm_lfqmt_query {
  21474. + u8 __reserved1[4];
  21475. + u32 lfqid:24;
  21476. + u8 __reserved2[56];
  21477. +} __packed;
  21478. +
  21479. +struct qm_mcc_ceetm_cq_config {
  21480. + u8 __reserved1;
  21481. + u16 cqid;
  21482. + u8 dcpid;
  21483. + u8 __reserved2;
  21484. + u16 ccgid;
  21485. + u8 __reserved3[56];
  21486. +} __packed;
  21487. +
  21488. +struct qm_mcc_ceetm_cq_query {
  21489. + u8 __reserved1;
  21490. + u16 cqid;
  21491. + u8 dcpid;
  21492. + u8 __reserved2[59];
  21493. +} __packed;
  21494. +
  21495. +struct qm_mcc_ceetm_dct_config {
  21496. + u8 __reserved1;
  21497. + u16 dctidx;
  21498. + u8 dcpid;
  21499. + u8 __reserved2[15];
  21500. + u32 context_b;
  21501. + u64 context_a;
  21502. + u8 __reserved3[32];
  21503. +} __packed;
  21504. +
  21505. +struct qm_mcc_ceetm_dct_query {
  21506. + u8 __reserved1;
  21507. + u16 dctidx;
  21508. + u8 dcpid;
  21509. + u8 __reserved2[59];
  21510. +} __packed;
  21511. +
  21512. +struct qm_mcc_ceetm_class_scheduler_config {
  21513. + u8 __reserved1;
  21514. + u16 cqcid;
  21515. + u8 dcpid;
  21516. + u8 __reserved2[6];
  21517. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21518. + u8 gpc_reserved:1;
  21519. + u8 gpc_combine_flag:1;
  21520. + u8 gpc_prio_b:3;
  21521. + u8 gpc_prio_a:3;
  21522. +#else
  21523. + u8 gpc_prio_a:3;
  21524. + u8 gpc_prio_b:3;
  21525. + u8 gpc_combine_flag:1;
  21526. + u8 gpc_reserved:1;
  21527. +#endif
  21528. + u16 crem;
  21529. + u16 erem;
  21530. + u8 w[8];
  21531. + u8 __reserved3[40];
  21532. +} __packed;
  21533. +
  21534. +struct qm_mcc_ceetm_class_scheduler_query {
  21535. + u8 __reserved1;
  21536. + u16 cqcid;
  21537. + u8 dcpid;
  21538. + u8 __reserved2[59];
  21539. +} __packed;
  21540. +
  21541. +#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12)
  21542. +#define CEETM_COMMAND_SP_MAPPING (1 << 12)
  21543. +#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12)
  21544. +#define CEETM_COMMAND_LNI_SHAPER (3 << 12)
  21545. +#define CEETM_COMMAND_TCFC (4 << 12)
  21546. +
  21547. +#define CEETM_CCGRID_MASK 0x01FF
  21548. +#define CEETM_CCGR_CM_CONFIGURE (0 << 14)
  21549. +#define CEETM_CCGR_DN_CONFIGURE (1 << 14)
  21550. +#define CEETM_CCGR_TEST_WRITE (2 << 14)
  21551. +#define CEETM_CCGR_CM_QUERY (0 << 14)
  21552. +#define CEETM_CCGR_DN_QUERY (1 << 14)
  21553. +#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14)
  21554. +#define CEETM_QUERY_CONGESTION_STATE (3 << 14)
  21555. +
  21556. +struct qm_mcc_ceetm_mapping_shaper_tcfc_config {
  21557. + u8 __reserved1;
  21558. + u16 cid;
  21559. + u8 dcpid;
  21560. + union {
  21561. + struct {
  21562. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21563. + u8 map_shaped:1;
  21564. + u8 map_reserved:4;
  21565. + u8 map_lni_id:3;
  21566. +#else
  21567. + u8 map_lni_id:3;
  21568. + u8 map_reserved:4;
  21569. + u8 map_shaped:1;
  21570. +#endif
  21571. + u8 __reserved2[58];
  21572. + } __packed channel_mapping;
  21573. + struct {
  21574. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21575. + u8 map_reserved:5;
  21576. + u8 map_lni_id:3;
  21577. +#else
  21578. + u8 map_lni_id:3;
  21579. + u8 map_reserved:5;
  21580. +#endif
  21581. + u8 __reserved2[58];
  21582. + } __packed sp_mapping;
  21583. + struct {
  21584. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21585. + u8 cpl:1;
  21586. + u8 cpl_reserved:2;
  21587. + u8 oal:5;
  21588. +#else
  21589. + u8 oal:5;
  21590. + u8 cpl_reserved:2;
  21591. + u8 cpl:1;
  21592. +#endif
  21593. + u32 crtcr:24;
  21594. + u32 ertcr:24;
  21595. + u16 crtbl;
  21596. + u16 ertbl;
  21597. + u8 mps; /* This will be hardcoded by driver with 60 */
  21598. + u8 __reserved2[47];
  21599. + } __packed shaper_config;
  21600. + struct {
  21601. + u8 __reserved2[11];
  21602. + u64 lnitcfcc;
  21603. + u8 __reserved3[40];
  21604. + } __packed tcfc_config;
  21605. + };
  21606. +} __packed;
  21607. +
  21608. +struct qm_mcc_ceetm_mapping_shaper_tcfc_query {
  21609. + u8 __reserved1;
  21610. + u16 cid;
  21611. + u8 dcpid;
  21612. + u8 __reserved2[59];
  21613. +} __packed;
  21614. +
  21615. +struct qm_mcc_ceetm_ccgr_config {
  21616. + u8 __reserved1;
  21617. + u16 ccgrid;
  21618. + u8 dcpid;
  21619. + u8 __reserved2;
  21620. + u16 we_mask;
  21621. + union {
  21622. + struct {
  21623. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21624. + u8 ctl_reserved:1;
  21625. + u8 ctl_wr_en_g:1;
  21626. + u8 ctl_wr_en_y:1;
  21627. + u8 ctl_wr_en_r:1;
  21628. + u8 ctl_td_en:1;
  21629. + u8 ctl_td_mode:1;
  21630. + u8 ctl_cscn_en:1;
  21631. + u8 ctl_mode:1;
  21632. +#else
  21633. + u8 ctl_mode:1;
  21634. + u8 ctl_cscn_en:1;
  21635. + u8 ctl_td_mode:1;
  21636. + u8 ctl_td_en:1;
  21637. + u8 ctl_wr_en_r:1;
  21638. + u8 ctl_wr_en_y:1;
  21639. + u8 ctl_wr_en_g:1;
  21640. + u8 ctl_reserved:1;
  21641. +#endif
  21642. + u8 cdv;
  21643. + u16 cscn_tupd;
  21644. + u8 oal;
  21645. + u8 __reserved3;
  21646. + struct qm_cgr_cs_thres cs_thres;
  21647. + struct qm_cgr_cs_thres cs_thres_x;
  21648. + struct qm_cgr_cs_thres td_thres;
  21649. + struct qm_cgr_wr_parm wr_parm_g;
  21650. + struct qm_cgr_wr_parm wr_parm_y;
  21651. + struct qm_cgr_wr_parm wr_parm_r;
  21652. + } __packed cm_config;
  21653. + struct {
  21654. + u8 dnc;
  21655. + u8 dn0;
  21656. + u8 dn1;
  21657. + u64 dnba:40;
  21658. + u8 __reserved3[2];
  21659. + u16 dnth_0;
  21660. + u8 __reserved4[2];
  21661. + u16 dnth_1;
  21662. + u8 __reserved5[8];
  21663. + } __packed dn_config;
  21664. + struct {
  21665. + u8 __reserved3[3];
  21666. + u64 i_cnt:40;
  21667. + u8 __reserved4[16];
  21668. + } __packed test_write;
  21669. + };
  21670. + u8 __reserved5[32];
  21671. +} __packed;
  21672. +
  21673. +struct qm_mcc_ceetm_ccgr_query {
  21674. + u8 __reserved1;
  21675. + u16 ccgrid;
  21676. + u8 dcpid;
  21677. + u8 __reserved2[59];
  21678. +} __packed;
  21679. +
  21680. +struct qm_mcc_ceetm_cq_peek_pop_xsfdrread {
  21681. + u8 __reserved1;
  21682. + u16 cqid;
  21683. + u8 dcpid;
  21684. + u8 ct;
  21685. + u16 xsfdr;
  21686. + u8 __reserved2[56];
  21687. +} __packed;
  21688. +
  21689. +#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00
  21690. +#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01
  21691. +#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02
  21692. +#define CEETM_QUERY_REJECT_STATISTICS 0x03
  21693. +#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04
  21694. +#define CEETM_WRITE_REJECT_STATISTICS 0x05
  21695. +struct qm_mcc_ceetm_statistics_query_write {
  21696. + u8 __reserved1;
  21697. + u16 cid;
  21698. + u8 dcpid;
  21699. + u8 ct;
  21700. + u8 __reserved2[13];
  21701. + u64 frm_cnt:40;
  21702. + u8 __reserved3[2];
  21703. + u64 byte_cnt:48;
  21704. + u8 __reserved[32];
  21705. +} __packed;
  21706. +
  21707. +struct qm_mc_command {
  21708. + u8 __dont_write_directly__verb;
  21709. + union {
  21710. + struct qm_mcc_initfq initfq;
  21711. + struct qm_mcc_queryfq queryfq;
  21712. + struct qm_mcc_queryfq_np queryfq_np;
  21713. + struct qm_mcc_alterfq alterfq;
  21714. + struct qm_mcc_initcgr initcgr;
  21715. + struct qm_mcc_cgrtestwrite cgrtestwrite;
  21716. + struct qm_mcc_querycgr querycgr;
  21717. + struct qm_mcc_querycongestion querycongestion;
  21718. + struct qm_mcc_querywq querywq;
  21719. + struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
  21720. + struct qm_mcc_ceetm_lfqmt_query lfqmt_query;
  21721. + struct qm_mcc_ceetm_cq_config cq_config;
  21722. + struct qm_mcc_ceetm_cq_query cq_query;
  21723. + struct qm_mcc_ceetm_dct_config dct_config;
  21724. + struct qm_mcc_ceetm_dct_query dct_query;
  21725. + struct qm_mcc_ceetm_class_scheduler_config csch_config;
  21726. + struct qm_mcc_ceetm_class_scheduler_query csch_query;
  21727. + struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config;
  21728. + struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query;
  21729. + struct qm_mcc_ceetm_ccgr_config ccgr_config;
  21730. + struct qm_mcc_ceetm_ccgr_query ccgr_query;
  21731. + struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
  21732. + struct qm_mcc_ceetm_statistics_query_write stats_query_write;
  21733. + };
  21734. +} __packed;
  21735. +#define QM_MCC_VERB_VBIT 0x80
  21736. +#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
  21737. +#define QM_MCC_VERB_INITFQ_PARKED 0x40
  21738. +#define QM_MCC_VERB_INITFQ_SCHED 0x41
  21739. +#define QM_MCC_VERB_QUERYFQ 0x44
  21740. +#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
  21741. +#define QM_MCC_VERB_QUERYWQ 0x46
  21742. +#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
  21743. +#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
  21744. +#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
  21745. +#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
  21746. +#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
  21747. +#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
  21748. +#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
  21749. +#define QM_MCC_VERB_INITCGR 0x50
  21750. +#define QM_MCC_VERB_MODIFYCGR 0x51
  21751. +#define QM_MCC_VERB_CGRTESTWRITE 0x52
  21752. +#define QM_MCC_VERB_QUERYCGR 0x58
  21753. +#define QM_MCC_VERB_QUERYCONGESTION 0x59
  21754. +/* INITFQ-specific flags */
  21755. +#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
  21756. +#define QM_INITFQ_WE_OAC 0x0100
  21757. +#define QM_INITFQ_WE_ORPC 0x0080
  21758. +#define QM_INITFQ_WE_CGID 0x0040
  21759. +#define QM_INITFQ_WE_FQCTRL 0x0020
  21760. +#define QM_INITFQ_WE_DESTWQ 0x0010
  21761. +#define QM_INITFQ_WE_ICSCRED 0x0008
  21762. +#define QM_INITFQ_WE_TDTHRESH 0x0004
  21763. +#define QM_INITFQ_WE_CONTEXTB 0x0002
  21764. +#define QM_INITFQ_WE_CONTEXTA 0x0001
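To illustrate how the write-enable mask pairs with the FQD fields, a sketch of preparing an "Initialize FQ" command that only programs the destination work queue and the FQ control flags is shown below; the channel and work-queue values are hypothetical, and submitting the command uses the portal management-command interface declared elsewhere in the driver.

	struct qm_mcc_initfq initfq;

	memset(&initfq, 0, sizeof(initfq));
	initfq.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	initfq.fqd.dest.channel = qm_channel_pool1;	/* first pool channel */
	initfq.fqd.dest.wq = 3;				/* work queue within it */
	initfq.fqd.fq_ctrl = QM_FQCTRL_HOLDACTIVE | QM_FQCTRL_PREFERINCACHE;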
  21765. +/* INITCGR/MODIFYCGR-specific flags */
  21766. +#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
  21767. +#define QM_CGR_WE_WR_PARM_G 0x0400
  21768. +#define QM_CGR_WE_WR_PARM_Y 0x0200
  21769. +#define QM_CGR_WE_WR_PARM_R 0x0100
  21770. +#define QM_CGR_WE_WR_EN_G 0x0080
  21771. +#define QM_CGR_WE_WR_EN_Y 0x0040
  21772. +#define QM_CGR_WE_WR_EN_R 0x0020
  21773. +#define QM_CGR_WE_CSCN_EN 0x0010
  21774. +#define QM_CGR_WE_CSCN_TARG 0x0008
  21775. +#define QM_CGR_WE_CSTD_EN 0x0004
  21776. +#define QM_CGR_WE_CS_THRES 0x0002
  21777. +#define QM_CGR_WE_MODE 0x0001
  21778. +
  21779. +/* See 1.5.9.7 CEETM Management Commands */
  21780. +#define QM_CEETM_VERB_LFQMT_CONFIG 0x70
  21781. +#define QM_CEETM_VERB_LFQMT_QUERY 0x71
  21782. +#define QM_CEETM_VERB_CQ_CONFIG 0x72
  21783. +#define QM_CEETM_VERB_CQ_QUERY 0x73
  21784. +#define QM_CEETM_VERB_DCT_CONFIG 0x74
  21785. +#define QM_CEETM_VERB_DCT_QUERY 0x75
  21786. +#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76
  21787. +#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77
  21788. +#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78
  21789. +#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79
  21790. +#define QM_CEETM_VERB_CCGR_CONFIG 0x7A
  21791. +#define QM_CEETM_VERB_CCGR_QUERY 0x7B
  21792. +#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C
  21793. +#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D
  21794. +
  21795. +/* See 1.5.8.5.1: "Initialize FQ" */
  21796. +/* See 1.5.8.5.2: "Query FQ" */
  21797. +/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
  21798. +/* See 1.5.8.5.4: "Alter FQ State Commands " */
  21799. +/* See 1.5.8.6.1: "Initialize/Modify CGR" */
  21800. +/* See 1.5.8.6.2: "CGR Test Write" */
  21801. +/* See 1.5.8.6.3: "Query CGR" */
  21802. +/* See 1.5.8.6.4: "Query Congestion Group State" */
  21803. +struct qm_mcr_initfq {
  21804. + u8 __reserved1[62];
  21805. +} __packed;
  21806. +struct qm_mcr_queryfq {
  21807. + u8 __reserved1[8];
  21808. + struct qm_fqd fqd; /* the FQD fields are here */
  21809. + u8 __reserved2[30];
  21810. +} __packed;
  21811. +struct qm_mcr_queryfq_np {
  21812. + u8 __reserved1;
  21813. + u8 state; /* QM_MCR_NP_STATE_*** */
  21814. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21815. + u8 __reserved2;
  21816. + u32 fqd_link:24;
  21817. + u16 __reserved3:2;
  21818. + u16 odp_seq:14;
  21819. + u16 __reserved4:2;
  21820. + u16 orp_nesn:14;
  21821. + u16 __reserved5:1;
  21822. + u16 orp_ea_hseq:15;
  21823. + u16 __reserved6:1;
  21824. + u16 orp_ea_tseq:15;
  21825. + u8 __reserved7;
  21826. + u32 orp_ea_hptr:24;
  21827. + u8 __reserved8;
  21828. + u32 orp_ea_tptr:24;
  21829. + u8 __reserved9;
  21830. + u32 pfdr_hptr:24;
  21831. + u8 __reserved10;
  21832. + u32 pfdr_tptr:24;
  21833. + u8 __reserved11[5];
  21834. + u8 __reserved12:7;
  21835. + u8 is:1;
  21836. + u16 ics_surp;
  21837. + u32 byte_cnt;
  21838. + u8 __reserved13;
  21839. + u32 frm_cnt:24;
  21840. + u32 __reserved14;
  21841. + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
  21842. + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
  21843. + u16 __reserved15;
  21844. + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
  21845. + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
  21846. + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
  21847. +#else
  21848. + u8 __reserved2;
  21849. + u32 fqd_link:24;
  21850. +
  21851. + u16 odp_seq:14;
  21852. + u16 __reserved3:2;
  21853. +
  21854. + u16 orp_nesn:14;
  21855. + u16 __reserved4:2;
  21856. +
  21857. + u16 orp_ea_hseq:15;
  21858. + u16 __reserved5:1;
  21859. +
  21860. + u16 orp_ea_tseq:15;
  21861. + u16 __reserved6:1;
  21862. +
  21863. + u8 __reserved7;
  21864. + u32 orp_ea_hptr:24;
  21865. +
  21866. + u8 __reserved8;
  21867. + u32 orp_ea_tptr:24;
  21868. +
  21869. + u8 __reserved9;
  21870. + u32 pfdr_hptr:24;
  21871. +
  21872. + u8 __reserved10;
  21873. + u32 pfdr_tptr:24;
  21874. +
  21875. + u8 __reserved11[5];
  21876. + u8 is:1;
  21877. + u8 __reserved12:7;
  21878. + u16 ics_surp;
  21879. + u32 byte_cnt;
  21880. + u8 __reserved13;
  21881. + u32 frm_cnt:24;
  21882. + u32 __reserved14;
  21883. + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
  21884. + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
  21885. + u16 __reserved15;
  21886. + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
  21887. + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
  21888. + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
  21889. +#endif
  21890. +} __packed;
  21891. +
  21892. +
  21893. +struct qm_mcr_alterfq {
  21894. + u8 fqs; /* Frame Queue Status */
  21895. + u8 __reserved1[61];
  21896. +} __packed;
  21897. +struct qm_mcr_initcgr {
  21898. + u8 __reserved1[62];
  21899. +} __packed;
  21900. +struct qm_mcr_cgrtestwrite {
  21901. + u16 __reserved1;
  21902. + struct __qm_mc_cgr cgr; /* CGR fields */
  21903. + u8 __reserved2[3];
  21904. + u32 __reserved3:24;
  21905. + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
  21906. + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
  21907. + u32 __reserved4:24;
  21908. + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
  21909. + u32 a_bcnt_lo; /* low 32-bits of 40-bit */
  21910. + u16 lgt; /* Last Group Tick */
  21911. + u16 wr_prob_g;
  21912. + u16 wr_prob_y;
  21913. + u16 wr_prob_r;
  21914. + u8 __reserved5[8];
  21915. +} __packed;
  21916. +struct qm_mcr_querycgr {
  21917. + u16 __reserved1;
  21918. + struct __qm_mc_cgr cgr; /* CGR fields */
  21919. + u8 __reserved2[3];
  21920. + union {
  21921. + struct {
  21922. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21923. + u32 __reserved3:24;
  21924. + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
  21925. + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
  21926. +#else
  21927. + u32 i_bcnt_lo; /* low 32-bits of 40-bit */
  21928. + u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
  21929. + u32 __reserved3:24;
  21930. +#endif
  21931. + };
  21932. + u64 i_bcnt;
  21933. + };
  21934. + union {
  21935. + struct {
  21936. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21937. + u32 __reserved4:24;
  21938. + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
  21939. + u32 a_bcnt_lo; /* low 32-bits of 40-bit */
  21940. +#else
  21941. + u32 a_bcnt_lo; /* low 32-bits of 40-bit */
  21942. + u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
  21943. + u32 __reserved4:24;
  21944. +#endif
  21945. + };
  21946. + u64 a_bcnt;
  21947. + };
  21948. + union {
  21949. + u32 cscn_targ_swp[4];
  21950. + u8 __reserved5[16];
  21951. + };
  21952. +} __packed;
  21953. +static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
  21954. +{
  21955. + return be64_to_cpu(q->i_bcnt);
  21956. +}
  21957. +static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
  21958. +{
  21959. + return be64_to_cpu(q->a_bcnt);
  21960. +}
  21961. +static inline u64 qm_mcr_cgrtestwrite_i_get64(
  21962. + const struct qm_mcr_cgrtestwrite *q)
  21963. +{
  21964. + return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
  21965. +}
  21966. +static inline u64 qm_mcr_cgrtestwrite_a_get64(
  21967. + const struct qm_mcr_cgrtestwrite *q)
  21968. +{
  21969. + return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
  21970. +}
  21971. +/* Macro, so we compile better if 'v' isn't always 64-bit */
  21972. +#define qm_mcr_querycgr_i_set64(q, v) \
  21973. + do { \
  21974. + struct qm_mcr_querycgr *__q931 = (q); \
  21975. + __q931->i_bcnt_hi = upper_32_bits(v); \
  21976. + __q931->i_bcnt_lo = lower_32_bits(v); \
  21977. + } while (0)
  21978. +#define qm_mcr_querycgr_a_set64(q, v) \
  21979. + do { \
  21980. + struct qm_mcr_querycgr *__q931 = (q); \
  21981. + __q931->a_bcnt_hi = upper_32_bits(v); \
  21982. + __q931->a_bcnt_lo = lower_32_bits(v); \
  21983. + } while (0)
  21984. +struct __qm_mcr_querycongestion {
  21985. + u32 __state[8];
  21986. +};
  21987. +struct qm_mcr_querycongestion {
  21988. + u8 __reserved[30];
  21989. + /* Access this struct using QM_MCR_QUERYCONGESTION() */
  21990. + struct __qm_mcr_querycongestion state;
  21991. +} __packed;
  21992. +struct qm_mcr_querywq {
  21993. + union {
  21994. + u16 channel_wq; /* ignores wq (3 lsbits) */
  21995. + struct {
  21996. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  21997. + u16 id:13; /* qm_channel */
  21998. + u16 __reserved:3;
  21999. +#else
  22000. + u16 __reserved:3;
  22001. + u16 id:13; /* qm_channel */
  22002. +#endif
  22003. + } __packed channel;
  22004. + };
  22005. + u8 __reserved[28];
  22006. + u32 wq_len[8];
  22007. +} __packed;
  22008. +
  22009. +/* QMAN CEETM Management Command Response */
  22010. +struct qm_mcr_ceetm_lfqmt_config {
  22011. + u8 __reserved1[62];
  22012. +} __packed;
  22013. +struct qm_mcr_ceetm_lfqmt_query {
  22014. + u8 __reserved1[8];
  22015. + u16 cqid;
  22016. + u8 __reserved2[2];
  22017. + u16 dctidx;
  22018. + u8 __reserved3[2];
  22019. + u16 ccgid;
  22020. + u8 __reserved4[44];
  22021. +} __packed;
  22022. +
  22023. +struct qm_mcr_ceetm_cq_config {
  22024. + u8 __reserved1[62];
  22025. +} __packed;
  22026. +
  22027. +struct qm_mcr_ceetm_cq_query {
  22028. + u8 __reserved1[4];
  22029. + u16 ccgid;
  22030. + u16 state;
  22031. + u32 pfdr_hptr:24;
  22032. + u32 pfdr_tptr:24;
  22033. + u16 od1_xsfdr;
  22034. + u16 od2_xsfdr;
  22035. + u16 od3_xsfdr;
  22036. + u16 od4_xsfdr;
  22037. + u16 od5_xsfdr;
  22038. + u16 od6_xsfdr;
  22039. + u16 ra1_xsfdr;
  22040. + u16 ra2_xsfdr;
  22041. + u8 __reserved2;
  22042. + u32 frm_cnt:24;
  22043. + u8 __reserved3[28];
  22044. +} __packed;
  22045. +
  22046. +struct qm_mcr_ceetm_dct_config {
  22047. + u8 __reserved1[62];
  22048. +} __packed;
  22049. +
  22050. +struct qm_mcr_ceetm_dct_query {
  22051. + u8 __reserved1[18];
  22052. + u32 context_b;
  22053. + u64 context_a;
  22054. + u8 __reserved2[32];
  22055. +} __packed;
  22056. +
  22057. +struct qm_mcr_ceetm_class_scheduler_config {
  22058. + u8 __reserved1[62];
  22059. +} __packed;
  22060. +
  22061. +struct qm_mcr_ceetm_class_scheduler_query {
  22062. + u8 __reserved1[9];
  22063. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  22064. + u8 gpc_reserved:1;
  22065. + u8 gpc_combine_flag:1;
  22066. + u8 gpc_prio_b:3;
  22067. + u8 gpc_prio_a:3;
  22068. +#else
  22069. + u8 gpc_prio_a:3;
  22070. + u8 gpc_prio_b:3;
  22071. + u8 gpc_combine_flag:1;
  22072. + u8 gpc_reserved:1;
  22073. +#endif
  22074. + u16 crem;
  22075. + u16 erem;
  22076. + u8 w[8];
  22077. + u8 __reserved2[5];
  22078. + u32 wbfslist:24;
  22079. + u32 d8;
  22080. + u32 d9;
  22081. + u32 d10;
  22082. + u32 d11;
  22083. + u32 d12;
  22084. + u32 d13;
  22085. + u32 d14;
  22086. + u32 d15;
  22087. +} __packed;
  22088. +
  22089. +struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
  22090. + u16 cid;
  22091. + u8 __reserved2[60];
  22092. +} __packed;
  22093. +
  22094. +struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
  22095. + u16 cid;
  22096. + u8 __reserved1;
  22097. + union {
  22098. + struct {
  22099. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  22100. + u8 map_shaped:1;
  22101. + u8 map_reserved:4;
  22102. + u8 map_lni_id:3;
  22103. +#else
  22104. + u8 map_lni_id:3;
  22105. + u8 map_reserved:4;
  22106. + u8 map_shaped:1;
  22107. +#endif
  22108. + u8 __reserved2[58];
  22109. + } __packed channel_mapping_query;
  22110. + struct {
  22111. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  22112. + u8 map_reserved:5;
  22113. + u8 map_lni_id:3;
  22114. +#else
  22115. + u8 map_lni_id:3;
  22116. + u8 map_reserved:5;
  22117. +#endif
  22118. + u8 __reserved2[58];
  22119. + } __packed sp_mapping_query;
  22120. + struct {
  22121. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  22122. + u8 cpl:1;
  22123. + u8 cpl_reserved:2;
  22124. + u8 oal:5;
  22125. +#else
  22126. + u8 oal:5;
  22127. + u8 cpl_reserved:2;
  22128. + u8 cpl:1;
  22129. +#endif
  22130. + u32 crtcr:24;
  22131. + u32 ertcr:24;
  22132. + u16 crtbl;
  22133. + u16 ertbl;
  22134. + u8 mps;
  22135. + u8 __reserved2[15];
  22136. + u32 crat;
  22137. + u32 erat;
  22138. + u8 __reserved3[24];
  22139. + } __packed shaper_query;
  22140. + struct {
  22141. + u8 __reserved1[11];
  22142. + u64 lnitcfcc;
  22143. + u8 __reserved3[40];
  22144. + } __packed tcfc_query;
  22145. + };
  22146. +} __packed;
  22147. +
  22148. +struct qm_mcr_ceetm_ccgr_config {
  22149. + u8 __reserved1[46];
  22150. + union {
  22151. + u8 __reserved2[8];
  22152. + struct {
  22153. + u16 timestamp;
  22154. + u16 wr_prob_g;
  22155. + u16 wr_prob_y;
  22156. + u16 wr_prob_r;
  22157. + } __packed test_write;
  22158. + };
  22159. + u8 __reserved3[8];
  22160. +} __packed;
  22161. +
  22162. +struct qm_mcr_ceetm_ccgr_query {
  22163. + u8 __reserved1[6];
  22164. + union {
  22165. + struct {
  22166. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  22167. + u8 ctl_reserved:1;
  22168. + u8 ctl_wr_en_g:1;
  22169. + u8 ctl_wr_en_y:1;
  22170. + u8 ctl_wr_en_r:1;
  22171. + u8 ctl_td_en:1;
  22172. + u8 ctl_td_mode:1;
  22173. + u8 ctl_cscn_en:1;
  22174. + u8 ctl_mode:1;
  22175. +#else
  22176. + u8 ctl_mode:1;
  22177. + u8 ctl_cscn_en:1;
  22178. + u8 ctl_td_mode:1;
  22179. + u8 ctl_td_en:1;
  22180. + u8 ctl_wr_en_r:1;
  22181. + u8 ctl_wr_en_y:1;
  22182. + u8 ctl_wr_en_g:1;
  22183. + u8 ctl_reserved:1;
  22184. +#endif
  22185. + u8 cdv;
  22186. + u8 __reserved2[2];
  22187. + u8 oal;
  22188. + u8 __reserved3;
  22189. + struct qm_cgr_cs_thres cs_thres;
  22190. + struct qm_cgr_cs_thres cs_thres_x;
  22191. + struct qm_cgr_cs_thres td_thres;
  22192. + struct qm_cgr_wr_parm wr_parm_g;
  22193. + struct qm_cgr_wr_parm wr_parm_y;
  22194. + struct qm_cgr_wr_parm wr_parm_r;
  22195. + u16 cscn_targ_dcp;
  22196. + u8 dcp_lsn;
  22197. + u64 i_cnt:40;
  22198. + u8 __reserved4[3];
  22199. + u64 a_cnt:40;
  22200. + u32 cscn_targ_swp[4];
  22201. + } __packed cm_query;
  22202. + struct {
  22203. + u8 dnc;
  22204. + u8 dn0;
  22205. + u8 dn1;
  22206. + u64 dnba:40;
  22207. + u8 __reserved2[2];
  22208. + u16 dnth_0;
  22209. + u8 __reserved3[2];
  22210. + u16 dnth_1;
  22211. + u8 __reserved4[10];
  22212. + u16 dnacc_0;
  22213. + u8 __reserved5[2];
  22214. + u16 dnacc_1;
  22215. + u8 __reserved6[24];
  22216. + } __packed dn_query;
  22217. + struct {
  22218. + u8 __reserved2[24];
  22219. + struct __qm_mcr_querycongestion state;
  22220. + } __packed congestion_state;
  22221. +
  22222. + };
  22223. +} __packed;
  22224. +
  22225. +struct qm_mcr_ceetm_cq_peek_pop_xsfdrread {
  22226. + u8 stat;
  22227. + u8 __reserved1[11];
  22228. + u16 dctidx;
  22229. + struct qm_fd fd;
  22230. + u8 __reserved2[32];
  22231. +} __packed;
  22232. +
  22233. +struct qm_mcr_ceetm_statistics_query {
  22234. + u8 __reserved1[17];
  22235. + u64 frm_cnt:40;
  22236. + u8 __reserved2[2];
  22237. + u64 byte_cnt:48;
  22238. + u8 __reserved3[32];
  22239. +} __packed;
  22240. +
  22241. +struct qm_mc_result {
  22242. + u8 verb;
  22243. + u8 result;
  22244. + union {
  22245. + struct qm_mcr_initfq initfq;
  22246. + struct qm_mcr_queryfq queryfq;
  22247. + struct qm_mcr_queryfq_np queryfq_np;
  22248. + struct qm_mcr_alterfq alterfq;
  22249. + struct qm_mcr_initcgr initcgr;
  22250. + struct qm_mcr_cgrtestwrite cgrtestwrite;
  22251. + struct qm_mcr_querycgr querycgr;
  22252. + struct qm_mcr_querycongestion querycongestion;
  22253. + struct qm_mcr_querywq querywq;
  22254. + struct qm_mcr_ceetm_lfqmt_config lfqmt_config;
  22255. + struct qm_mcr_ceetm_lfqmt_query lfqmt_query;
  22256. + struct qm_mcr_ceetm_cq_config cq_config;
  22257. + struct qm_mcr_ceetm_cq_query cq_query;
  22258. + struct qm_mcr_ceetm_dct_config dct_config;
  22259. + struct qm_mcr_ceetm_dct_query dct_query;
  22260. + struct qm_mcr_ceetm_class_scheduler_config csch_config;
  22261. + struct qm_mcr_ceetm_class_scheduler_query csch_query;
  22262. + struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config;
  22263. + struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query;
  22264. + struct qm_mcr_ceetm_ccgr_config ccgr_config;
  22265. + struct qm_mcr_ceetm_ccgr_query ccgr_query;
  22266. + struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
  22267. + struct qm_mcr_ceetm_statistics_query stats_query;
  22268. + };
  22269. +} __packed;
  22270. +
  22271. +#define QM_MCR_VERB_RRID 0x80
  22272. +#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
  22273. +#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
  22274. +#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
  22275. +#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
  22276. +#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
  22277. +#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
  22278. +#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
  22279. +#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
  22280. +#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
  22281. +#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
  22282. +#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
  22283. +#define QM_MCR_RESULT_NULL 0x00
  22284. +#define QM_MCR_RESULT_OK 0xf0
  22285. +#define QM_MCR_RESULT_ERR_FQID 0xf1
  22286. +#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
  22287. +#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
  22288. +#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
  22289. +#define QM_MCR_RESULT_PENDING 0xf8
  22290. +#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
  22291. +#define QM_MCR_NP_STATE_FE 0x10
  22292. +#define QM_MCR_NP_STATE_R 0x08
  22293. +#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
  22294. +#define QM_MCR_NP_STATE_OOS 0x00
  22295. +#define QM_MCR_NP_STATE_RETIRED 0x01
  22296. +#define QM_MCR_NP_STATE_TEN_SCHED 0x02
  22297. +#define QM_MCR_NP_STATE_TRU_SCHED 0x03
  22298. +#define QM_MCR_NP_STATE_PARKED 0x04
  22299. +#define QM_MCR_NP_STATE_ACTIVE 0x05
  22300. +#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
  22301. +#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
  22302. +#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
  22303. +#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
  22304. +#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
  22305. +#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
  22306. +#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
  22307. +/* This extracts the state for congestion group 'n' from a query response.
  22308. + * Eg.
  22309. + * u8 cgr = [...];
  22310. + * struct qm_mc_result *res = [...];
  22311. + * printf("congestion group %d congestion state: %d\n", cgr,
  22312. + * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
  22313. + */
  22314. +#define __CGR_WORD(num) ((num) >> 5)
  22315. +#define __CGR_SHIFT(num) ((num) & 0x1f)
  22316. +#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
  22317. +static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
  22318. + u8 cgr)
  22319. +{
  22320. + return be32_to_cpu(p->__state[__CGR_WORD(cgr)]) &
  22321. + (0x80000000 >> __CGR_SHIFT(cgr));
  22322. +}
  22323. +
  22324. +
  22325. +/*********************/
  22326. +/* Utility interface */
  22327. +/*********************/
  22328. +
  22329. +/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
  22330. + * spinlock them yourself if needed. */
  22331. +struct qman_fqid_pool;
  22332. +
  22333. +/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
  22334. + * always succeeds, but returns non-zero if there were "leaked" FQID
  22335. + * allocations. */
  22336. +struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
  22337. +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
  22338. +/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
  22339. +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
  22340. +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
  22341. +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
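Not part of the patch, but as a hedged illustration of the FQID pool interface above (the base 0x100 and count 64 are arbitrary example values, and the usual kernel headers plus this header are assumed to be included):

        static int example_fqid_pool(void)
        {
                struct qman_fqid_pool *pool;
                u32 fqid;

                /* Carve out 64 FQIDs starting at an arbitrary example base. */
                pool = qman_fqid_pool_create(0x100, 64);
                if (!pool)
                        return -ENOMEM;
                /* _alloc() returns zero for success. */
                if (!qman_fqid_pool_alloc(pool, &fqid)) {
                        pr_info("got FQID %u (%u in use)\n", fqid,
                                qman_fqid_pool_used(pool));
                        qman_fqid_pool_free(pool, fqid);
                }
                /* A non-zero return from _destroy() indicates leaked FQIDs. */
                return qman_fqid_pool_destroy(pool);
        }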
  22342. +
  22343. +/*******************************************************************/
  22344. +/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
  22345. +/*******************************************************************/
  22346. +
  22347. + /* Portal and Frame Queues */
  22348. + /* ----------------------- */
  22349. +/* Represents a managed portal */
  22350. +struct qman_portal;
  22351. +
  22352. +/* This object type represents Qman frame queue descriptors (FQD), it is
  22353. + * cacheline-aligned, and initialised by qman_create_fq(). The structure is
  22354. + * defined further down. */
  22355. +struct qman_fq;
  22356. +
  22357. +/* This object type represents a Qman congestion group, it is defined further
  22358. + * down. */
  22359. +struct qman_cgr;
  22360. +
  22361. +struct qman_portal_config {
  22362. + /* If the caller enables DQRR stashing (and thus wishes to operate the
  22363. + * portal from only one cpu), this is the logical CPU that the portal
  22364. + * will stash to. Whether stashing is enabled or not, this setting is
  22365. + * also used for any "core-affine" portals, ie. default portals
  22366. + * associated to the corresponding cpu. -1 implies that there is no core
  22367. + * affinity configured. */
  22368. + int cpu;
  22369. + /* portal interrupt line */
  22370. + int irq;
  22371. + /* the unique index of this portal */
  22372. + u32 index;
  22373. + /* Is this portal shared? (If so, it has coarser locking and demuxes
  22374. + * processing on behalf of other CPUs.) */
  22375. + int is_shared;
  22376. + /* The portal's dedicated channel id, use this value for initialising
  22377. + * frame queues to target this portal when scheduled. */
  22378. + u16 channel;
  22379. + /* A mask of which pool channels this portal has dequeue access to
  22380. + * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
  22381. + u32 pools;
  22382. +};
  22383. +
  22384. +/* This enum, and the callback type that returns it, are used when handling
  22385. + * dequeued frames via DQRR. Note that for "null" callbacks registered with the
  22386. + * portal object (for handling dequeues that do not demux because contextB is
  22387. + * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
  22388. +enum qman_cb_dqrr_result {
  22389. + /* DQRR entry can be consumed */
  22390. + qman_cb_dqrr_consume,
  22391. + /* Like _consume, but requests parking - FQ must be held-active */
  22392. + qman_cb_dqrr_park,
  22393. + /* Does not consume, for DCA mode only. This allows out-of-order
  22394. + * consumes by explicit calls to qman_dca() and/or the use of implicit
  22395. + * DCA via EQCR entries. */
  22396. + qman_cb_dqrr_defer,
  22397. + /* Stop processing without consuming this ring entry. Exits the current
  22398. + * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
  22399. + * interrupt handler, the callback would typically call
  22400. + * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
  22401. + * otherwise the interrupt will reassert immediately. */
  22402. + qman_cb_dqrr_stop,
  22403. + /* Like qman_cb_dqrr_stop, but consumes the current entry. */
  22404. + qman_cb_dqrr_consume_stop
  22405. +};
  22406. +typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
  22407. + struct qman_fq *fq,
  22408. + const struct qm_dqrr_entry *dqrr);
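As an illustrative sketch only: a typical DQRR callback matching the typedef above hands the frame descriptor to upper-layer code and consumes the entry. my_handle_frame() is a hypothetical helper, and the dqrr->fd field is assumed from the qm_dqrr_entry layout defined earlier in this header:

        static enum qman_cb_dqrr_result my_rx_dqrr(struct qman_portal *qm,
                                                   struct qman_fq *fq,
                                                   const struct qm_dqrr_entry *dqrr)
        {
                /* 'fq' is the object whose callbacks matched this entry; pass
                 * the frame descriptor to (hypothetical) upper-layer code. */
                my_handle_frame(&dqrr->fd);
                return qman_cb_dqrr_consume;
        }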
  22409. +
  22410. +/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
  22411. + * are always consumed after the callback returns. */
  22412. +typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
  22413. + const struct qm_mr_entry *msg);
  22414. +
  22415. +/* This callback type is used when handling DCP ERNs */
  22416. +typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
  22417. + const struct qm_mr_entry *msg);
  22418. +
  22419. +/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
  22420. + * held-active + held-suspended are just "sched". Things like "retired" will not
  22421. + * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
  22422. + * then, to indicate it's completing and to gate attempts to retry the retire
  22423. + * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
  22424. + * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
  22425. + * index rather than the FQ that ring entry corresponds to), so repeated park
  22426. + * commands are allowed (if you're silly enough to try) but won't change FQ
  22427. + * state, and the resulting park notifications move FQs from "sched" to
  22428. + * "parked". */
  22429. +enum qman_fq_state {
  22430. + qman_fq_state_oos,
  22431. + qman_fq_state_parked,
  22432. + qman_fq_state_sched,
  22433. + qman_fq_state_retired
  22434. +};
  22435. +
  22436. +/* Frame queue objects (struct qman_fq) are stored within memory passed to
  22437. + * qman_create_fq(), as this allows stashing of caller-provided demux callback
  22438. + * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
  22439. + * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
  22440. + * they should;
  22441. + *
  22442. + * (a) extend the qman_fq structure with their state; eg.
  22443. + *
  22444. + * // myfq is allocated and driver_fq callbacks filled in;
  22445. + * struct my_fq {
  22446. + * struct qman_fq base;
  22447. + * int an_extra_field;
  22448. + * [ ... add other fields to be associated with each FQ ...]
  22449. + * } *myfq = some_my_fq_allocator();
  22450. + * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
  22451. + *
  22452. + * // in a dequeue callback, access extra fields from 'fq' via a cast;
  22453. + * struct my_fq *myfq = (struct my_fq *)fq;
  22454. + * do_something_with(myfq->an_extra_field);
  22455. + * [...]
  22456. + *
  22457. + * (b) when and if configuring the FQ for context stashing, specify however
  22458. + * many cachelines are required to stash 'struct my_fq', to accelerate not
  22459. + * only the Qman driver but the callback as well.
  22460. + */
  22461. +
  22462. +struct qman_fq_cb {
  22463. + qman_cb_dqrr dqrr; /* for dequeued frames */
  22464. + qman_cb_mr ern; /* for s/w ERNs */
  22465. + qman_cb_mr fqs; /* frame-queue state changes*/
  22466. +};
  22467. +
  22468. +struct qman_fq {
  22469. + /* Caller of qman_create_fq() provides these demux callbacks */
  22470. + struct qman_fq_cb cb;
  22471. + /* These are internal to the driver, don't touch. In particular, they
  22472. + * may change, be removed, or extended (so you shouldn't rely on
  22473. + * sizeof(qman_fq) being a constant). */
  22474. + spinlock_t fqlock;
  22475. + u32 fqid;
  22476. + volatile unsigned long flags;
  22477. + enum qman_fq_state state;
  22478. + int cgr_groupid;
  22479. + struct rb_node node;
  22480. +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
  22481. + u32 key;
  22482. +#endif
  22483. +};
  22484. +
  22485. +/* This callback type is used when handling congestion group entry/exit.
  22486. + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
  22487. +typedef void (*qman_cb_cgr)(struct qman_portal *qm,
  22488. + struct qman_cgr *cgr, int congested);
  22489. +
  22490. +struct qman_cgr {
  22491. + /* Set these prior to qman_create_cgr() */
  22492. + u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
  22493. + qman_cb_cgr cb;
  22494. + /* These are private to the driver */
  22495. + u16 chan; /* portal channel this object is created on */
  22496. + struct list_head node;
  22497. +};
  22498. +
  22499. +/* Flags to qman_create_fq() */
  22500. +#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
  22501. +#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
  22502. +#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
  22503. +#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
  22504. +#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
  22505. +#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
  22506. +
  22507. +/* Flags to qman_destroy_fq() */
  22508. +#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
  22509. +
  22510. +/* Flags from qman_fq_state() */
  22511. +#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
  22512. +#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
  22513. +#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
  22514. +#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
  22515. +#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
  22516. +#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
  22517. +
  22518. +/* Flags to qman_init_fq() */
  22519. +#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
  22520. +#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
  22521. +
  22522. +/* Flags to qman_volatile_dequeue() */
  22523. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  22524. +#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
  22525. +#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
  22526. +#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
  22527. +#endif
  22528. +
  22529. +/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
  22530. + * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
  22531. + * any change here should be audited in PME.) */
  22532. +#ifdef CONFIG_FSL_DPA_CAN_WAIT
  22533. +#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */
  22534. +#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */
  22535. +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
  22536. +#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
  22537. +#endif
  22538. +#endif
  22539. +#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
  22540. +#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
  22541. +#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
  22542. +#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
  22543. + (((u32)(p) << 2) & 0x00000f00)
  22544. +#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
  22545. +#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
  22546. +#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
  22547. +#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
  22548. +/* For the ORP-specific qman_enqueue_orp() variant;
  22549. + * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
  22550. + * of a frame. */
  22551. +#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
  22552. +/* - this flag performs no enqueue but fills in an ORP sequence number that
  22553. + * would otherwise block it (eg. if a frame has been dropped). */
  22554. +#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
  22555. +/* - this flag performs no enqueue but advances NESN to the given sequence
  22556. + * number. */
  22557. +#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
  22558. +
  22559. +/* Flags to qman_modify_cgr() */
  22560. +#define QMAN_CGR_FLAG_USE_INIT 0x00000001
  22561. +#define QMAN_CGR_MODE_FRAME 0x00000001
  22562. +
  22563. + /* Portal Management */
  22564. + /* ----------------- */
  22565. +/**
  22566. + * qman_get_portal_config - get portal configuration settings
  22567. + *
  22568. + * This returns a read-only view of the current cpu's affine portal settings.
  22569. + */
  22570. +const struct qman_portal_config *qman_get_portal_config(void);
  22571. +
  22572. +/**
  22573. + * qman_irqsource_get - return the portal work that is interrupt-driven
  22574. + *
  22575. + * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
  22576. + * enabled for interrupt handling on the current cpu's affine portal. These
  22577. + * sources will trigger the portal interrupt and the interrupt handler (or a
  22578. + * tasklet/bottom-half it defers to) will perform the corresponding processing
  22579. + * work. The qman_poll_***() functions will only process sources that are not in
  22580. + * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
  22581. + * this always returns zero.
  22582. + */
  22583. +u32 qman_irqsource_get(void);
  22584. +
  22585. +/**
  22586. + * qman_irqsource_add - add processing sources to be interrupt-driven
  22587. + * @bits: bitmask of QM_PIRQ_**I processing sources
  22588. + *
  22589. + * Adds processing sources that should be interrupt-driven (rather than
  22590. + * processed via qman_poll_***() functions). Returns zero for success, or
  22591. + * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
  22592. + */
  22593. +int qman_irqsource_add(u32 bits);
  22594. +
  22595. +/**
  22596. + * qman_irqsource_remove - remove processing sources from being interrupt-driven
  22597. + * @bits: bitmask of QM_PIRQ_**I processing sources
  22598. + *
  22599. + * Removes processing sources from being interrupt-driven, so that they will
  22600. + * instead be processed via qman_poll_***() functions. Returns zero for success,
  22601. + * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
  22602. + */
  22603. +int qman_irqsource_remove(u32 bits);
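A hedged sketch (not driver code) of moving dequeue processing from interrupt-driven to polled on the current cpu's affine portal, using the QM_PIRQ_DQRI source named in the comments above:

        static void example_go_polled(void)
        {
                /* Stop the portal interrupt firing for dequeues on this cpu... */
                if (qman_irqsource_remove(QM_PIRQ_DQRI)) {
                        pr_err("portal is hosted on another cpu\n");
                        return;
                }
                /* ...and drain any outstanding DQRR entries explicitly. */
                while (qman_poll_dqrr(16) > 0)
                        ;
        }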
  22604. +
  22605. +/**
  22606. + * qman_affine_cpus - return a mask of cpus that have affine portals
  22607. + */
  22608. +const cpumask_t *qman_affine_cpus(void);
  22609. +
  22610. +/**
  22611. + * qman_affine_channel - return the channel ID of a cpu's affine portal
  22612. + * @cpu: the cpu whose affine portal is the subject of the query
  22613. + *
  22614. + * If @cpu is -1, the affine portal for the current CPU will be used. It is a
  22615. + * bug to call this function for any value of @cpu (other than -1) that is not a
  22616. + * member of the mask returned from qman_affine_cpus().
  22617. + */
  22618. +u16 qman_affine_channel(int cpu);
  22619. +
  22620. +/**
  22621. + * qman_get_affine_portal - return the portal pointer affine to cpu
  22622. + * @cpu: the cpu whose affine portal is the subject of the query
  22623. + *
  22624. + */
  22625. +void *qman_get_affine_portal(int cpu);
  22626. +
  22627. +/**
  22628. + * qman_poll_dqrr - process DQRR (fast-path) entries
  22629. + * @limit: the maximum number of DQRR entries to process
  22630. + *
  22631. + * Use of this function requires that DQRR processing not be interrupt-driven.
  22632. + * Ie. the value returned by qman_irqsource_get() should not include
  22633. + * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
  22634. + * this function will return -EINVAL, otherwise the return value is >=0 and
  22635. + * represents the number of DQRR entries processed.
  22636. + */
  22637. +int qman_poll_dqrr(unsigned int limit);
  22638. +
  22639. +/**
  22640. + * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
  22641. + *
  22642. + * This function does any portal processing that isn't interrupt-driven. If the
  22643. + * current CPU is sharing a portal hosted on another CPU, this function will
  22644. + * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
  22645. + * indicating what interrupt sources were actually processed by the call.
  22646. + */
  22647. +u32 qman_poll_slow(void);
  22648. +
  22649. +/**
  22650. + * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
  22651. + *
  22652. + * Dispatcher logic on a cpu can use this to trigger any maintenance of the
  22653. + * affine portal. There are two classes of portal processing in question;
  22654. + * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
  22655. + * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
  22656. + * thresholds, congestion state changes, etc). This function does whatever
  22657. + * processing is not triggered by interrupts.
  22658. + *
  22659. + * Note, if DQRR and some slow-path processing are poll-driven (rather than
  22660. + * interrupt-driven) then this function uses a heuristic to determine how often
  22661. + * to run slow-path processing - as slow-path processing introduces at least a
  22662. + * minimum latency each time it is run, whereas fast-path (DQRR) processing is
  22663. + * close to zero-cost if there is no work to be done. Applications can tune this
  22664. + * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
  22665. + * rather than going via this wrapper.
  22666. + */
  22667. +void qman_poll(void);
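For illustration, a trivial dispatcher loop as described above; it is assumed to run in a kthread bound to a cpu listed in qman_affine_cpus(), with DQRR and slow-path sources left poll-driven:

        static int example_dispatcher(void *arg)
        {
                /* Must run on a cpu that has an affine portal. */
                while (!kthread_should_stop()) {
                        qman_poll();
                        cond_resched();
                }
                return 0;
        }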
  22668. +
  22669. +/**
  22670. + * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
  22671. + *
  22672. + * Disables DQRR processing of the portal. This is reference-counted, so
  22673. + * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
  22674. + * truly re-enable dequeuing.
  22675. + */
  22676. +void qman_stop_dequeues(void);
  22677. +
  22678. +/**
  22679. + * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
  22680. + *
  22681. + * Enables DQRR processing of the portal. This is reference-counted, so
  22682. + * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
  22683. + * truly re-enable dequeuing.
  22684. + */
  22685. +void qman_start_dequeues(void);
  22686. +
  22687. +/**
  22688. + * qman_static_dequeue_add - Add pool channels to the portal SDQCR
  22689. + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
  22690. + *
  22691. + * Adds a set of pool channels to the portal's static dequeue command register
  22692. + * (SDQCR). The requested pools are limited to those the portal has dequeue
  22693. + * access to.
  22694. + */
  22695. +void qman_static_dequeue_add(u32 pools);
  22696. +
  22697. +/**
  22698. + * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
  22699. + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
  22700. + *
  22701. + * Removes a set of pool channels from the portal's static dequeue command
  22702. + * register (SDQCR). The requested pools are limited to those the portal has
  22703. + * dequeue access to.
  22704. + */
  22705. +void qman_static_dequeue_del(u32 pools);
  22706. +
  22707. +/**
  22708. + * qman_static_dequeue_get - return the portal's current SDQCR
  22709. + *
  22710. + * Returns the portal's current static dequeue command register (SDQCR). The
  22711. + * entire register is returned, so if only the currently-enabled pool channels
  22712. + * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
  22713. + */
  22714. +u32 qman_static_dequeue_get(void);
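An illustrative sketch of enabling dequeue from a pool channel via SDQCR; pool channel 3 is an arbitrary example, and the mask is checked against the portal's configured access rights:

        static void example_enable_pool_dequeue(void)
        {
                const struct qman_portal_config *cfg = qman_get_portal_config();
                u32 pool = QM_SDQCR_CHANNELS_POOL(3);   /* example: pool channel 3 */

                /* Only request pools this portal actually has dequeue access to. */
                if (cfg->pools & pool)
                        qman_static_dequeue_add(pool);
                pr_info("SDQCR now 0x%08x\n", qman_static_dequeue_get());
        }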
  22715. +
  22716. +/**
  22717. + * qman_dca - Perform a Discrete Consumption Acknowledgement
  22718. + * @dq: the DQRR entry to be consumed
  22719. + * @park_request: indicates whether the held-active @fq should be parked
  22720. + *
  22721. + * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
  22722. + * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
  22723. + * does not take a 'portal' argument but implies the core affine portal from the
  22724. + * cpu that is currently executing the function. For reasons of locking, this
  22725. + * function must be called from the same CPU as that which processed the DQRR
  22726. + * entry in the first place.
  22727. + */
  22728. +void qman_dca(struct qm_dqrr_entry *dq, int park_request);
  22729. +
  22730. +/**
  22731. + * qman_eqcr_is_empty - Determine if portal's EQCR is empty
  22732. + *
  22733. + * For use in situations where a cpu-affine caller needs to determine when all
  22734. + * enqueues for the local portal have been processed by Qman but can't use the
  22735. + * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
  22736. + * The function forces tracking of EQCR consumption (which normally doesn't
  22737. + * happen until enqueue processing needs to find space to put new enqueue
  22738. + * commands), and returns zero if the ring still has unprocessed entries,
  22739. + * non-zero if it is empty.
  22740. + */
  22741. +int qman_eqcr_is_empty(void);
  22742. +
  22743. +/**
  22744. + * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
  22745. + * @handler: callback for processing DCP ERNs
  22746. + * @affine: whether this handler is specific to the locally affine portal
  22747. + *
  22748. + * If a hardware block's interface to Qman (ie. its direct-connect portal, or
  22749. + * DCP) is configured not to receive enqueue rejections, then any enqueues
  22750. + * through that DCP that are rejected will be sent to a given software portal.
  22751. + * If @affine is non-zero, then this handler will only be used for DCP ERNs
  22752. + * received on the portal affine to the current CPU. If multiple CPUs share a
  22753. + * portal and they all call this function, they will be setting the handler for
  22754. + * the same portal! If @affine is zero, then this handler will be global to all
  22755. + * portals handled by this instance of the driver. Only those portals that do
  22756. + * not have their own affine handler will use the global handler.
  22757. + */
  22758. +void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
  22759. +
  22760. + /* FQ management */
  22761. + /* ------------- */
  22762. +/**
  22763. + * qman_create_fq - Allocates a FQ
  22764. + * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
  22765. + * @flags: bit-mask of QMAN_FQ_FLAG_*** options
  22766. + * @fq: memory for storing the 'fq', with callbacks filled in
  22767. + *
  22768. + * Creates a frame queue object for the given @fqid, unless the
  22769. + * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
  22770. + * dynamically allocated (or the function fails if none are available). Once
  22771. + * created, the caller should not touch the memory at 'fq' except as extended to
  22772. + * adjacent memory for user-defined fields (see the definition of "struct
  22773. + * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
  22774. + * pre-existing frame-queues that aren't to be otherwise interfered with, it
  22775. + * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
  22776. + * causes the driver to honour any contextB modifications requested in the
  22777. + * qm_init_fq() API, as this indicates the frame queue will be consumed by a
  22778. + * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
  22779. + * software portals, the contextB field is controlled by the driver and can't be
  22780. + * modified by the caller. If the AS_IS flag is specified, management commands
  22781. + * will be used on portal @p to query state for frame queue @fqid and construct
  22782. + * a frame queue object based on that, rather than assuming/requiring that it be
  22783. + * Out of Service.
  22784. + */
  22785. +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
  22786. +
  22787. +/**
  22788. + * qman_destroy_fq - Deallocates a FQ
  22789. + * @fq: the frame queue object to release
  22790. + * @flags: bit-mask of QMAN_FQ_FREE_*** options
  22791. + *
  22792. + * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
  22793. + * not deallocated but the caller regains ownership, to do with as desired. The
  22794. + * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
  22795. + * is specified, in which case it may also be in the 'parked' state.
  22796. + */
  22797. +void qman_destroy_fq(struct qman_fq *fq, u32 flags);
  22798. +
  22799. +/**
  22800. + * qman_fq_fqid - Queries the frame queue ID of a FQ object
  22801. + * @fq: the frame queue object to query
  22802. + */
  22803. +u32 qman_fq_fqid(struct qman_fq *fq);
  22804. +
  22805. +/**
  22806. + * qman_fq_state - Queries the state of a FQ object
  22807. + * @fq: the frame queue object to query
  22808. + * @state: pointer to state enum to return the FQ scheduling state
  22809. + * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
  22810. + *
  22811. + * Queries the state of the FQ object, without performing any h/w commands.
  22812. + * This captures the state, as seen by the driver, at the time the function
  22813. + * executes.
  22814. + */
  22815. +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
  22816. +
  22817. +/**
  22818. + * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
  22819. + * @fq: the frame queue object to modify, must be 'parked' or new.
  22820. + * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
  22821. + * @opts: the FQ-modification settings, as defined in the low-level API
  22822. + *
  22823. + * The @opts parameter comes from the low-level portal API. Select
  22824. + * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
  22825. + * rather than parked. NB, @opts can be NULL.
  22826. + *
  22827. + * Note that some fields and options within @opts may be ignored or overwritten
  22828. + * by the driver;
  22829. + * 1. the 'count' and 'fqid' fields are always ignored (this operation only
  22830. + * affects one frame queue: @fq).
  22831. + * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
  22832. + * 'fqd' structure's 'context_b' field are sometimes overwritten;
  22833. + * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
  22834. + * initialised to a value used by the driver for demux.
  22835. + * - if context_b is initialised for demux, so is context_a in case stashing
  22836. + * is requested (see item 4).
  22837. + * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
  22838. + * objects.)
  22839. + * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
  22840. + * 'dest::channel' field will be overwritten to match the portal used to issue
  22841. + * the command. If the WE_DESTWQ write-enable bit had already been set by the
  22842. + * caller, the channel workqueue will be left as-is, otherwise the write-enable
  22843. + * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
  22844. + * isn't set, the destination channel/workqueue fields and the write-enable bit
  22845. + * are left as-is.
  22846. + * 4. if the driver overwrites context_a/b for demux, then if
  22847. + * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
  22848. + * context_a.address fields and will leave the stashing fields provided by the
  22849. + * user alone, otherwise it will zero out the context_a.stashing fields.
  22850. + */
  22851. +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
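Putting qman_create_fq() and qman_init_fq() together, a hedged sketch of bringing up a software-consumed Rx FQ with a dynamically allocated FQID; my_rx_dqrr is the hypothetical callback from the earlier sketch and error handling is kept minimal:

        static int example_setup_rx_fq(void)
        {
                struct qm_mcc_initfq opts = {};         /* empty write-enable mask */
                struct qman_fq *fq;
                int err;

                fq = kzalloc(sizeof(*fq), GFP_KERNEL);
                if (!fq)
                        return -ENOMEM;
                fq->cb.dqrr = my_rx_dqrr;       /* hypothetical, see earlier sketch */
                /* FQID argument is unused here: one is allocated dynamically. */
                err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
                if (err) {
                        kfree(fq);
                        return err;
                }
                /* The LOCAL flag makes the driver target this portal's dedicated
                 * channel and a default workqueue; SCHED leaves the FQ scheduled
                 * rather than parked. */
                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED |
                                       QMAN_INITFQ_FLAG_LOCAL, &opts);
                if (err) {
                        qman_destroy_fq(fq, 0);
                        kfree(fq);
                }
                return err;
        }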
  22852. +
  22853. +/**
  22854. + * qman_schedule_fq - Schedules a FQ
  22855. + * @fq: the frame queue object to schedule, must be 'parked'
  22856. + *
  22857. + * Schedules the frame queue, which must be Parked, which takes it to
  22858. + * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
  22859. + */
  22860. +int qman_schedule_fq(struct qman_fq *fq);
  22861. +
  22862. +/**
  22863. + * qman_retire_fq - Retires a FQ
  22864. + * @fq: the frame queue object to retire
  22865. + * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
  22866. + *
  22867. + * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
  22868. + * the retirement was started asynchronously, otherwise it returns negative for
  22869. + * failure. When this function returns zero, @flags is set to indicate whether
  22870. + * the retired FQ is empty and/or whether it has any ORL fragments (to show up
  22871. + * as ERNs). Otherwise the corresponding flags will be known when a subsequent
  22872. + * FQRN message shows up on the portal's message ring.
  22873. + *
  22874. + * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
  22875. + * Active state), the completion will be via the message ring as a FQRN - but
  22876. + * the corresponding callback may occur before this function returns!! Ie. the
  22877. + * caller should be prepared to accept the callback as the function is called,
  22878. + * not only once it has returned.
  22879. + */
  22880. +int qman_retire_fq(struct qman_fq *fq, u32 *flags);
  22881. +
  22882. +/**
  22883. + * qman_oos_fq - Puts a FQ "out of service"
  22884. + * @fq: the frame queue object to be put out-of-service, must be 'retired'
  22885. + *
  22886. + * The frame queue must be retired and empty, and if any order restoration list
  22887. + * entries were released as ERNs at the time of retirement, they must all be consumed.
  22888. + */
  22889. +int qman_oos_fq(struct qman_fq *fq);
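A hedged teardown sketch combining qman_retire_fq(), qman_oos_fq() and qman_destroy_fq(); example_wait_for_fqrn() is a hypothetical stand-in for however the caller synchronises with the asynchronous FQRN callback, and handling of the NE/ORL flags is glossed over:

        static int example_teardown_fq(struct qman_fq *fq)
        {
                u32 flags;
                int err = qman_retire_fq(fq, &flags);

                if (err == 1)
                        example_wait_for_fqrn(fq);      /* hypothetical synchronisation */
                else if (err < 0)
                        return err;
                /* Assumes the FQ is now empty and any ORL ERNs have been consumed. */
                err = qman_oos_fq(fq);
                if (!err)
                        qman_destroy_fq(fq, 0);
                return err;
        }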
  22890. +
  22891. +/**
  22892. + * qman_fq_flow_control - Set the XON/XOFF state of a FQ
  22893. + * @fq: the frame queue object to be set to XON/XOFF state; must not be in the
  22894. + * 'oos', 'retired' or 'parked' state
  22895. + * @xon: boolean to set fq in XON or XOFF state
  22896. + *
  22897. + * The frame queue should be in the Tentatively Scheduled or Truly Scheduled state,
  22898. + * otherwise the IFSI interrupt will be asserted.
  22899. + */
  22900. +int qman_fq_flow_control(struct qman_fq *fq, int xon);
  22901. +
  22902. +/**
  22903. + * qman_query_fq - Queries FQD fields (via h/w query command)
  22904. + * @fq: the frame queue object to be queried
  22905. + * @fqd: storage for the queried FQD fields
  22906. + */
  22907. +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
  22908. +
  22909. +/**
  22910. + * qman_query_fq_np - Queries non-programmable FQD fields
  22911. + * @fq: the frame queue object to be queried
  22912. + * @np: storage for the queried FQD fields
  22913. + */
  22914. +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
  22915. +
  22916. +/**
  22917. + * qman_query_wq - Queries work queue lengths
  22918. + * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
  22919. + * to this software portal. Otherwise, query length of WQs in a
  22920. + * channel specified in wq.
  22921. + * @wq: storage for the queried WQ lengths. Also specifies the channel
  22922. + * to query if query_dedicated is zero.
  22923. + */
  22924. +int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
  22925. +
  22926. +/**
  22927. + * qman_volatile_dequeue - Issue a volatile dequeue command
  22928. + * @fq: the frame queue object to dequeue from
  22929. + * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
  22930. + * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
  22931. + *
  22932. + * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
  22933. + * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
  22934. + * the VDQCR is already in use, otherwise returns non-zero for failure. If
  22935. + * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
  22936. + * the VDQCR command has finished executing (ie. once the callback for the last
  22937. + * DQRR entry resulting from the VDQCR command has been called). If not using
  22938. + * the FINISH flag, completion can be determined either by detecting the
  22939. + * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
  22940. + * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
  22941. + * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
  22942. + * "flags" retrieved from qman_fq_state().
  22943. + */
  22944. +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
  22945. +
  22946. +/**
  22947. + * qman_enqueue - Enqueue a frame to a frame queue
  22948. + * @fq: the frame queue object to enqueue to
  22949. + * @fd: a descriptor of the frame to be enqueued
  22950. + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
  22951. + *
  22952. + * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
  22953. + * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
  22954. + * field is ignored. The return value is non-zero on error, such as ring full
  22955. + * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
  22956. + * specified), etc. If the ring is full and FLAG_WAIT is specified, this
  22957. + * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
  22958. + * interrupt will assert when Qman consumes the EQCR entry (subject to "status
  22959. + * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
  22960. + * perform an implied "discrete consumption acknowledgement" on the dequeue
  22961. + * ring's (DQRR) entry, as specified via the QMAN_ENQUEUE_FLAG_DCA_PTR(p)
  22962. + * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
  22963. + * this implicit DCA can delay the release of a "held active" frame queue
  22964. + * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
  22965. + * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
  22966. + * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
  22967. + * acknowledgement should "park request" the "held active" frame queue. Ie.
  22968. + * when the portal eventually releases that frame queue, it will be left in the
  22969. + * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
  22970. + * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
  22971. + * is requested, and the FQ is a member of a congestion group, then this
  22972. + * function returns -EAGAIN if the congestion group is currently congested.
  22973. + * Note, this does not eliminate ERNs, as the async interface means we can be
  22974. + * sending enqueue commands to an un-congested FQ that becomes congested before
  22975. + * the enqueue commands are processed, but it does minimise needless thrashing
  22976. + * of an already busy hardware resource by throttling many of the to-be-dropped
  22977. + * enqueues "at the source".
  22978. + */
  22979. +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
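As a small illustration of the congestion-watching behaviour described above (not part of the patch); example_drop_frame() is a hypothetical helper in a forwarding path:

        static int example_tx(struct qman_fq *fq, const struct qm_fd *fd)
        {
                int err = qman_enqueue(fq, fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);

                /* -EAGAIN means the FQ's congestion group is currently congested;
                 * a forwarding path would typically drop the frame at source. */
                if (err == -EAGAIN)
                        example_drop_frame(fd);         /* hypothetical */
                return err;
        }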
  22980. +
  22981. +typedef int (*qman_cb_precommit) (void *arg);
  22982. +/**
  22983. + * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
  22984. + * @fq: the frame queue object to enqueue to
  22985. + * @fd: a descriptor of the frame to be enqueued
  22986. + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
  22987. + * @cb: user supplied callback function to invoke before writing commit verb.
  22988. + * @cb_arg: callback function argument
  22989. + *
  22990. + * This is similar to qman_enqueue except that it will invoke a user supplied
  22991. + * callback function just before writing the commit verb. This is useful
  22992. + * when the user wants to do something *just before* enqueuing the request and
  22993. + * the enqueue can't fail.
  22994. + */
  22995. +int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
  22996. + u32 flags, qman_cb_precommit cb, void *cb_arg);
  22997. +
  22998. +/**
  22999. + * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
  23000. + * @fq: the frame queue object to enqueue to
  23001. + * @fd: a descriptor of the frame to be enqueued
  23002. + * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
  23003. + * @orp: the frame queue object used as an order restoration point.
  23004. + * @orp_seqnum: the sequence number of this frame in the order restoration path
  23005. + *
  23006. + * Similar to qman_enqueue(), but with the addition of an Order Restoration
  23007. + * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
  23008. + * enqueue operation to employ order restoration. Each frame queue object acts
  23009. + * as an Order Definition Point (ODP) by providing each frame dequeued from it
  23010. + * with an incrementing sequence number, this value is generally ignored unless
  23011. + * that sequence of dequeued frames will need order restoration later. Each
  23012. + * frame queue object also encapsulates an Order Restoration Point (ORP), which
  23013. + * is a re-assembly context for re-ordering frames relative to their sequence
  23014. + * numbers as they are enqueued. The ORP does not have to be within the frame
  23015. + * queue that receives the enqueued frame, in fact it is usually the frame
  23016. + * queue from which the frames were originally dequeued. For the purposes of
  23017. + * order restoration, multiple frames (or "fragments") can be enqueued for a
  23018. + * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
  23019. + * enqueues except the final fragment of a given sequence number. Ordering
  23020. + * between sequence numbers is guaranteed, even if fragments of different
  23021. + * sequence numbers are interlaced with one another. Fragments of the same
  23022. + * sequence number will retain the order in which they are enqueued. If no
  23023. + * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
  23024. + * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
  23025. + * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
  23026. + * sequence number should become the ORP's "Next Expected Sequence Number".
  23027. + *
  23028. + * Side note: a frame queue object can be used purely as an ORP, without
  23029. + * carrying any frames at all. Care should be taken not to deallocate a frame
  23030. + * queue object that is being actively used as an ORP, as a future allocation
  23031. + * of the frame queue object may start using the internal ORP before the
  23032. + * previous use has finished.
  23033. + */
  23034. +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
  23035. + struct qman_fq *orp, u16 orp_seqnum);
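An illustrative fragment (not driver code) enqueuing two fragments of one sequence number through an ORP; dest_fq, orp_fq, frag_fd[] and seqnum are assumed to have been set up by the caller:

        /* Two fragments that share one sequence number: all but the last carry
         * the NLIS flag. 'orp_fq' is the FQ object acting as the ORP, typically
         * the FQ the frames were originally dequeued from. */
        err = qman_enqueue_orp(dest_fq, &frag_fd[0], QMAN_ENQUEUE_FLAG_NLIS,
                               orp_fq, seqnum);
        if (!err)
                err = qman_enqueue_orp(dest_fq, &frag_fd[1], 0, orp_fq, seqnum);
        /* If the whole sequence number had to be dropped instead, it would be
         * released with QMAN_ENQUEUE_FLAG_HOLE so later sequences are not blocked. */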
  23036. +
  23037. +/**
  23038. + * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
  23039. + * @result: is set by the API to the base FQID of the allocated range
  23040. + * @count: the number of FQIDs required
  23041. + * @align: required alignment of the allocated range
  23042. + * @partial: non-zero if the API can return fewer than @count FQIDs
  23043. + *
  23044. + * Returns the number of frame queues allocated, or a negative error code. If
  23045. + * @partial is non zero, the allocation request may return a smaller range of
  23046. + * FQs than requested (though alignment will be as requested). If @partial is
  23047. + * zero, the return value will either be 'count' or negative.
  23048. + */
  23049. +int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
  23050. +static inline int qman_alloc_fqid(u32 *result)
  23051. +{
  23052. + int ret = qman_alloc_fqid_range(result, 1, 0, 0);
  23053. + return (ret > 0) ? 0 : ret;
  23054. +}
  23055. +
  23056. +/**
  23057. + * qman_release_fqid_range - Release the specified range of frame queue IDs
  23058. + * @fqid: the base FQID of the range to deallocate
  23059. + * @count: the number of FQIDs in the range
  23060. + *
  23061. + * This function can also be used to seed the allocator with ranges of FQIDs
  23062. + * that it can subsequently allocate from.
  23063. + */
  23064. +void qman_release_fqid_range(u32 fqid, unsigned int count);
  23065. +static inline void qman_release_fqid(u32 fqid)
  23066. +{
  23067. + qman_release_fqid_range(fqid, 1);
  23068. +}
  23069. +
  23070. +void qman_seed_fqid_range(u32 fqid, unsigned int count);
  23071. +
  23072. +
  23073. +int qman_shutdown_fq(u32 fqid);
  23074. +
  23075. +/**
  23076. + * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
  23077. + * @fqid: the base FQID of the range to reserve
  23078. + * @count: the number of FQIDs in the range
  23079. + */
  23080. +int qman_reserve_fqid_range(u32 fqid, unsigned int count);
  23081. +static inline int qman_reserve_fqid(u32 fqid)
  23082. +{
  23083. + return qman_reserve_fqid_range(fqid, 1);
  23084. +}
  23085. +
  23086. + /* Pool-channel management */
  23087. + /* ----------------------- */
  23088. +/**
  23089. + * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
  23090. + * @result: is set by the API to the base pool-channel ID of the allocated range
  23091. + * @count: the number of pool-channel IDs required
  23092. + * @align: required alignment of the allocated range
  23093. + * @partial: non-zero if the API can return fewer than @count
  23094. + *
  23095. + * Returns the number of pool-channel IDs allocated, or a negative error code.
  23096. + * If @partial is non zero, the allocation request may return a smaller range of IDs
  23097. + * than requested (though alignment will be as requested). If @partial is zero,
  23098. + * the return value will either be 'count' or negative.
  23099. + */
  23100. +int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
  23101. +static inline int qman_alloc_pool(u32 *result)
  23102. +{
  23103. + int ret = qman_alloc_pool_range(result, 1, 0, 0);
  23104. + return (ret > 0) ? 0 : ret;
  23105. +}
  23106. +
  23107. +/**
  23108. + * qman_release_pool_range - Release the specified range of pool-channel IDs
  23109. + * @id: the base pool-channel ID of the range to deallocate
  23110. + * @count: the number of pool-channel IDs in the range
  23111. + */
  23112. +void qman_release_pool_range(u32 id, unsigned int count);
  23113. +static inline void qman_release_pool(u32 id)
  23114. +{
  23115. + qman_release_pool_range(id, 1);
  23116. +}
  23117. +
  23118. +/**
  23119. + * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
  23120. + * @id: the base pool-channel ID of the range to reserve
  23121. + * @count: the number of pool-channel IDs in the range
  23122. + */
  23123. +int qman_reserve_pool_range(u32 id, unsigned int count);
  23124. +static inline int qman_reserve_pool(u32 id)
  23125. +{
  23126. + return qman_reserve_pool_range(id, 1);
  23127. +}
  23128. +
  23129. +void qman_seed_pool_range(u32 id, unsigned int count);
  23130. +
  23131. + /* CGR management */
  23132. + /* -------------- */
  23133. +/**
  23134. + * qman_create_cgr - Register a congestion group object
  23135. + * @cgr: the 'cgr' object, with fields filled in
  23136. + * @flags: QMAN_CGR_FLAG_* values
  23137. + * @opts: optional state of CGR settings
  23138. + *
  23139. + * Registers this object to receive congestion entry/exit callbacks on the
  23140. + * portal affine to the cpu on which this API is executed. If opts is
  23141. + * NULL then only the callback (cgr->cb) function is registered. If @flags
  23142. + * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
  23143. + * any unspecified parameters) will be used rather than a modify hw hardware
  23144. + * (which only modifies the specified parameters).
  23145. + */
  23146. +int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
  23147. + struct qm_mcc_initcgr *opts);
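A hedged sketch of registering a congestion group on the current cpu's affine portal with callback-only registration (NULL opts, so no hardware CGR parameters are touched); my_cgr_cb is a hypothetical qman_cb_cgr handler and the caller owns the cgr storage:

        static int example_register_cgr(struct qman_cgr *cgr)
        {
                u32 cgrid;
                int err;

                err = qman_alloc_cgrid(&cgrid);
                if (err)
                        return err;
                cgr->cgrid = cgrid;
                cgr->cb = my_cgr_cb;            /* hypothetical entry/exit handler */
                /* NULL opts: only the callback is registered; no hardware CGR
                 * parameters are (re)initialised. */
                err = qman_create_cgr(cgr, 0, NULL);
                if (err)
                        qman_release_cgrid(cgrid);
                return err;
        }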
  23148. +
  23149. +/**
  23150. + * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
  23151. + * @cgr: the 'cgr' object, with fields filled in
  23152. + * @flags: QMAN_CGR_FLAG_* values
  23153. + * @dcp_portal: the DCP portal to which the cgr object is registered.
  23154. + * @opts: optional state of CGR settings
  23155. + *
  23156. + */
  23157. +int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
  23158. + struct qm_mcc_initcgr *opts);
  23159. +
  23160. +/**
  23161. + * qman_delete_cgr - Deregisters a congestion group object
  23162. + * @cgr: the 'cgr' object to deregister
  23163. + *
  23164. + * "Unplugs" this CGR object from the portal affine to the cpu on which this API
  23165. + * is executed. This must be executed on the same affine portal on which it was
  23166. + * created.
  23167. + */
  23168. +int qman_delete_cgr(struct qman_cgr *cgr);
  23169. +
  23170. +/**
  23171. + * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
  23172. + * @cgr: the 'cgr' object to deregister
  23173. + *
  23174. + * This will select the proper CPU and run qman_delete_cgr() there.
  23175. + */
  23176. +void qman_delete_cgr_safe(struct qman_cgr *cgr);
  23177. +
  23178. +/**
  23179. + * qman_modify_cgr - Modify CGR fields
  23180. + * @cgr: the 'cgr' object to modify
  23181. + * @flags: QMAN_CGR_FLAG_* values
  23182. + * @opts: the CGR-modification settings
  23183. + *
  23184. + * The @opts parameter comes from the low-level portal API, and can be NULL.
  23185. + * Note that some fields and options within @opts may be ignored or overwritten
  23186. + * by the driver, in particular the 'cgrid' field is ignored (this operation
  23187. + * only affects the given CGR object). If @flags contains
  23188. + * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
  23189. + * unspecified parameters) will be used rather than a modify hw command (which
  23190. + * only modifies the specified parameters).
  23191. + */
  23192. +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
  23193. + struct qm_mcc_initcgr *opts);
  23194. +
  23195. +/**
  23196. + * qman_query_cgr - Queries CGR fields
  23197. + * @cgr: the 'cgr' object to query
  23198. + * @result: storage for the queried congestion group record
  23199. + */
  23200. +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
  23201. +
  23202. +/**
  23203. + * qman_query_congestion - Queries the state of all congestion groups
  23204. + * @congestion: storage for the queried state of all congestion groups
  23205. + */
  23206. +int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
  23207. +
  23208. +/**
  23209. + * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
  23210. + * @result: is set by the API to the base CGR ID of the allocated range
  23211. + * @count: the number of CGR IDs required
  23212. + * @align: required alignment of the allocated range
  23213. + * @partial: non-zero if the API can return fewer than @count
  23214. + *
  23215. + * Returns the number of CGR IDs allocated, or a negative error code.
  23216. + * If @partial is non zero, the allocation request may return a smaller range of IDs
  23217. + * than requested (though alignment will be as requested). If @partial is zero,
  23218. + * the return value will either be 'count' or negative.
  23219. + */
  23220. +int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
  23221. +static inline int qman_alloc_cgrid(u32 *result)
  23222. +{
  23223. + int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
  23224. + return (ret > 0) ? 0 : ret;
  23225. +}
  23226. +
  23227. +/**
  23228. + * qman_release_cgrid_range - Release the specified range of CGR IDs
  23229. + * @id: the base CGR ID of the range to deallocate
  23230. + * @count: the number of CGR IDs in the range
  23231. + */
  23232. +void qman_release_cgrid_range(u32 id, unsigned int count);
  23233. +static inline void qman_release_cgrid(u32 id)
  23234. +{
  23235. + qman_release_cgrid_range(id, 1);
  23236. +}
  23237. +
  23238. +/**
  23239. + * qman_reserve_cgrid_range - Reserve the specified range of CGR ID
  23240. + * @id: the base CGR ID of the range to reserve
  23241. + * @count: the number of CGR IDs in the range
  23242. + */
  23243. +int qman_reserve_cgrid_range(u32 id, unsigned int count);
  23244. +static inline int qman_reserve_cgrid(u32 id)
  23245. +{
  23246. + return qman_reserve_cgrid_range(id, 1);
  23247. +}
  23248. +
  23249. +void qman_seed_cgrid_range(u32 id, unsigned int count);
  23250. +
  23251. +
  23252. + /* Helpers */
  23253. + /* ------- */
  23254. +/**
  23255. + * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
  23256. + * @fq: the FQ object whose FQID will be initialised by other s/w
  23257. + *
  23258. + * In many situations, a FQID is provided for communication between s/w
  23259. + * entities, and whilst the consumer is responsible for initialising and
  23260. + * scheduling the FQ, the producer(s) generally create a wrapper FQ object using
  23261. + * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
  23262. + * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
  23263. + * However, data can not be enqueued to the FQ until it is initialised out of
  23264. + * the OOS state - this function polls for that condition. It is particularly
  23265. + * useful for users of IPC functions - each endpoint's Rx FQ is the other
  23266. + * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
  23267. + * and then use this API on the (NO_MODIFY) Tx FQ object in order to
  23268. + * synchronise. The function returns zero for success, +1 if the FQ is still in
  23269. + * the OOS state, or negative if there was an error.
  23270. + */
  23271. +static inline int qman_poll_fq_for_init(struct qman_fq *fq)
  23272. +{
  23273. + struct qm_mcr_queryfq_np np;
  23274. + int err;
  23275. + err = qman_query_fq_np(fq, &np);
  23276. + if (err)
  23277. + return err;
  23278. + if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
  23279. + return 1;
  23280. + return 0;
  23281. +}
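
/*
 * A sketch of the synchronisation pattern described above: poll a NO_MODIFY
 * Tx FQ until the far end has taken it out of the OOS state. The msleep()
 * interval and the 100-iteration bound are illustrative choices, not part of
 * this API (requires <linux/delay.h>).
 */
static int example_wait_for_peer_init(struct qman_fq *tx_fq)
{
        int i, ret;

        for (i = 0; i < 100; i++) {
                ret = qman_poll_fq_for_init(tx_fq);
                if (ret <= 0)
                        return ret;     /* 0 == initialised, <0 == error */
                msleep(10);             /* still OOS (ret == +1), try again */
        }
        return -ETIMEDOUT;
}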
  23282. +
  23283. + /* -------------- */
  23284. + /* CEETM :: types */
  23285. + /* -------------- */
  23286. +/**
  23287. + * Token Rate Structure
  23288. + * Shaping rates are based on a "credit" system and a pre-configured h/w
  23289. + * internal timer. The following type represents a shaper "rate" parameter as a
  23290. + * fractional number of "tokens". Here's how it works. This (fractional) number
  23291. + * of tokens is added to the shaper's "credit" every time the h/w timer elapses
  23292. + * (up to a limit which is set by another shaper parameter). Every time a frame
  23293. + * is enqueued through a shaper, the shaper deducts as many tokens as there are
  23294. + * bytes of data in the enqueued frame. A shaper will not allow itself to
  23295. + * enqueue any frames if its token count is negative. As such;
  23296. + *
  23297. + * The rate at which data is enqueued is limited by the
  23298. + * rate at which tokens are added.
  23299. + *
  23300. + * Therefore if the user knows the period between these h/w timer updates in
  23301. + * seconds, they can calculate the maximum traffic rate of the shaper (in
  23302. + * bytes-per-second) from the token rate. And vice versa, they can calculate
  23303. + * the token rate to use in order to achieve a given traffic rate.
  23304. + */
  23305. +struct qm_ceetm_rate {
  23306. + /* The token rate is; whole + (fraction/8192) */
  23307. + u32 whole:11; /* 0..2047 */
  23308. + u32 fraction:13; /* 0..8191 */
  23309. +};
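
/*
 * Worked example of the fractional encoding above: the shaper earns
 * (whole + fraction/8192) byte-tokens per hardware timer tick, so for a
 * hypothetical tick frequency 'ticks_per_sec' (which depends on the QMan
 * clock and prescaler) the sustained rate is roughly:
 *
 *      bytes_per_sec ~= ticks_per_sec * (whole + fraction/8192)
 *
 * Eg. whole=1, fraction=4096 encodes 1.5 tokens per tick; at 1,000,000 ticks
 * per second that is ~1,500,000 bytes/s. The integer helper below avoids
 * floating point; in practice qman_ceetm_tokenrate2bps() does this for you.
 */
static u64 example_rate_to_bytes_per_sec(const struct qm_ceetm_rate *r,
                                         u64 ticks_per_sec)
{
        return ticks_per_sec * r->whole +
               (ticks_per_sec * r->fraction) / 8192;
}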
  23310. +
  23311. +struct qm_ceetm_weight_code {
  23312. + /* The weight code is; 5 msbits + 3 lsbits */
  23313. + u8 y:5;
  23314. + u8 x:3;
  23315. +};
  23316. +
  23317. +struct qm_ceetm {
  23318. + unsigned int idx;
  23319. + struct list_head sub_portals;
  23320. + struct list_head lnis;
  23321. + unsigned int sp_range[2];
  23322. + unsigned int lni_range[2];
  23323. +};
  23324. +
  23325. +struct qm_ceetm_sp {
  23326. + struct list_head node;
  23327. + unsigned int idx;
  23328. + unsigned int dcp_idx;
  23329. + int is_claimed;
  23330. + struct qm_ceetm_lni *lni;
  23331. +};
  23332. +
  23333. +/* Logical Network Interface */
  23334. +struct qm_ceetm_lni {
  23335. + struct list_head node;
  23336. + unsigned int idx;
  23337. + unsigned int dcp_idx;
  23338. + int is_claimed;
  23339. + struct qm_ceetm_sp *sp;
  23340. + struct list_head channels;
  23341. + int shaper_enable;
  23342. + int shaper_couple;
  23343. + int oal;
  23344. + struct qm_ceetm_rate cr_token_rate;
  23345. + struct qm_ceetm_rate er_token_rate;
  23346. + u16 cr_token_bucket_limit;
  23347. + u16 er_token_bucket_limit;
  23348. +};
  23349. +
  23350. +/* Class Queue Channel */
  23351. +struct qm_ceetm_channel {
  23352. + struct list_head node;
  23353. + unsigned int idx;
  23354. + unsigned int lni_idx;
  23355. + unsigned int dcp_idx;
  23356. + struct list_head class_queues;
  23357. + struct list_head ccgs;
  23358. + u8 shaper_enable;
  23359. + u8 shaper_couple;
  23360. + struct qm_ceetm_rate cr_token_rate;
  23361. + struct qm_ceetm_rate er_token_rate;
  23362. + u16 cr_token_bucket_limit;
  23363. + u16 er_token_bucket_limit;
  23364. +};
  23365. +
  23366. +struct qm_ceetm_ccg;
  23367. +
  23368. +/* This callback type is used when handling congestion entry/exit. The
  23369. + * 'cb_ctx' value is the opaque value associated with the ccg object.
  23370. + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
  23371. + */
  23372. +typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
  23373. + int congested);
  23374. +
  23375. +/* Class Congestion Group */
  23376. +struct qm_ceetm_ccg {
  23377. + struct qm_ceetm_channel *parent;
  23378. + struct list_head node;
  23379. + struct list_head cb_node;
  23380. + qman_cb_ccgr cb;
  23381. + void *cb_ctx;
  23382. + unsigned int idx;
  23383. +};
  23384. +
  23385. +/* Class Queue */
  23386. +struct qm_ceetm_cq {
  23387. + struct qm_ceetm_channel *parent;
  23388. + struct qm_ceetm_ccg *ccg;
  23389. + struct list_head node;
  23390. + unsigned int idx;
  23391. + int is_claimed;
  23392. + struct list_head bound_lfqids;
  23393. + struct list_head binding_node;
  23394. +};
  23395. +
  23396. +/* Logical Frame Queue */
  23397. +struct qm_ceetm_lfq {
  23398. + struct qm_ceetm_channel *parent;
  23399. + struct list_head node;
  23400. + unsigned int idx;
  23401. + unsigned int dctidx;
  23402. + u64 context_a;
  23403. + u32 context_b;
  23404. + qman_cb_mr ern;
  23405. +};
  23406. +
  23407. +/**
  23408. + * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
  23409. + * (ie. bits-per-second), compute the 'token_rate' fraction that best
  23410. + * approximates that rate.
  23411. + * @bps: the desired shaper rate in bps.
  23412. + * @token_rate: the output token rate computed with the given bps.
  23413. + * @rounding: dictates how to round if an exact conversion is not possible; if
  23414. + * it is negative then 'token_rate' will round down to the highest value that
  23415. + * does not exceed the desired rate, if it is positive then 'token_rate' will
  23416. + * round up to the lowest value that is greater than or equal to the desired
  23417. + * rate, and if it is zero then it will round to the nearest approximation,
  23418. + * whether that be up or down.
  23419. + *
  23420. + * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
  23421. + */
  23422. +int qman_ceetm_bps2tokenrate(u64 bps,
  23423. + struct qm_ceetm_rate *token_rate,
  23424. + int rounding);
  23425. +
  23426. +/**
  23427. + * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
  23428. + * corresponding number of 'bps'.
  23429. + * @token_rate: the input desired token_rate fraction.
  23430. + * @bps: the output shaper rate in bps computed with the given token rate.
  23431. + * @rounding: has the same semantics as the previous function.
  23432. + *
  23433. + * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
  23434. + */
  23435. +int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
  23436. + u64 *bps,
  23437. + int rounding);
  23438. +
  23439. +int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
  23440. + int partial);
  23441. +static inline int qman_alloc_ceetm0_channel(u32 *result)
  23442. +{
  23443. + int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
  23444. + return (ret > 0) ? 0 : ret;
  23445. +}
  23446. +void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
  23447. +static inline void qman_release_ceetm0_channelid(u32 channelid)
  23448. +{
  23449. + qman_release_ceetm0_channel_range(channelid, 1);
  23450. +}
  23451. +
  23452. +int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
  23453. +static inline int qman_reserve_ceetm0_channelid(u32 channelid)
  23454. +{
  23455. + return qman_reserve_ceetm0_channel_range(channelid, 1);
  23456. +}
  23457. +
  23458. +void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
  23459. +
  23460. +
  23461. +int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
  23462. + int partial);
  23463. +static inline int qman_alloc_ceetm1_channel(u32 *result)
  23464. +{
  23465. + int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
  23466. + return (ret > 0) ? 0 : ret;
  23467. +}
  23468. +void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
  23469. +static inline void qman_release_ceetm1_channelid(u32 channelid)
  23470. +{
  23471. + qman_release_ceetm1_channel_range(channelid, 1);
  23472. +}
  23473. +int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
  23474. +static inline int qman_reserve_ceetm1_channelid(u32 channelid)
  23475. +{
  23476. + return qman_reserve_ceetm1_channel_range(channelid, 1);
  23477. +}
  23478. +
  23479. +void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
  23480. +
  23481. +
  23482. +int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
  23483. + int partial);
  23484. +static inline int qman_alloc_ceetm0_lfqid(u32 *result)
  23485. +{
  23486. + int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
  23487. + return (ret > 0) ? 0 : ret;
  23488. +}
  23489. +void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
  23490. +static inline void qman_release_ceetm0_lfqid(u32 lfqid)
  23491. +{
  23492. + qman_release_ceetm0_lfqid_range(lfqid, 1);
  23493. +}
  23494. +int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
  23495. +static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
  23496. +{
  23497. + return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
  23498. +}
  23499. +
  23500. +void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
  23501. +
  23502. +
  23503. +int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
  23504. + int partial);
  23505. +static inline int qman_alloc_ceetm1_lfqid(u32 *result)
  23506. +{
  23507. + int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
  23508. + return (ret > 0) ? 0 : ret;
  23509. +}
  23510. +void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
  23511. +static inline void qman_release_ceetm1_lfqid(u32 lfqid)
  23512. +{
  23513. + qman_release_ceetm1_lfqid_range(lfqid, 1);
  23514. +}
  23515. +int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
  23516. +static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
  23517. +{
  23518. + return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
  23519. +}
  23520. +
  23521. +void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
  23522. +
  23523. +
  23524. + /* ----------------------------- */
  23525. + /* CEETM :: sub-portals */
  23526. + /* ----------------------------- */
  23527. +
  23528. +/**
  23529. + * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
  23530. + * to us and configured for traffic-management.
  23531. + * @sp: the returned sub-portal object, if successful.
  23532. + * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
  23533. + * instance).
  23534. + * @sp_idx: the desired sub-portal index, from 0 to 15.
  23535. + *
  23536. + * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
  23537. + * if the sp_idx is out of range.
  23538. + *
  23539. + * Note that if there are multiple driver domains (eg. a linux kernel versus
  23540. + * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
  23541. + * then a sub-portal may be accessible by more than one instance of a qman
  23542. + * driver and so it may be claimed multiple times. If this is the case, it is
  23543. + * up to the system architect to prevent conflicting configuration actions
  23544. + * coming from the different driver domains. The qman drivers do not have any
  23545. + * behind-the-scenes coordination to prevent this from happening.
  23546. + */
  23547. +int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
  23548. + enum qm_dc_portal dcp_idx,
  23549. + unsigned int sp_idx);
  23550. +
  23551. +/**
  23552. + * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
  23553. + * @sp: the sub-portal to be released.
  23554. + *
  23555. + * Returns 0 for success, or -EBUSY for failure if the dependencies are not
  23556. + * released.
  23557. + */
  23558. +int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
  23559. +
  23560. + /* ----------------------------------- */
  23561. + /* CEETM :: logical network interfaces */
  23562. + /* ----------------------------------- */
  23563. +
  23564. +/**
  23565. + * qman_ceetm_lni_claim - Claims an unclaimed LNI.
  23566. + * @lni: the returned LNI object, if successful.
  23567. + * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
  23568. + * instance)
  23569. + * @lni_idx: is the desired LNI index.
  23570. + *
  23571. + * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
  23572. + * is not available or has already been claimed (and not yet successfully
  23573. + * released), or lni_idx is out of range.
  23574. + *
  23575. + * Note that there may be multiple driver domains (or instances) that need to
  23576. + * transmit out the same LNI, so this claim is only guaranteeing exclusivity
  23577. + * within the domain of the driver being called. See qman_ceetm_sp_claim() and
  23578. + * qman_ceetm_sp_get_lni() for more information.
  23579. + */
  23580. +int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
  23581. + enum qm_dc_portal dcp_id,
  23582. + unsigned int lni_idx);
  23583. +
  23584. +/**
  23585. + * qman_ceetm_lni_release - Releases a previously claimed LNI.
  23586. + * @lni: the LNI to be released.
  23587. + *
  23588. + * This will only succeed if all dependent objects have been released.
  23589. + * Returns zero for success, or -EBUSY if the dependencies are not released.
  23590. + */
  23591. +int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
  23592. +
  23593. +/**
  23594. + * qman_ceetm_sp_set_lni
  23595. + * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
  23596. + * mapped to.
  23597. + * @sp: the given sub-portal.
  23598. + * @lni (in the "set" function): the LNI object which the sp will be mapped to.
  23599. + * @lni_idx (in the "get" function): the LNI index which the sp is mapped to.
  23600. + *
  23601. + * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
  23602. + * mapping has already been set or the configure mapping command returns error.
  23603. + * The "get" function returns -EINVAL if this sp-lni mapping is not set or the
  23604. + * query mapping command returns error.
  23605. + *
  23606. + * This may be useful in situations where multiple driver domains have access
  23607. + * to the same sub-portals in order to all be able to transmit out the same
  23608. + * physical interface (perhaps they're on different IP addresses or VPNs, so
  23609. + * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
  23610. + * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
  23611. + * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
  23612. + * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
  23613. + * in order to determine the LNI that the control-plane had assigned. This is
  23614. + * why the "get" returns an index, whereas the "set" takes an (already claimed)
  23615. + * LNI object.
  23616. + */
  23617. +int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
  23618. + struct qm_ceetm_lni *lni);
  23619. +int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
  23620. + unsigned int *lni_idx);
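
/*
 * A minimal control-plane sketch of the sub-portal/LNI pairing described
 * above: claim an LNI and a sub-portal on the same Fman (DCP) instance and
 * map the sub-portal to the LNI. qm_dc_portal_fman0 is assumed to be one of
 * the enum qm_dc_portal values defined earlier in this header; the indices
 * are illustrative.
 */
static int example_map_sp_to_lni(struct qm_ceetm_sp **sp,
                                 struct qm_ceetm_lni **lni)
{
        int err;

        err = qman_ceetm_lni_claim(lni, qm_dc_portal_fman0, 0);
        if (err)
                return err;
        err = qman_ceetm_sp_claim(sp, qm_dc_portal_fman0, 0);
        if (err) {
                qman_ceetm_lni_release(*lni);
                return err;
        }
        err = qman_ceetm_sp_set_lni(*sp, *lni);
        if (err) {
                qman_ceetm_sp_release(*sp);
                qman_ceetm_lni_release(*lni);
        }
        return err;
}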
  23621. +
  23622. +/**
  23623. + * qman_ceetm_lni_enable_shaper
  23624. + * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
  23625. + * @lni: the given LNI.
  23626. + * @coupled: indicates whether CR and ER are coupled.
  23627. + * @oal: the overhead accounting length which is added to the actual length of
  23628. + * each frame when performing shaper calculations.
  23629. + *
  23630. + * When the number of (unused) committed-rate tokens reach the committed-rate
  23631. + * token limit, 'coupled' indicates whether surplus tokens should be added to
  23632. + * the excess-rate token count (up to the excess-rate token limit).
  23633. + * When LNI is claimed, the shaper is disabled by default. The enable function
  23634. + * will turn on this shaper for this lni.
  23635. + * Whenever a claimed LNI is first enabled for shaping, its committed and
  23636. + * excess token rates and limits are zero, so will need to be changed to do
  23637. + * anything useful. The shaper can subsequently be enabled/disabled without
  23638. + * resetting the shaping parameters, but the shaping parameters will be reset
  23639. + * when the LNI is released.
  23640. + *
  23641. + * Returns zero for success. The "enable" function returns:
  23642. + * a) -EINVAL if the shaper is already enabled,
  23643. + * b) -EIO if the configure shaper command returns error.
  23644. + * The "disable" function returns:
  23645. + * a) -EINVAL if the shaper has already been disabled,
  23646. + * b) -EIO if the configure shaper command returns error.
  23647. + */
  23648. +int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
  23649. + int oal);
  23650. +int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
  23651. +
  23652. +/**
  23653. + * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
  23654. + * @lni: the given LNI
  23655. + */
  23656. +int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
  23657. +
  23658. +/**
  23659. + * qman_ceetm_lni_set_commit_rate
  23660. + * qman_ceetm_lni_get_commit_rate
  23661. + * qman_ceetm_lni_set_excess_rate
  23662. + * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
  23663. + * token limit for the given LNI.
  23664. + * @lni: the given LNI.
  23665. + * @token_rate: the desired token rate for "set" function, or the token rate of
  23666. + * the LNI queried by "get" function.
  23667. + * @token_limit: the desired token bucket limit for "set" function, or the token
  23668. + * limit of the given LNI queried by "get" function.
  23669. + *
  23670. + * Returns zero for success. The "set" function returns -EINVAL if the given
  23671. + * LNI is unshaped or -EIO if the configure shaper command returns error.
  23672. + * The "get" function returns -EINVAL if the token rate or the token limit is
  23673. + * not set or the query command returns error.
  23674. + */
  23675. +int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
  23676. + const struct qm_ceetm_rate *token_rate,
  23677. + u16 token_limit);
  23678. +int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
  23679. + struct qm_ceetm_rate *token_rate,
  23680. + u16 *token_limit);
  23681. +int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
  23682. + const struct qm_ceetm_rate *token_rate,
  23683. + u16 token_limit);
  23684. +int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
  23685. + struct qm_ceetm_rate *token_rate,
  23686. + u16 *token_limit);
  23687. +/**
  23688. + * qman_ceetm_lni_set_commit_rate_bps
  23689. + * qman_ceetm_lni_get_commit_rate_bps
  23690. + * qman_ceetm_lni_set_excess_rate_bps
  23691. + * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
  23692. + * and token limit for the given LNI.
  23693. + * @lni: the given LNI.
  23694. + * @bps: the desired shaping rate in bps for "set" function, or the shaping rate
  23695. + * of the LNI queried by "get" function.
  23696. + * @token_limit: the desired token bucket limit for "set" function, or the token
  23697. + * limit of the given LNI queried by "get" function.
  23698. + *
  23699. + * Returns zero for success. The "set" function returns -EINVAL if the given
  23700. + * LNI is unshaped or -EIO if the configure shaper command returns error.
  23701. + * The "get" function returns -EINVAL if the token rate or the token limit is
  23702. + * not set or the query command returns error.
  23703. + */
  23704. +int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
  23705. + u64 bps,
  23706. + u16 token_limit);
  23707. +int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
  23708. + u64 *bps, u16 *token_limit);
  23709. +int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
  23710. + u64 bps,
  23711. + u16 token_limit);
  23712. +int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
  23713. + u64 *bps, u16 *token_limit);
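
/*
 * A sketch of enabling the LNI shaper and programming CR/ER in bps using the
 * functions above. The rates, token bucket limits and OAL value are
 * illustrative only; coupled=1 lets unused CR tokens spill into ER.
 */
static int example_shape_lni(struct qm_ceetm_lni *lni)
{
        int err;

        err = qman_ceetm_lni_enable_shaper(lni, 1 /* coupled */, 24 /* oal */);
        if (err)
                return err;
        err = qman_ceetm_lni_set_commit_rate_bps(lni, 500000000ULL, 0x40);
        if (err)
                return err;
        return qman_ceetm_lni_set_excess_rate_bps(lni, 1000000000ULL, 0x40);
}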
  23714. +
  23715. +/**
  23716. + * qman_ceetm_lni_set_tcfcc
  23717. + * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
  23718. + * @lni: the given LNI.
  23719. + * @cq_level: is between 0 and 15, representing individual class queue levels
  23720. + * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
  23721. + * for every channel).
  23722. + * @traffic_class: is between 0 and 7 when associating a given class queue level
  23723. + * to a traffic class, or -1 when disabling traffic class flow control for this
  23724. + * class queue level.
  23725. + *
  23726. + * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
  23727. + * of range as indicated above, or -EIO if the configure/query tcfcc command
  23728. + * returns error.
  23729. + *
  23730. + * Refer to the section of QMan CEETM traffic class flow control in the
  23731. + * Reference Manual.
  23732. + */
  23733. +int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
  23734. + unsigned int cq_level,
  23735. + int traffic_class);
  23736. +int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
  23737. + unsigned int cq_level,
  23738. + int *traffic_class);
  23739. +
  23740. + /* ----------------------------- */
  23741. + /* CEETM :: class queue channels */
  23742. + /* ----------------------------- */
  23743. +
  23744. +/**
  23745. + * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
  23746. + * the given LNI.
  23747. + * @channel: the returned class queue channel object, if successful.
  23748. + * @lni: the LNI that the channel belongs to.
  23749. + *
  23750. + * Channels are always initially "unshaped".
  23751. + *
  23752. + * Return zero for success, or -ENODEV if there is no channel available (all 32
  23753. + * channels are claimed) or -EINVAL if the channel mapping command returns
  23754. + * error.
  23755. + */
  23756. +int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
  23757. + struct qm_ceetm_lni *lni);
  23758. +
  23759. +/**
  23760. + * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
  23761. + * @channel: the channel needs to be released.
  23762. + *
  23763. + * Returns zero for success, or -EBUSY if the dependencies are still in use.
  23764. + *
  23765. + * Note any shaping of the channel will be cleared to leave it in an unshaped
  23766. + * state.
  23767. + */
  23768. +int qman_ceetm_channel_release(struct qm_ceetm_channel *channel);
  23769. +
  23770. +/**
  23771. + * qman_ceetm_channel_enable_shaper
  23772. + * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel.
  23773. + * @channel: the given channel.
  23774. + * @coupled: indicates whether surplus CR tokens should be added to the
  23775. + * excess-rate token count (up to the excess-rate token limit) when the number
  23776. + * of (unused) committed-rate tokens reach the committed_rate token limit.
  23777. + *
  23778. + * Whenever a claimed channel is first enabled for shaping, its committed and
  23779. + * excess token rates and limits are zero, so will need to be changed to do
  23780. + * anything useful. The shaper can subsequently be enabled/disabled without
  23781. + * resetting the shaping parameters, but the shaping parameters will be reset
  23782. + * when the channel is released.
  23783. + *
  23784. + * Return 0 for success, or -EINVAL for failure, in the case that the channel
  23785. + * shaper has been enabled/disabled or the management command returns error.
  23786. + */
  23787. +int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
  23788. + int coupled);
  23789. +int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel);
  23790. +
  23791. +/**
  23792. + * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status.
  23793. + * @channel: the given channel.
  23794. + */
  23795. +int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel);
  23796. +
  23797. +/**
  23798. + * qman_ceetm_channel_set_commit_rate
  23799. + * qman_ceetm_channel_get_commit_rate
  23800. + * qman_ceetm_channel_set_excess_rate
  23801. + * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters.
  23802. + * @channel: the given channel.
  23803. + * @token_rate: the desired token rate for "set" function, or the queried token
  23804. + * rate for "get" function.
  23805. + * @token_limit: the desired token limit for "set" function, or the queried
  23806. + * token limit for "get" function.
  23807. + *
  23808. + * Return zero for success. The "set" function returns -EINVAL if the channel
  23809. + * is unshaped, or -EIO if the configure shaper command returns error. The
  23810. + * "get" function returns -EINVAL if token rate or token limit is not set, or
  23811. + * the query shaper command returns error.
  23812. + */
  23813. +int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
  23814. + const struct qm_ceetm_rate *token_rate,
  23815. + u16 token_limit);
  23816. +int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
  23817. + struct qm_ceetm_rate *token_rate,
  23818. + u16 *token_limit);
  23819. +int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
  23820. + const struct qm_ceetm_rate *token_rate,
  23821. + u16 token_limit);
  23822. +int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
  23823. + struct qm_ceetm_rate *token_rate,
  23824. + u16 *token_limit);
  23825. +/**
  23826. + * qman_ceetm_channel_set_commit_rate_bps
  23827. + * qman_ceetm_channel_get_commit_rate_bps
  23828. + * qman_ceetm_channel_set_excess_rate_bps
  23829. + * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper
  23830. + * parameters.
  23831. + * @channel: the given channel.
  23832. + * @bps: the desired shaper rate in bps for the "set" function, or the queried
  23833. + * shaper rate in bps for the "get" function.
  23834. + * @token_limit: the desired token limit for "set" function, or the queried
  23835. + * token limit for "get" function.
  23836. + *
  23837. + * Return zero for success. The "set" function returns -EINVAL if the channel
  23838. + * is unshaped, or -EIO if the configure shaper command returns error. The
  23839. + * "get" function returns -EINVAL if token rate or token limit is not set, or
  23840. + * the query shaper command returns error.
  23841. + */
  23842. +int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
  23843. + u64 bps, u16 token_limit);
  23844. +int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
  23845. + u64 *bps, u16 *token_limit);
  23846. +int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
  23847. + u64 bps, u16 token_limit);
  23848. +int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
  23849. + u64 *bps, u16 *token_limit);
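
/*
 * A sketch of claiming a class queue channel on an LNI and shaping it, using
 * the channel functions above. Rates and token limits are illustrative.
 */
static int example_claim_shaped_channel(struct qm_ceetm_channel **channel,
                                        struct qm_ceetm_lni *lni)
{
        int err;

        err = qman_ceetm_channel_claim(channel, lni);
        if (err)
                return err;
        err = qman_ceetm_channel_enable_shaper(*channel, 1 /* coupled */);
        if (err)
                goto out_release;
        err = qman_ceetm_channel_set_commit_rate_bps(*channel, 100000000ULL,
                                                     0x40);
        if (err)
                goto out_release;
        return 0;
out_release:
        qman_ceetm_channel_release(*channel);
        return err;
}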
  23850. +
  23851. +/**
  23852. + * qman_ceetm_channel_set_weight
  23853. + * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel
  23854. + * @channel: the given channel.
  23855. + * @token_limit: the desired token limit as the weight of the unshaped channel
  23856. + * for "set" function, or the queried token limit for "get" function.
  23857. + *
  23858. + * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channels.
  23859. + * It allows the unshaped channels to be included in the CR time eligible list,
  23860. + * and thus use the configured CR token limit value as their fair queuing
  23861. + * weight.
  23862. + *
  23863. + * Return zero for success, or -EINVAL if the channel is a shaped channel or
  23864. + * the management command returns error.
  23865. + */
  23866. +int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
  23867. + u16 token_limit);
  23868. +int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
  23869. + u16 *token_limit);
  23870. +
  23871. +/**
  23872. + * qman_ceetm_channel_set_group
  23873. + * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler.
  23874. + * @channel: the given channel.
  23875. + * @group_b: indicates whether there is group B in this channel.
  23876. + * @prio_a: the priority of group A.
  23877. + * @prio_b: the priority of group B.
  23878. + *
  23879. + * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues
  23880. + * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in
  23881. + * group A, otherwise they are split into group A (CQ8-11) and group B
  23882. + * (CQ12-CQ15). The individual class queues and the group(s) are in strict
  23883. + * priority order relative to each other. Within the group(s), the scheduling
  23884. + * is not strict priority order, but the result of scheduling within a group
  23885. + * is in strict priority order relative to the other class queues in the
  23886. + * channel. 'prio_a' and 'prio_b' control the priority order of the groups
  23887. + * relative to the individual class queues, and take values from 0-7. Eg. if
  23888. + * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict
  23889. + * priority order would be;
  23890. + * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7
  23891. + *
  23892. + * Return 0 for success. For "set" function, returns -EINVAL if prio_a or
  23893. + * prio_b are out of the range 0 - 7 (priority of group A or group B can not
  23894. + * be 0, CQ0 is always the highest class queue in this channel.), or -EIO if
  23895. + * the configure scheduler command returns error. For "get" function, return
  23896. + * -EINVAL if the query scheduler command returns error.
  23897. + */
  23898. +int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel,
  23899. + int group_b,
  23900. + unsigned int prio_a,
  23901. + unsigned int prio_b);
  23902. +int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel,
  23903. + int *group_b,
  23904. + unsigned int *prio_a,
  23905. + unsigned int *prio_b);
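
/*
 * A sketch reproducing the grouping example above: group B present, group A
 * at priority 2 and group B at priority 6, which gives the strict order
 * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7.
 */
static int example_configure_groups(struct qm_ceetm_channel *channel)
{
        return qman_ceetm_channel_set_group(channel, 1 /* group_b */,
                                            2 /* prio_a */, 6 /* prio_b */);
}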
  23906. +
  23907. +/**
  23908. + * qman_ceetm_channel_set_group_cr_eligibility
  23909. + * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility
  23910. + * @channel: the given channel object
  23911. + * @group_b: indicates whether there is group B in this channel.
  23912. + * @cre/@ere: the commit/excess rate eligibility, 1 for enable, 0 for disable.
  23913. + *
  23914. + * Return zero for success, or -EINVAL if eligibility setting fails.
  23915. +*/
  23916. +int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
  23917. + *channel, int group_b, int cre);
  23918. +int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
  23919. + *channel, int group_b, int ere);
  23920. +
  23921. +/**
  23922. + * qman_ceetm_channel_set_cq_cr_eligibility
  23923. + * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility
  23924. + * @channel: the given channel object
  23925. + * @idx: is from 0 to 7 (representing CQ0 to CQ7).
  23926. + * @cre/@ere: the commit/excess rate eligibility, 1 for enable, 0 for disable.
  23927. + *
  23928. + * Return zero for success, or -EINVAL if eligibility setting fails.
  23929. +*/
  23930. +int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
  23931. + unsigned int idx, int cre);
  23932. +int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
  23933. + unsigned int idx, int ere);
  23934. +
  23935. + /* --------------------- */
  23936. + /* CEETM :: class queues */
  23937. + /* --------------------- */
  23938. +
  23939. +/**
  23940. + * qman_ceetm_cq_claim - Claims an individual class queue.
  23941. + * @cq: the returned class queue object, if successful.
  23942. + * @channel: the class queue channel.
  23943. + * @idx: is from 0 to 7 (representing CQ0 to CQ7).
  23944. + * @ccg: represents the class congestion group that this class queue should be
  23945. + * subscribed to, or NULL if no congestion group membership is desired.
  23946. + *
  23947. + * Returns zero for success, or -EINVAL if @idx is out of range 0 - 7 or
  23948. + * if this class queue has been claimed, or configure class queue command
  23949. + * returns error, or returns -ENOMEM if allocating CQ memory fails.
  23950. + */
  23951. +int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
  23952. + struct qm_ceetm_channel *channel,
  23953. + unsigned int idx,
  23954. + struct qm_ceetm_ccg *ccg);
  23955. +
  23956. +/**
  23957. + * qman_ceetm_cq_claim_A - Claims a class queue in group A.
  23958. + * @cq: the returned class queue object, if successful.
  23959. + * @channel: the class queue channel.
  23960. + * @idx: is from 8 to 15 if only group A exists, otherwise it is from 8 to 11.
  23961. + * @ccg: represents the class congestion group that this class queue should be
  23962. + * subscribed to, or NULL if no congestion group membership is desired.
  23963. + *
  23964. + * Return zero for success, or -EINVAL if @idx is out of the range or if
  23965. + * this class queue has been claimed or configure class queue command returns
  23966. + * error, or returns -ENOMEM if allocating CQ memory fails.
  23967. + */
  23968. +int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
  23969. + struct qm_ceetm_channel *channel,
  23970. + unsigned int idx,
  23971. + struct qm_ceetm_ccg *ccg);
  23972. +
  23973. +/**
  23974. + * qman_ceetm_cq_claim_B - Claims a class queue in group B.
  23975. + * @cq: the returned class queue object, if successful.
  23976. + * @channel: the class queue channel.
  23977. + * @idx: is from 0 to 3 (CQ12 to CQ15).
  23978. + * @ccg: represents the class congestion group that this class queue should be
  23979. + * subscribed to, or NULL if no congestion group membership is desired.
  23980. + *
  23981. + * Return zero for success, or -EINVAL if @idx is out of the range or if
  23982. + * this class queue has been claimed or configure class queue command returns
  23983. + * error, or returns -ENOMEM if allocating CQ memory fails.
  23984. + */
  23985. +int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
  23986. + struct qm_ceetm_channel *channel,
  23987. + unsigned int idx,
  23988. + struct qm_ceetm_ccg *ccg);
  23989. +
  23990. +/**
  23991. + * qman_ceetm_cq_release - Releases a previously claimed class queue.
  23992. + * @cq: The class queue to be released.
  23993. + *
  23994. + * Return zero for success, or -EBUSY if the dependent objects (eg. logical
  23995. + * FQIDs) have not been released.
  23996. + */
  23997. +int qman_ceetm_cq_release(struct qm_ceetm_cq *cq);
  23998. +
  23999. +/**
  24000. + * qman_ceetm_set_queue_weight
  24001. + * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class
  24002. + * queue.
  24003. + * @cq: the given class queue.
  24004. + * @weight_code: the desired weight code to set for the given class queue for
  24005. + * "set" function or the queired weight code for "get" function.
  24006. + *
  24007. + * Grouped class queues have a default weight code of zero, which corresponds to
  24008. + * a scheduler weighting of 1. This function can be used to modify a grouped
  24009. + * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio()
  24010. + * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values
  24011. + * and the corresponding sharing weight.)
  24012. + *
  24013. + * Returns zero for success, or -EIO if the configure weight command returns
  24014. + * error for "set" function, or -EINVAL if the query command returns
  24015. + * error for "get" function.
  24016. + * See section "CEETM Weighted Scheduling among Grouped Classes" in Reference
  24017. + * Manual for weight and weight code.
  24018. + */
  24019. +int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
  24020. + struct qm_ceetm_weight_code *weight_code);
  24021. +int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
  24022. + struct qm_ceetm_weight_code *weight_code);
  24023. +
  24024. +/**
  24025. + * qman_ceetm_set_queue_weight_in_ratio
  24026. + * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a
  24027. + * grouped class queue.
  24028. + * @cq: the given class queue.
  24029. + * @ratio: the weight in ratio. It should be the real ratio number multiplied
  24030. + * by 100 to remove the fractional part.
  24031. + *
  24032. + * Returns zero for success, or -EIO if the configure weight command returns
  24033. + * error for "set" function, or -EINVAL if the query command returns
  24034. + * error for "get" function.
  24035. + */
  24036. +int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio);
  24037. +int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio);
  24038. +
  24039. +/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
  24040. + * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
  24041. + * corresponding to intermediate weight codes are calculated using linear
  24042. + * interpolation on the inverted values. Or put another way, the inverse weights
  24043. + * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals
  24044. + * between these are divided linearly into 32 intermediate values, the inverses
  24045. + * of which form the remaining weight codes.
  24046. + *
  24047. + * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
  24048. + * scheduling within a group of class queues (group A or B). Weights are used to
  24049. + * normalise the class queues to an underlying BFS algorithm where all class
  24050. + * queues are assumed to require "equal bandwidth". So the weights referred to
  24051. + * by the weight codes act as divisors on the size of frames being enqueued. Ie.
  24052. + * if one class queue in a group is assigned a weight of 2 whilst the other class
  24053. + * queues in the group keep the default weight of 1, then the WBFS scheduler
  24054. + * will effectively treat all frames enqueued on the weight-2 class queue as
  24055. + * having half the number of bytes they really have. Ie. if all other things are
  24056. + * equal, that class queue would get twice as much bytes-per-second bandwidth as
  24057. + * the others. So weights should be chosen to provide bandwidth ratios between
  24058. + * members of the same class queue group. These weights have no bearing on
  24059. + * behaviour outside that group's WBFS mechanism though.
  24060. + */
  24061. +
  24062. +/**
  24063. + * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), an accurate fractional
  24064. + * representation of the corresponding weight is given (in order to not lose
  24065. + * any precision).
  24066. + * @weight_code: The given weight code in WBFS.
  24067. + * @numerator: the numerator part of the weight computed by the weight code.
  24068. + * @denominator: the denominator part of the weight computed by the weight code
  24069. + *
  24070. + * Returns zero for success or -EINVAL if the given weight code is illegal.
  24071. + */
  24072. +int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
  24073. + u32 *numerator,
  24074. + u32 *denominator);
  24075. +/**
  24076. + * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight code
  24077. + * If the user needs to know how close this is, convert the resulting weight
  24078. + * code back to a weight and compare.
  24079. + * @numerator: numerator part of the given weight.
  24080. + * @denominator: denominator part of the given weight.
  24081. + * @weight_code: the weight code computed from the given weight.
  24082. + *
  24083. + * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
  24084. + * the range of weights.
  24085. + */
  24086. +int qman_ceetm_ratio2wbfs(u32 numerator,
  24087. + u32 denominator,
  24088. + struct qm_ceetm_weight_code *weight_code,
  24089. + int rounding);
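
/*
 * A sketch of the weight-code helpers above: ask for a 2:1 weight and check
 * what the hardware can actually encode. Per the encoding description, a
 * weight of 2 should map to weight code 32. Rounding mode 0 (nearest) is
 * assumed to follow the same convention as the rate converters above.
 */
static int example_weight_round_trip(void)
{
        struct qm_ceetm_weight_code wc;
        u32 num, den;
        int err;

        err = qman_ceetm_ratio2wbfs(2, 1, &wc, 0);
        if (err)
                return err;
        err = qman_ceetm_wbfs2ratio(&wc, &num, &den);
        if (err)
                return err;
        /* num/den is the exact weight that code 'wc' represents */
        return 0;
}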
  24090. +
  24091. +#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
  24092. +/**
  24093. + * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
  24094. + * CQ counters.
  24095. + * @cq: the given CQ object.
  24096. + * @flags: indicates whether the statistics counter will be cleared after query.
  24097. + * @frame_count: the number of frames that have been counted since the
  24098. + * counter was last cleared.
  24099. + * @byte_count: the number of bytes in all frames that have been counted.
  24100. + *
  24101. + * Return zero for success or -EINVAL if query statistics command returns error.
  24102. + *
  24103. + */
  24104. +int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
  24105. + u64 *frame_count, u64 *byte_count);
  24106. +
  24107. +/**
  24108. + * qman_ceetm_drain_cq - Drain the CQ until it is empty.
  24109. + * @cq: the given CQ object.
  24110. + * Return 0 for success or -EINVAL for unsuccessful command to empty CQ.
  24111. + */
  24112. +int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq);
  24113. +
  24114. + /* ---------------------- */
  24115. + /* CEETM :: logical FQIDs */
  24116. + /* ---------------------- */
  24117. +/**
  24118. + * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with
  24119. + * the given class queue.
  24120. + * @lfq: the returned lfq object, if successful.
  24121. + * @cq: the class queue which needs to claim a LFQID.
  24122. + *
  24123. + * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if
  24124. + * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails.
  24125. + */
  24126. +int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
  24127. + struct qm_ceetm_cq *cq);
  24128. +
  24129. +/**
  24130. + * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
  24131. + * @lfq: the lfq to be released.
  24132. + *
  24133. + * Return zero for success.
  24134. + */
  24135. +int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
  24136. +
  24137. +/**
  24138. + * qman_ceetm_lfq_set_context
  24139. + * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair to the
  24140. + * "dequeue context table" associated with the logical FQID.
  24141. + * @lfq: the given logical FQ object.
  24142. + * @context_a: contextA of the dequeue context.
  24143. + * @context_b: contextB of the dequeue context.
  24144. + *
  24145. + * Returns zero for success, or -EINVAL if there is error to set/get the
  24146. + * context pair.
  24147. + */
  24148. +int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
  24149. + u64 context_a,
  24150. + u32 context_b);
  24151. +int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
  24152. + u64 *context_a,
  24153. + u32 *context_b);
  24154. +
  24155. +/**
  24156. + * qman_ceetm_create_fq - Initialise a FQ object for the LFQ.
  24157. + * @lfq: the given logical fq.
  24158. + * @fq: the fq object created for the given logical fq.
  24159. + *
  24160. + * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
  24161. + * target a logical FQID (and the class queue it is associated with).
  24162. + * Note that this FQ object can only be used for enqueues, and
  24163. + * in the case of qman_enqueue_orp() it can not be used as the 'orp' parameter,
  24164. + * only as 'fq'. This FQ object can not (and shouldn't) be destroyed, it is only
  24165. + * valid as long as the underlying 'lfq' remains claimed. It is the user's
  24166. + * responsibility to ensure that the underlying 'lfq' is not released until any
  24167. + * enqueues to this FQ object have completed. The only field the user needs to
  24168. + * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that
  24169. + * could conceivably be called on this FQ object. This API can be called
  24170. + * multiple times to create multiple FQ objects referring to the same logical
  24171. + * FQID, and any enqueue rejections will respect the callback of the object that
  24172. + * issued the enqueue (and will identify the object via the parameter passed to
  24173. + * the callback too). There is no 'flags' parameter to this API as there is for
  24174. + * qman_create_fq() - the created FQ object behaves as though qman_create_fq()
  24175. + * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY.
  24176. + *
  24177. + * Returns 0 for success.
  24178. + */
  24179. +int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
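
/*
 * A sketch of wiring a class queue to a logical FQID and building an
 * enqueue-only FQ object, per qman_ceetm_cq_claim(), qman_ceetm_lfq_claim()
 * and qman_ceetm_create_fq() above. The ERN handler prototype (qman_cb_mr)
 * and the qman_enqueue() signature are assumed from the earlier part of this
 * header; the frame descriptor is left zeroed for brevity.
 */
static void example_ern(struct qman_portal *portal, struct qman_fq *fq,
                        const struct qm_mr_entry *msg)
{
        /* handle an enqueue rejection for frames sent via 'fq' */
}

static int example_enqueue_via_lfq(struct qm_ceetm_channel *channel,
                                   struct qman_fq *fq)
{
        struct qm_ceetm_cq *cq;
        struct qm_ceetm_lfq *lfq;
        struct qm_fd fd;
        int err;

        err = qman_ceetm_cq_claim(&cq, channel, 0 /* CQ0 */, NULL /* no CCG */);
        if (err)
                return err;
        err = qman_ceetm_lfq_claim(&lfq, cq);
        if (err)
                return err;
        err = qman_ceetm_create_fq(lfq, fq);
        if (err)
                return err;
        fq->cb.ern = example_ern;       /* the only callback this FQ can see */

        memset(&fd, 0, sizeof(fd));     /* a real FD would be filled in here */
        return qman_enqueue(fq, &fd, 0);
}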
  24180. +
  24181. + /* -------------------------------- */
  24182. + /* CEETM :: class congestion groups */
  24183. + /* -------------------------------- */
  24184. +
  24185. +/**
  24186. + * qman_ceetm_ccg_claim - Claims an unused CCG.
  24187. + * @ccg: the returned CCG object, if successful.
  24188. + * @channel: the given class queue channel
  24189. + * @cscn: the callback function of this CCG.
  24190. + * @cb_ctx: the corresponding context to be used if state change
  24191. + * notifications are later enabled for this CCG.
  24192. + *
  24193. + * The congestion group is local to the given class queue channel, so only
  24194. + * class queues within the channel can be associated with that congestion group.
  24195. + * The association of class queues to congestion groups occurs when the class
  24196. + * queues are claimed, see qman_ceetm_cq_claim() and related functions.
  24197. + * Congestion groups are in a "zero" state when initially claimed, and they are
  24198. + * returned to that state when released.
  24199. + *
  24200. + * Return zero for success, or -EINVAL if no CCG in the channel is available.
  24201. + */
  24202. +int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
  24203. + struct qm_ceetm_channel *channel,
  24204. + unsigned int idx,
  24205. + void (*cscn)(struct qm_ceetm_ccg *,
  24206. + void *cb_ctx,
  24207. + int congested),
  24208. + void *cb_ctx);
  24209. +
  24210. +/**
  24211. + * qman_ceetm_ccg_release - Releases a previously claimed CCG.
  24212. + * @ccg: the given ccg.
  24213. + *
  24214. + * Returns zero for success, or -EBUSY if the given ccg's dependent objects
  24215. + * (class queues that are associated with the CCG) have not been released.
  24216. + */
  24217. +int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
  24218. +
  24219. +/* This struct is used to specify attributes for a CCG. The 'we_mask' field
  24220. + * controls which CCG attributes are to be updated, and the remainder specify
  24221. + * the values for those attributes. A CCG counts either frames or the bytes
  24222. + * within those frames, but not both ('mode'). A CCG can optionally cause
  24223. + * enqueues to be rejected, due to tail-drop or WRED, or both (they are
  24224. + * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can be
  24225. + * level-triggered due to a single threshold ('td_thres') or edge-triggered due
  24226. + * to a "congestion state", but not both ('td_mode'). Congestion state has
  24227. + * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
  24228. + * notifications can be sent to software when the CCG goes into and out of this
  24229. + * congested state ('cscn_en'). */
  24230. +struct qm_ceetm_ccg_params {
  24231. + /* Boolean fields together in a single bitfield struct */
  24232. + struct {
  24233. + /* Whether to count bytes or frames. 1==frames */
  24234. + u8 mode:1;
  24235. + /* En/disable tail-drop. 1==enable */
  24236. + u8 td_en:1;
  24237. + /* Tail-drop on congestion-state or threshold. 1=threshold */
  24238. + u8 td_mode:1;
  24239. + /* Generate congestion state change notifications. 1==enable */
  24240. + u8 cscn_en:1;
  24241. + /* Enable WRED rejections (per colour). 1==enable */
  24242. + u8 wr_en_g:1;
  24243. + u8 wr_en_y:1;
  24244. + u8 wr_en_r:1;
  24245. + } __packed;
  24246. + /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
  24247. + struct qm_cgr_cs_thres td_thres;
  24248. + /* Congestion state thresholds, for entry and exit. */
  24249. + struct qm_cgr_cs_thres cs_thres_in;
  24250. + struct qm_cgr_cs_thres cs_thres_out;
  24251. + /* Overhead accounting length. Per-packet "tax", from -128 to +127 */
  24252. + signed char oal;
  24253. + /* Congestion state change notification for DCP portal, virtual CCGID*/
  24254. + /* WRED parameters. */
  24255. + struct qm_cgr_wr_parm wr_parm_g;
  24256. + struct qm_cgr_wr_parm wr_parm_y;
  24257. + struct qm_cgr_wr_parm wr_parm_r;
  24258. +};
  24259. +/* Bits used in 'we_mask' to qman_ceetm_ccg_set(), controls which attributes of
  24260. + * the CCGR are to be updated. */
  24261. +#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
  24262. +#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
  24263. +#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
  24264. +#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
  24265. +#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
  24266. +#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
  24267. +#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
  24268. +#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
  24269. +#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
  24270. +#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
  24271. +#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
  24272. +#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
  24273. +#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
  24274. +#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
  24275. +#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
  24276. +#define QM_CCGR_WE_CDV 0x8000 /* cdv */
  24277. +
  24278. +/**
  24279. + * qman_ceetm_ccg_set
  24280. + * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
  24281. + * @ccg: the given CCG object.
  24282. + * @we_mask: the write enable mask.
  24283. + * @params: the parameters setting for this ccg
  24284. + *
  24285. + * Return 0 for success, or -EIO if configure ccg command returns error for
  24286. + * "set" function, or -EINVAL if query ccg command returns error for "get"
  24287. + * function.
  24288. + */
  24289. +int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
  24290. + u16 we_mask,
  24291. + const struct qm_ceetm_ccg_params *params);
  24292. +int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
  24293. + struct qm_ceetm_ccg_params *params);
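
/*
 * A sketch of claiming a CCG and updating a subset of its attributes via the
 * write-enable mask above: count frames (mode=1) and enable congestion state
 * change notifications. The CCG index and the callback body are illustrative.
 */
static void example_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
{
        /* react to congestion entry (congested != 0) or exit (== 0) */
}

static int example_setup_ccg(struct qm_ceetm_channel *channel,
                             struct qm_ceetm_ccg **ccg)
{
        struct qm_ceetm_ccg_params params;
        int err;

        err = qman_ceetm_ccg_claim(ccg, channel, 0, example_cscn, NULL);
        if (err)
                return err;

        memset(&params, 0, sizeof(params));
        params.mode = 1;        /* count frames rather than bytes */
        params.cscn_en = 1;     /* generate state change notifications */
        return qman_ceetm_ccg_set(*ccg, QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN,
                                  &params);
}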
  24294. +
  24295. +/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
  24296. + * mask.
  24297. + * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
  24298. + * in the cscn target mask.
  24299. + * @ccg: the given CCG object.
  24300. + * @swp_idx: the index of the software portal.
  24301. + * @cscn_enabled: 1: Set the swp to be cscn target. 0: remove the swp from
  24302. + * the target mask.
  24303. + * @we_mask: the write enable mask.
  24304. + * @params: the parameters setting for this ccg
  24305. + *
  24306. + * Return 0 for success, or -EINVAL if command in set/get function fails.
  24307. + */
  24308. +int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
  24309. + u16 swp_idx,
  24310. + unsigned int cscn_enabled,
  24311. + u16 we_mask,
  24312. + const struct qm_ceetm_ccg_params *params);
  24313. +int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
  24314. + u16 swp_idx,
  24315. + unsigned int *cscn_enabled);
  24316. +
  24317. +/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
  24318. + * target mask.
  24319. + * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
  24320. + * is in the cscn target mask.
  24321. + * @ccg: the given CCG object.
  24322. + * @dcp_idx: the index of the direct connect portal.
  24323. + * @vcgid: congestion state change notification for dcp portal, virtual CGID.
  24324. + * @cscn_enabled: 1: Set the dcp to be cscn target. 0: remove the dcp from
  24325. + * the target mask.
  24326. + * @we_mask: the write enable mask.
  24327. + * @params: the parameters setting for this ccg
  24328. + *
  24329. + * Return 0 for success, or -EINVAL if command in set/get function fails.
  24330. + */
  24331. +int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
  24332. + u16 dcp_idx,
  24333. + u8 vcgid,
  24334. + unsigned int cscn_enabled,
  24335. + u16 we_mask,
  24336. + const struct qm_ceetm_ccg_params *params);
  24337. +int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
  24338. + u16 dcp_idx,
  24339. + u8 *vcgid,
  24340. + unsigned int *cscn_enabled);
  24341. +
  24342. +/**
  24343. + * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
  24344. + * CEETM CCG counters.
  24345. + * @ccg: the given CCG object.
  24346. + * @flags: indicates whether the statistics counter will be cleared after query.
  24347. + * @frame_count: the number of frames that have been counted since the
  24348. + * counter was last cleared.
  24349. + * @byte_count: the number of bytes in all frames that have been counted.
  24350. + *
  24351. + * Return zero for success or -EINVAL if query statistics command returns error.
  24352. + *
  24353. + */
  24354. +int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
  24355. + u64 *frame_count, u64 *byte_count);
  24356. +
  24357. +/**
  24358. + * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table
  24359. + * @lfqid: Logical Frame Queue ID
  24360. + * @lfqmt_query: Results of the query command
  24361. + *
  24362. + * Returns zero for success or -EIO if the query command returns error.
  24363. + *
  24364. + */
  24365. +int qman_ceetm_query_lfqmt(int lfqid,
  24366. + struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
  24367. +
  24368. +/**
  24369. + * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
  24370. + * @cid: Target ID (CQID or CCGRID)
  24371. + * @dcp_idx: CEETM portal ID
  24372. + * @command_type: One of the following:
  24373. + * 0 = Query dequeue statistics. CID carries the CQID to be queried.
  24374. + * 1 = Query and clear dequeue statistics. CID carries the CQID to be queried
  24375. + * 2 = Write dequeue statistics. CID carries the CQID to be written.
  24376. + * 3 = Query reject statistics. CID carries the CCGRID to be queried.
  24377. + * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried
  24378. + * 5 = Write reject statistics. CID carries the CCGRID to be written
  24379. + * @frame_count: Frame count value to be written if this is a write command
  24380. + * @byte_count: Bytes count value to be written if this is a write command
  24381. + *
  24382. + * Returns zero for success or -EIO if the query command returns error.
  24383. + */
  24384. +int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
  24385. + u16 command_type, u64 frame_count,
  24386. + u64 byte_count);
  24387. +
  24388. +/**
  24389. + * qman_set_wpm - Set waterfall power management
  24390. + *
  24391. + * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
  24392. + *
  24393. + * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
  24394. + * accessible.
  24395. + */
  24396. +int qman_set_wpm(int wpm_enable);
  24397. +
  24398. +/**
  24399. + * qman_get_wpm - Query the waterfall power management setting
  24400. + *
  24401. + * @wpm_enable: output; set to 1 if wpm is enabled, 0 if disabled.
  24402. + *
  24403. + * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
  24404. + * accessible.
  24405. + */
  24406. +int qman_get_wpm(int *wpm_enable);
  24407. +
  24408. +/* The below qman_p_***() variants might be called in a migration situation
  24409. + * (e.g. cpu hotplug). They are used to continue accessing the portal that
  24410. + * execution was affine to prior to migration.
  24411. + * @p: specifies which portal the APIs will use.
  24412. +*/
  24413. +const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
  24414. + *p);
  24415. +int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
  24416. +int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
  24417. +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
  24418. +u32 qman_p_poll_slow(struct qman_portal *p);
  24419. +void qman_p_poll(struct qman_portal *p);
  24420. +void qman_p_stop_dequeues(struct qman_portal *p);
  24421. +void qman_p_start_dequeues(struct qman_portal *p);
  24422. +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
  24423. +void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
  24424. +u32 qman_p_static_dequeue_get(struct qman_portal *p);
  24425. +void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
  24426. + int park_request);
  24427. +int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
  24428. + u32 flags __maybe_unused, u32 vdqcr);
  24429. +int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
  24430. + const struct qm_fd *fd, u32 flags);
  24431. +int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
  24432. + const struct qm_fd *fd, u32 flags,
  24433. + struct qman_fq *orp, u16 orp_seqnum);
  24434. +int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
  24435. + const struct qm_fd *fd, u32 flags,
  24436. + qman_cb_precommit cb, void *cb_arg);
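As referenced above, a minimal sketch of driving dequeues through an explicit portal with the qman_p_*() variants. It assumes 'p' was captured while execution was still affine to that portal's CPU and that 'pool_mask' is a valid SDQCR pool-channel mask for it; both are illustrative assumptions.

static void example_drain_portal(struct qman_portal *p, u32 pool_mask)
{
        /* Subscribe the portal to the given pool channels and allow it to
         * schedule dequeues again. */
        qman_p_static_dequeue_add(p, pool_mask);
        qman_p_start_dequeues(p);

        /* Process up to 16 DQRR entries; callbacks registered on the owning
         * FQs are invoked as usual. */
        qman_p_poll_dqrr(p, 16);

        /* Quiesce the portal again. */
        qman_p_stop_dequeues(p);
        qman_p_static_dequeue_del(p, pool_mask);
}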
  24437. +#ifdef __cplusplus
  24438. +}
  24439. +#endif
  24440. +
  24441. +#endif /* FSL_QMAN_H */
  24442. --- /dev/null
  24443. +++ b/include/linux/fsl_usdpaa.h
  24444. @@ -0,0 +1,372 @@
  24445. +/* Copyright 2011-2012 Freescale Semiconductor, Inc.
  24446. + *
  24447. + * This file is licensed under the terms of the GNU General Public License
  24448. + * version 2. This program is licensed "as is" without any warranty of any
  24449. + * kind, whether express or implied.
  24450. + */
  24451. +
  24452. +#ifndef FSL_USDPAA_H
  24453. +#define FSL_USDPAA_H
  24454. +
  24455. +#ifdef __cplusplus
  24456. +extern "C" {
  24457. +#endif
  24458. +
  24459. +#include <linux/uaccess.h>
  24460. +#include <linux/ioctl.h>
  24461. +#include <linux/fsl_qman.h> /* For "enum qm_channel" */
  24462. +#include <linux/compat.h>
  24463. +
  24464. +#ifdef CONFIG_FSL_USDPAA
  24465. +
  24466. +/******************************/
  24467. +/* Allocation of resource IDs */
  24468. +/******************************/
  24469. +
  24470. +/* This enum is used to distinguish between the types of underlying object
  24471. + * being manipulated. */
  24472. +enum usdpaa_id_type {
  24473. + usdpaa_id_fqid,
  24474. + usdpaa_id_bpid,
  24475. + usdpaa_id_qpool,
  24476. + usdpaa_id_cgrid,
  24477. + usdpaa_id_ceetm0_lfqid,
  24478. + usdpaa_id_ceetm0_channelid,
  24479. + usdpaa_id_ceetm1_lfqid,
  24480. + usdpaa_id_ceetm1_channelid,
  24481. + usdpaa_id_max /* <-- not a valid type, represents the number of types */
  24482. +};
  24483. +#define USDPAA_IOCTL_MAGIC 'u'
  24484. +struct usdpaa_ioctl_id_alloc {
  24485. + uint32_t base; /* Return value, the start of the allocated range */
  24486. + enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */
  24487. + uint32_t num; /* how many IDs to allocate (and return value) */
  24488. + uint32_t align; /* must be a power of 2, 0 is treated like 1 */
  24489. + int partial; /* whether to allow fewer than 'num' */
  24490. +};
  24491. +struct usdpaa_ioctl_id_release {
  24492. + /* Inputs */
  24493. + enum usdpaa_id_type id_type;
  24494. + uint32_t base;
  24495. + uint32_t num;
  24496. +};
  24497. +struct usdpaa_ioctl_id_reserve {
  24498. + enum usdpaa_id_type id_type;
  24499. + uint32_t base;
  24500. + uint32_t num;
  24501. +};
  24502. +
  24503. +
  24504. +/* ioctl() commands */
  24505. +#define USDPAA_IOCTL_ID_ALLOC \
  24506. + _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc)
  24507. +#define USDPAA_IOCTL_ID_RELEASE \
  24508. + _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release)
  24509. +#define USDPAA_IOCTL_ID_RESERVE \
  24510. + _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve)
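A user-space sketch, not taken from this patch, of the allocation ioctls above: it requests eight frame queue IDs and releases them again. The device node name /dev/fsl-usdpaa and the availability of this header to user space are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsl_usdpaa.h>

static int example_alloc_fqids(void)
{
        struct usdpaa_ioctl_id_alloc alloc = {
                .id_type = usdpaa_id_fqid,
                .num = 8,       /* eight FQIDs... */
                .align = 8,     /* ...starting on a multiple of 8 */
                .partial = 0,   /* all-or-nothing */
        };
        struct usdpaa_ioctl_id_release rel;
        int fd = open("/dev/fsl-usdpaa", O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &alloc) < 0) {
                close(fd);
                return -1;
        }
        printf("FQIDs %u..%u\n", alloc.base, alloc.base + alloc.num - 1);

        /* Hand the range back when done with it. */
        rel.id_type = usdpaa_id_fqid;
        rel.base = alloc.base;
        rel.num = alloc.num;
        ioctl(fd, USDPAA_IOCTL_ID_RELEASE, &rel);
        close(fd);
        return 0;
}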
  24511. +
  24512. +/**********************/
  24513. +/* Mapping DMA memory */
  24514. +/**********************/
  24515. +
  24516. +/* Maximum length for a map name, including NULL-terminator */
  24517. +#define USDPAA_DMA_NAME_MAX 16
  24518. +/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named.
  24519. + * For a sharable and named map, specify _SHARE (whether creating one or
  24520. + * binding to an existing one). If _SHARE is specified and _CREATE is not, then
  24521. + * the mapping must already exist. If _SHARE and _CREATE are specified and the
  24522. + * mapping doesn't already exist, it will be created. If _SHARE and _CREATE are
  24523. + * specified and the mapping already exists, the mapping will fail unless _LAZY
  24524. + * is specified. When mapping to a pre-existing sharable map, the length must be
  24525. + * an exact match. Lengths must be a power-of-4 multiple of page size.
  24526. + *
  24527. + * Note that this does not actually map the memory to user-space, that is done
  24528. + * by a subsequent mmap() using the page offset returned from this ioctl(). The
  24529. + * ioctl() is what gives the process permission to do this, and a page-offset
  24530. + * with which to do so.
  24531. + */
  24532. +#define USDPAA_DMA_FLAG_SHARE 0x01
  24533. +#define USDPAA_DMA_FLAG_CREATE 0x02
  24534. +#define USDPAA_DMA_FLAG_LAZY 0x04
  24535. +#define USDPAA_DMA_FLAG_RDONLY 0x08
  24536. +struct usdpaa_ioctl_dma_map {
  24537. + /* Output parameters - virtual and physical addresses */
  24538. + void *ptr;
  24539. + uint64_t phys_addr;
  24540. + /* Input parameter, the length of the region to be created (or if
  24541. + * mapping an existing region, this must match it). Must be a power-of-4
  24542. + * multiple of page size. */
  24543. + uint64_t len;
  24544. + /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
  24545. + uint32_t flags;
  24546. + /* If _FLAG_SHARE is specified, the name of the region to be created (or
  24547. + * of the existing mapping to use). */
  24548. + char name[USDPAA_DMA_NAME_MAX];
  24549. + /* If this ioctl() creates the mapping, this is an input parameter
  24550. + * stating whether the region supports locking. If mapping an existing
  24551. + * region, this is a return value indicating the same thing. */
  24552. + int has_locking;
  24553. + /* In the case of a successful map with _CREATE and _LAZY, this return
  24554. + * value indicates whether we created the mapped region or whether it
  24555. + * already existed. */
  24556. + int did_create;
  24557. +};
  24558. +
  24559. +#ifdef CONFIG_COMPAT
  24560. +struct usdpaa_ioctl_dma_map_compat {
  24561. + /* Output parameters - virtual and physical addresses */
  24562. + compat_uptr_t ptr;
  24563. + uint64_t phys_addr;
  24564. + /* Input parameter, the length of the region to be created (or if
  24565. + * mapping an existing region, this must match it). Must be a power-of-4
  24566. + * multiple of page size. */
  24567. + uint64_t len;
  24568. + /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
  24569. + uint32_t flags;
  24570. + /* If _FLAG_SHARE is specified, the name of the region to be created (or
  24571. + * of the existing mapping to use). */
  24572. + char name[USDPAA_DMA_NAME_MAX];
  24573. + /* If this ioctl() creates the mapping, this is an input parameter
  24574. + * stating whether the region supports locking. If mapping an existing
  24575. + * region, this is a return value indicating the same thing. */
  24576. + int has_locking;
  24577. + /* In the case of a successful map with _CREATE and _LAZY, this return
  24578. + * value indicates whether we created the mapped region or whether it
  24579. + * already existed. */
  24580. + int did_create;
  24581. +};
  24582. +
  24583. +#define USDPAA_IOCTL_DMA_MAP_COMPAT \
  24584. + _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
  24585. +#endif
  24586. +
  24587. +
  24588. +#define USDPAA_IOCTL_DMA_MAP \
  24589. + _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
  24590. +/* munmap() does not remove the DMA map, just the user-space mapping to it.
  24591. + * This ioctl will do both (though you can munmap() before calling the ioctl
  24592. + * too). */
  24593. +#define USDPAA_IOCTL_DMA_UNMAP \
  24594. + _IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
  24595. +/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
  24596. + * with a mmap()'d address, and the process will (interruptibly) sleep if the
  24597. + * lock is already held by another process. Process destruction will
  24598. + * automatically clean up any held locks. */
  24599. +#define USDPAA_IOCTL_DMA_LOCK \
  24600. + _IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
  24601. +#define USDPAA_IOCTL_DMA_UNLOCK \
  24602. + _IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
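A sketch, under stated assumptions, of creating (or lazily binding to) a named shared DMA region with the flags described above. The 64 KiB length assumes 4 KiB pages (16 pages, a power of 4), the region name is hypothetical, and 'fd' is an already-open USDPAA device descriptor as in the earlier allocation sketch. The follow-up mmap() and the lock ioctls are only indicated in comments, since the exact mmap() offset convention is not spelled out in this header.

static int example_dma_region(int fd)
{
        struct usdpaa_ioctl_dma_map map = {
                .len = 0x10000, /* 16 x 4 KiB pages (a power-of-4 multiple) */
                .flags = USDPAA_DMA_FLAG_SHARE |
                         USDPAA_DMA_FLAG_CREATE |
                         USDPAA_DMA_FLAG_LAZY,  /* bind if it already exists */
                .name = "example_region",       /* hypothetical name */
        };

        if (ioctl(fd, USDPAA_IOCTL_DMA_MAP, &map) < 0)
                return -1;
        /* map.did_create says whether we created the region or joined an
         * existing one; map.phys_addr is its physical base. The region is
         * made visible to this process by a subsequent mmap() on 'fd' (see
         * the note above), after which USDPAA_IOCTL_DMA_LOCK / _UNLOCK can
         * be used on the mapped address. */
        return 0;
}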
  24603. +
  24604. +/***************************************/
  24605. +/* Mapping and using QMan/BMan portals */
  24606. +/***************************************/
  24607. +enum usdpaa_portal_type {
  24608. + usdpaa_portal_qman,
  24609. + usdpaa_portal_bman,
  24610. +};
  24611. +
  24612. +#define QBMAN_ANY_PORTAL_IDX 0xffffffff
  24613. +
  24614. +struct usdpaa_ioctl_portal_map {
  24615. + /* Input parameter: whether a qman or bman portal is required. */
  24616. +
  24617. + enum usdpaa_portal_type type;
  24618. + /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
  24619. + for don't care. The portal index will be populated by the
  24620. + driver when the ioctl() successfully completes */
  24621. + uint32_t index;
  24622. +
  24623. + /* Return value if the map succeeds, this gives the mapped
  24624. + * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
  24625. + struct usdpaa_portal_map {
  24626. + void *cinh;
  24627. + void *cena;
  24628. + } addr;
  24629. + /* Qman-specific return values */
  24630. + uint16_t channel;
  24631. + uint32_t pools;
  24632. +};
  24633. +
  24634. +#ifdef CONFIG_COMPAT
  24635. +struct compat_usdpaa_ioctl_portal_map {
  24636. + /* Input parameter: whether a qman or bman portal is required. */
  24637. + enum usdpaa_portal_type type;
  24638. + /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
  24639. + for don't care. The portal index will be populated by the
  24640. + driver when the ioctl() successfully completes */
  24641. + uint32_t index;
  24642. + /* Return value if the map succeeds, this gives the mapped
  24643. + * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
  24644. + struct usdpaa_portal_map_compat {
  24645. + compat_uptr_t cinh;
  24646. + compat_uptr_t cena;
  24647. + } addr;
  24648. + /* Qman-specific return values */
  24649. + uint16_t channel;
  24650. + uint32_t pools;
  24651. +};
  24652. +#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
  24653. + _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
  24654. +#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
  24655. + _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
  24656. +#endif
  24657. +
  24658. +#define USDPAA_IOCTL_PORTAL_MAP \
  24659. + _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
  24660. +#define USDPAA_IOCTL_PORTAL_UNMAP \
  24661. + _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
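A sketch of mapping and releasing a QMan portal with the ioctls above; 'fd' is again an already-open USDPAA device descriptor and any free portal index is acceptable.

static int example_portal(int fd)
{
        struct usdpaa_ioctl_portal_map map = {
                .type = usdpaa_portal_qman,
                .index = QBMAN_ANY_PORTAL_IDX,  /* any free portal */
        };

        if (ioctl(fd, USDPAA_IOCTL_PORTAL_MAP, &map) < 0)
                return -1;
        /* map.index now identifies the portal; map.addr.cinh/cena are the
         * mapped register regions, and map.channel/map.pools are the
         * QMan-specific attributes documented above. */

        /* The unmap ioctl takes just the address pair. */
        return ioctl(fd, USDPAA_IOCTL_PORTAL_UNMAP, &map.addr);
}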
  24662. +
  24663. +struct usdpaa_ioctl_irq_map {
  24664. + enum usdpaa_portal_type type; /* Type of portal to map */
  24665. + int fd; /* File descriptor that contains the portal */
  24666. + void *portal_cinh; /* Cache inhibited area to identify the portal */
  24667. +};
  24668. +
  24669. +#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
  24670. + _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
  24671. +
  24672. +#ifdef CONFIG_COMPAT
  24673. +
  24674. +struct compat_ioctl_irq_map {
  24675. + enum usdpaa_portal_type type; /* Type of portal to map */
  24676. + compat_int_t fd; /* File descriptor that contains the portal */
  24677. + compat_uptr_t portal_cinh; /* Used to identify the portal */
  24678. +};
  24679. +#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
  24680. + _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
  24681. +#endif
  24682. +
  24683. +/* ioctl to query the amount of DMA memory used in the system */
  24684. +struct usdpaa_ioctl_dma_used {
  24685. + uint64_t free_bytes;
  24686. + uint64_t total_bytes;
  24687. +};
  24688. +#define USDPAA_IOCTL_DMA_USED \
  24689. + _IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
  24690. +
  24691. +/* ioctl to allocate a raw portal */
  24692. +struct usdpaa_ioctl_raw_portal {
  24693. + /* inputs */
  24694. + enum usdpaa_portal_type type; /* Type of portal to allocate */
  24695. +
  24696. + /* set to non-zero to turn on stashing */
  24697. + uint8_t enable_stash;
  24698. + /* Stashing attributes for the portal */
  24699. + uint32_t cpu;
  24700. + uint32_t cache;
  24701. + uint32_t window;
  24702. +
  24703. + /* Specifies the stash request queue this portal should use */
  24704. + uint8_t sdest;
  24705. +
  24706. + /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
  24707. + * for don't care. The portal index will be populated by the
  24708. + * driver when the ioctl() successfully completes */
  24709. + uint32_t index;
  24710. +
  24711. + /* outputs */
  24712. + uint64_t cinh;
  24713. + uint64_t cena;
  24714. +};
  24715. +
  24716. +#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
  24717. + _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
  24718. +
  24719. +#define USDPAA_IOCTL_FREE_RAW_PORTAL \
  24720. + _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
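A sketch of the raw-portal path, which returns the portal's addresses rather than mapping it for the caller; stashing is left disabled and the interpretation of the outputs follows the struct comments above.

static int example_raw_portal(int fd)
{
        struct usdpaa_ioctl_raw_portal raw = {
                .type = usdpaa_portal_bman,
                .enable_stash = 0,              /* no stashing */
                .index = QBMAN_ANY_PORTAL_IDX,  /* any free portal */
        };

        if (ioctl(fd, USDPAA_IOCTL_ALLOC_RAW_PORTAL, &raw) < 0)
                return -1;
        /* raw.index identifies the allocated portal; raw.cinh and raw.cena
         * are its cache-inhibited and cache-enabled addresses. */
        return ioctl(fd, USDPAA_IOCTL_FREE_RAW_PORTAL, &raw);
}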
  24721. +
  24722. +#ifdef CONFIG_COMPAT
  24723. +
  24724. +struct compat_ioctl_raw_portal {
  24725. + /* inputs */
  24726. + enum usdpaa_portal_type type; /* Type of portal to allocate */
  24727. +
  24728. + /* set to non-zero to turn on stashing */
  24729. + uint8_t enable_stash;
  24730. + /* Stashing attributes for the portal */
  24731. + uint32_t cpu;
  24732. + uint32_t cache;
  24733. + uint32_t window;
  24734. + /* Specifies the stash request queue this portal should use */
  24735. + uint8_t sdest;
  24736. +
  24737. + /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
  24738. + * for don't care. The portal index will be populated by the
  24739. + * driver when the ioctl() successfully completes */
  24740. + uint32_t index;
  24741. +
  24742. + /* outputs */
  24743. + uint64_t cinh;
  24744. + uint64_t cena;
  24745. +};
  24746. +
  24747. +#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
  24748. + _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
  24749. +
  24750. +#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
  24751. + _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
  24752. +
  24753. +#endif
  24754. +
  24755. +#ifdef __KERNEL__
  24756. +
  24757. +/* Early-boot hook */
  24758. +int __init fsl_usdpaa_init_early(void);
  24759. +
  24760. +/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
  24761. + * faults within its ranges via this hook. */
  24762. +int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
  24763. +
  24764. +#endif /* __KERNEL__ */
  24765. +
  24766. +#endif /* CONFIG_FSL_USDPAA */
  24767. +
  24768. +#ifdef __KERNEL__
  24769. +/* This interface is needed in a few places and though it's not specific to
  24770. + * USDPAA as such, creating a new header for it doesn't make any sense. The
  24771. + * qbman kernel driver implements this interface and uses it as the backend for
  24772. + * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
  24773. + * interface for tracking per-process allocations handed out to user-space. */
  24774. +struct dpa_alloc {
  24775. + struct list_head free;
  24776. + spinlock_t lock;
  24777. + struct list_head used;
  24778. +};
  24779. +#define DECLARE_DPA_ALLOC(name) \
  24780. + struct dpa_alloc name = { \
  24781. + .free = { \
  24782. + .prev = &name.free, \
  24783. + .next = &name.free \
  24784. + }, \
  24785. + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
  24786. + .used = { \
  24787. + .prev = &name.used, \
  24788. + .next = &name.used \
  24789. + } \
  24790. + }
  24791. +static inline void dpa_alloc_init(struct dpa_alloc *alloc)
  24792. +{
  24793. + INIT_LIST_HEAD(&alloc->free);
  24794. + INIT_LIST_HEAD(&alloc->used);
  24795. + spin_lock_init(&alloc->lock);
  24796. +}
  24797. +int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
  24798. + int partial);
  24799. +void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
  24800. +void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
  24801. +
  24802. +/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
  24803. + * desired range is not available, or 0 for success. */
  24804. +int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
  24805. +/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
  24806. + * 'alloc' is empty. */
  24807. +int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
  24808. +/* Returns 1 if the specified id is allocated, 0 otherwise */
  24809. +int dpa_alloc_check(struct dpa_alloc *list, u32 id);
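A kernel-side sketch, not taken from this patch, of the allocator interface above: a statically declared allocator is seeded with a hypothetical ID range and an aligned sub-range is carved out of it. The range values, and the assumption that dpa_alloc_new() returns a negative errno on failure, are illustrative only.

static DECLARE_DPA_ALLOC(example_ids);

static int example_dpa_alloc(void)
{
        u32 id;
        int ret;

        /* Make IDs 100..163 available (hypothetical range). */
        dpa_alloc_seed(&example_ids, 100, 64);

        /* Take 4 contiguous IDs aligned to a multiple of 4, all-or-nothing. */
        ret = dpa_alloc_new(&example_ids, &id, 4, 4, 0);
        if (ret < 0)
                return ret;

        /* ... use id .. id+3 ... */

        dpa_alloc_free(&example_ids, id, 4);
        return 0;
}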
  24810. +#endif /* __KERNEL__ */
  24811. +
  24812. +#ifdef __cplusplus
  24813. +}
  24814. +#endif
  24815. +
  24816. +#endif /* FSL_USDPAA_H */