0045-mtd-add-mt7621-nand-support.patch 136 KB

  1. From 5db075c5dd038fbf4b5a0196e10f4f9658236372 Mon Sep 17 00:00:00 2001
  2. From: John Crispin <blogic@openwrt.org>
  3. Date: Sun, 27 Jul 2014 11:05:17 +0100
  4. Subject: [PATCH 45/57] mtd: add mt7621 nand support
  5. Signed-off-by: John Crispin <blogic@openwrt.org>
  6. ---
  7. drivers/mtd/nand/Kconfig | 6 +
  8. drivers/mtd/nand/Makefile | 1 +
  9. drivers/mtd/nand/bmt.c | 750 ++++++++++++
  10. drivers/mtd/nand/bmt.h | 80 ++
  11. drivers/mtd/nand/dev-nand.c | 63 +
  12. drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++
  13. drivers/mtd/nand/mtk_nand.c | 2304 +++++++++++++++++++++++++++++++++++
  14. drivers/mtd/nand/mtk_nand.h | 452 +++++++
  15. drivers/mtd/nand/nand_base.c | 6 +-
  16. drivers/mtd/nand/nand_bbt.c | 19 +
  17. drivers/mtd/nand/nand_def.h | 123 ++
  18. drivers/mtd/nand/nand_device_list.h | 55 +
  19. drivers/mtd/nand/partition.h | 115 ++
  20. 13 files changed, 4311 insertions(+), 3 deletions(-)
  21. create mode 100644 drivers/mtd/nand/bmt.c
  22. create mode 100644 drivers/mtd/nand/bmt.h
  23. create mode 100644 drivers/mtd/nand/dev-nand.c
  24. create mode 100644 drivers/mtd/nand/mt6575_typedefs.h
  25. create mode 100644 drivers/mtd/nand/mtk_nand.c
  26. create mode 100644 drivers/mtd/nand/mtk_nand.h
  27. create mode 100644 drivers/mtd/nand/nand_def.h
  28. create mode 100644 drivers/mtd/nand/nand_device_list.h
  29. create mode 100644 drivers/mtd/nand/partition.h
  30. --- a/drivers/mtd/nand/Kconfig
  31. +++ b/drivers/mtd/nand/Kconfig
  32. @@ -516,4 +516,10 @@ config MTD_NAND_XWAY
  33. Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
  34. to the External Bus Unit (EBU).
  35. +config MTK_MTD_NAND
  36. + tristate "Support for MTK SoC NAND controller"
  37. + depends on SOC_MT7621
  38. + select MTD_NAND_IDS
  39. + select MTD_NAND_ECC
  40. +
  41. endif # MTD_NAND
  42. --- a/drivers/mtd/nand/Makefile
  43. +++ b/drivers/mtd/nand/Makefile
  44. @@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740
  45. obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
  46. obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
  47. obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
  48. +obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand.o bmt.o
  49. nand-objs := nand_base.o nand_bbt.o nand_timings.o
  50. --- /dev/null
  51. +++ b/drivers/mtd/nand/bmt.c
  52. @@ -0,0 +1,750 @@
  53. +#include "bmt.h"
  54. +
  55. +typedef struct
  56. +{
  57. + char signature[3];
  58. + u8 version;
  59. + u8 bad_count; // bad block count in pool
  60. + u8 mapped_count; // mapped block count in pool
  61. + u8 checksum;
  62. + u8 reserved[13];
  63. +} phys_bmt_header;
  64. +
  65. +typedef struct
  66. +{
  67. + phys_bmt_header header;
  68. + bmt_entry table[MAX_BMT_SIZE];
  69. +} phys_bmt_struct;
  70. +
  71. +typedef struct
  72. +{
  73. + char signature[3];
  74. +} bmt_oob_data;
  75. +
  76. +static char MAIN_SIGNATURE[] = "BMT";
  77. +static char OOB_SIGNATURE[] = "bmt";
  78. +#define SIGNATURE_SIZE (3)
  79. +
  80. +#define MAX_DAT_SIZE 0x1000
  81. +#define MAX_OOB_SIZE 0x80
  82. +
  83. +static struct mtd_info *mtd_bmt;
  84. +static struct nand_chip *nand_chip_bmt;
  85. +#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
  86. +#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
  87. +
  88. +#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
  89. +#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
  90. +
  91. +/*********************************************************************
  92. +* Flash is split into 2 parts: the system part is for normal *
  93. +* system usage (size is system_block_count), the other is the replace pool *
  94. +* +-------------------------------------------------+ *
  95. +* | system_block_count | bmt_block_count | *
  96. +* +-------------------------------------------------+ *
  97. +*********************************************************************/
  98. +static u32 total_block_count; // block number in flash
  99. +static u32 system_block_count;
  100. +static int bmt_block_count; // bmt table size
  101. +// static int bmt_count; // block used in bmt
  102. +static int page_per_block; // pages per block
  103. +
  104. +static u32 bmt_block_index; // bmt block index
  105. +static bmt_struct bmt; // dynamic created global bmt table
  106. +
  107. +static u8 dat_buf[MAX_DAT_SIZE];
  108. +static u8 oob_buf[MAX_OOB_SIZE];
  109. +static bool pool_erased;
  110. +
  111. +/***************************************************************
  112. +*
  113. +* Interface adaptor for preloader/uboot/kernel
  114. +* These interfaces operate on physical address, read/write
  115. +* physical data.
  116. +*
  117. +***************************************************************/
  118. +int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
  119. +{
  120. + return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
  121. +}
  122. +
  123. +bool nand_block_bad_bmt(u32 offset)
  124. +{
  125. + return mtk_nand_block_bad_hw(mtd_bmt, offset);
  126. +}
  127. +
  128. +bool nand_erase_bmt(u32 offset)
  129. +{
  130. + int status;
  131. + if (offset < 0x20000)
  132. + {
  133. + MSG(INIT, "erase offset: 0x%x\n", offset);
  134. + }
  135. +
  136. + status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as the nand_chip structure doesn't have an erase function defined
  137. + if (status & NAND_STATUS_FAIL)
  138. + return false;
  139. + else
  140. + return true;
  141. +}
  142. +
  143. +int mark_block_bad_bmt(u32 offset)
  144. +{
  145. + return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset);
  146. +}
  147. +
  148. +bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
  149. +{
  150. + if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
  151. + return false;
  152. + else
  153. + return true;
  154. +}
  155. +
  156. +/***************************************************************
  157. +* *
  158. +* static internal function *
  159. +* *
  160. +***************************************************************/
  161. +static void dump_bmt_info(bmt_struct * bmt)
  162. +{
  163. + int i;
  164. +
  165. + MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
  166. + for (i = 0; i < bmt->mapped_count; i++)
  167. + {
  168. + MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
  169. + }
  170. +}
  171. +
  172. +static bool match_bmt_signature(u8 * dat, u8 * oob)
  173. +{
  174. +
  175. + if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
  176. + {
  177. + return false;
  178. + }
  179. +
  180. + if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
  181. + {
  182. + MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
  183. + }
  184. + return true;
  185. +}
  186. +
  187. +static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
  188. +{
  189. + int i;
  190. + u8 checksum = 0;
  191. + u8 *dat = (u8 *) phys_table;
  192. +
  193. + checksum += phys_table->header.version;
  194. + checksum += phys_table->header.mapped_count;
  195. +
  196. + dat += sizeof(phys_bmt_header);
  197. + for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
  198. + {
  199. + checksum += dat[i];
  200. + }
  201. +
  202. + return checksum;
  203. +}
  204. +
  205. +
  206. +static int is_block_mapped(int index)
  207. +{
  208. + int i;
  209. + for (i = 0; i < bmt.mapped_count; i++)
  210. + {
  211. + if (index == bmt.table[i].mapped_index)
  212. + return i;
  213. + }
  214. + return -1;
  215. +}
  216. +
  217. +static bool is_page_used(u8 * dat, u8 * oob)
  218. +{
  219. + return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
  220. +}
  221. +
  222. +static bool valid_bmt_data(phys_bmt_struct * phys_table)
  223. +{
  224. + int i;
  225. + u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
  226. +
  227. + // checksum correct?
  228. + if (phys_table->header.checksum != checksum)
  229. + {
  230. + MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
  231. + return false;
  232. + }
  233. +
  234. + MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
  235. +
  236. + // block index correct?
  237. + for (i = 0; i < phys_table->header.mapped_count; i++)
  238. + {
  239. + if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
  240. + {
  241. + MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
  242. + return false;
  243. + }
  244. + }
  245. +
  246. + // pass check, valid bmt.
  247. + MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
  248. + return true;
  249. +}
  250. +
  251. +static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
  252. +{
  253. + phys_bmt_struct phys_bmt;
  254. +
  255. + dump_bmt_info(bmt);
  256. +
  257. + // fill phys_bmt_struct structure with bmt_struct
  258. + memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
  259. +
  260. + memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
  261. + phys_bmt.header.version = BMT_VERSION;
  262. + // phys_bmt.header.bad_count = bmt->bad_count;
  263. + phys_bmt.header.mapped_count = bmt->mapped_count;
  264. + memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
  265. +
  266. + phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
  267. +
  268. + memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
  269. + memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
  270. +}
  271. +
  272. +// return valid index if found BMT, else return 0
  273. +static int load_bmt_data(int start, int pool_size)
  274. +{
  275. + int bmt_index = start + pool_size - 1; // find from the end
  276. + phys_bmt_struct phys_table;
  277. + int i;
  278. +
  279. + MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
  280. +
  281. + for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
  282. + {
  283. + if (nand_block_bad_bmt(OFFSET(bmt_index)))
  284. + {
  285. + MSG(INIT, "Skip bad block: %d\n", bmt_index);
  286. + continue;
  287. + }
  288. +
  289. + if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
  290. + {
  291. + MSG(INIT, "Error found when read block %d\n", bmt_index);
  292. + continue;
  293. + }
  294. +
  295. + if (!match_bmt_signature(dat_buf, oob_buf))
  296. + {
  297. + continue;
  298. + }
  299. +
  300. + MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
  301. +
  302. + memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
  303. +
  304. + if (!valid_bmt_data(&phys_table))
  305. + {
  306. + MSG(INIT, "BMT data is not correct %d\n", bmt_index);
  307. + continue;
  308. + } else
  309. + {
  310. + bmt.mapped_count = phys_table.header.mapped_count;
  311. + bmt.version = phys_table.header.version;
  312. + // bmt.bad_count = phys_table.header.bad_count;
  313. + memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
  314. +
  315. + MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
  316. +
  317. + for (i = 0; i < bmt.mapped_count; i++)
  318. + {
  319. + if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
  320. + {
  321. + MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
  322. + mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
  323. + }
  324. + }
  325. +
  326. + return bmt_index;
  327. + }
  328. + }
  329. +
  330. + MSG(INIT, "bmt block not found!\n");
  331. + return 0;
  332. +}
  333. +
  334. +/*************************************************************************
  335. +* Find an available block and erase. *
  336. +* start_from_end: if true, find available block from end of flash. *
  337. +* else, find from the beginning of the pool *
  338. +* need_erase: if true, all unmapped blocks in the pool will be erased *
  339. +*************************************************************************/
  340. +static int find_available_block(bool start_from_end)
  341. +{
  342. + int i; // , j;
  343. + int block = system_block_count;
  344. + int direction;
  345. + // int avail_index = 0;
  346. + MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
  347. +
  348. + // erase all un-mapped blocks in pool when finding an available block
  349. + if (!pool_erased)
  350. + {
  351. + MSG(INIT, "Erase all un-mapped blocks in pool\n");
  352. + for (i = 0; i < bmt_block_count; i++)
  353. + {
  354. + if (block == bmt_block_index)
  355. + {
  356. + MSG(INIT, "Skip bmt block 0x%x\n", block);
  357. + continue;
  358. + }
  359. +
  360. + if (nand_block_bad_bmt(OFFSET(block + i)))
  361. + {
  362. + MSG(INIT, "Skip bad block 0x%x\n", block + i);
  363. + continue;
  364. + }
  365. +//if(block==4095)
  366. +//{
  367. +// continue;
  368. +//}
  369. +
  370. + if (is_block_mapped(block + i) >= 0)
  371. + {
  372. + MSG(INIT, "Skip mapped block 0x%x\n", block + i);
  373. + continue;
  374. + }
  375. +
  376. + if (!nand_erase_bmt(OFFSET(block + i)))
  377. + {
  378. + MSG(INIT, "Erase block 0x%x failed\n", block + i);
  379. + mark_block_bad_bmt(OFFSET(block + i));
  380. + }
  381. + }
  382. +
  383. + pool_erased = 1;
  384. + }
  385. +
  386. + if (start_from_end)
  387. + {
  388. + block = total_block_count - 1;
  389. + direction = -1;
  390. + } else
  391. + {
  392. + block = system_block_count;
  393. + direction = 1;
  394. + }
  395. +
  396. + for (i = 0; i < bmt_block_count; i++, block += direction)
  397. + {
  398. + if (block == bmt_block_index)
  399. + {
  400. + MSG(INIT, "Skip bmt block 0x%x\n", block);
  401. + continue;
  402. + }
  403. +
  404. + if (nand_block_bad_bmt(OFFSET(block)))
  405. + {
  406. + MSG(INIT, "Skip bad block 0x%x\n", block);
  407. + continue;
  408. + }
  409. +
  410. + if (is_block_mapped(block) >= 0)
  411. + {
  412. + MSG(INIT, "Skip mapped block 0x%x\n", block);
  413. + continue;
  414. + }
  415. +
  416. + MSG(INIT, "Find block 0x%x available\n", block);
  417. + return block;
  418. + }
  419. +
  420. + return 0;
  421. +}
  422. +
  423. +static unsigned short get_bad_index_from_oob(u8 * oob_buf)
  424. +{
  425. + unsigned short index;
  426. + memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
  427. +
  428. + return index;
  429. +}
  430. +
  431. +void set_bad_index_to_oob(u8 * oob, u16 index)
  432. +{
  433. + memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
  434. +}
  435. +
  436. +static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
  437. +{
  438. + int page;
  439. + int error_block = offset / BLOCK_SIZE_BMT;
  440. + int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
  441. + int to_index;
  442. +
  443. + memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
  444. +
  445. + to_index = find_available_block(false);
  446. +
  447. + if (!to_index)
  448. + {
  449. + MSG(INIT, "Cannot find an available block for BMT\n");
  450. + return 0;
  451. + }
  452. +
  453. + { // migrate error page first
  454. + MSG(INIT, "Write error page: 0x%x\n", error_page);
  455. + if (!write_dat)
  456. + {
  457. + nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
  458. + write_dat = dat_buf;
  459. + }
  460. + // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
  461. +
  462. + if (error_block < system_block_count)
  463. + set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
  464. +
  465. + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
  466. + {
  467. + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
  468. + mark_block_bad_bmt(to_index);
  469. + return migrate_from_bad(offset, write_dat, write_oob);
  470. + }
  471. + }
  472. +
  473. + for (page = 0; page < page_per_block; page++)
  474. + {
  475. + if (page != error_page)
  476. + {
  477. + nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
  478. + if (is_page_used(dat_buf, oob_buf))
  479. + {
  480. + if (error_block < system_block_count)
  481. + {
  482. + set_bad_index_to_oob(oob_buf, error_block);
  483. + }
  484. + MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
  485. + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
  486. + {
  487. + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
  488. + mark_block_bad_bmt(to_index);
  489. + return migrate_from_bad(offset, write_dat, write_oob);
  490. + }
  491. + }
  492. + }
  493. + }
  494. +
  495. + MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
  496. +
  497. + return to_index;
  498. +}
  499. +
  500. +static bool write_bmt_to_flash(u8 * dat, u8 * oob)
  501. +{
  502. + bool need_erase = true;
  503. + MSG(INIT, "Try to write BMT\n");
  504. +
  505. + if (bmt_block_index == 0)
  506. + {
  507. + // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
  508. + need_erase = false;
  509. + if (!(bmt_block_index = find_available_block(true)))
  510. + {
  511. + MSG(INIT, "Cannot find an available block for BMT\n");
  512. + return false;
  513. + }
  514. + }
  515. +
  516. + MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
  517. +
  518. + // write bmt to flash
  519. + if (need_erase)
  520. + {
  521. + if (!nand_erase_bmt(OFFSET(bmt_block_index)))
  522. + {
  523. + MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
  524. + mark_block_bad_bmt(OFFSET(bmt_block_index));
  525. + // bmt.bad_count++;
  526. +
  527. + bmt_block_index = 0;
  528. + return write_bmt_to_flash(dat, oob); // recursive call
  529. + }
  530. + }
  531. +
  532. + if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
  533. + {
  534. + MSG(INIT, "Write BMT data fail, need to write again\n");
  535. + mark_block_bad_bmt(OFFSET(bmt_block_index));
  536. + // bmt.bad_count++;
  537. +
  538. + bmt_block_index = 0;
  539. + return write_bmt_to_flash(dat, oob); // recursive call
  540. + }
  541. +
  542. + MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
  543. + return true;
  544. +}
  545. +
  546. +/*******************************************************************
  547. +* Reconstruct the BMT; called when the stored BMT info doesn't match
  548. +* the bad block info in flash.
  549. +*
  550. +* Return NULL for failure
  551. +*******************************************************************/
  552. +bmt_struct *reconstruct_bmt(bmt_struct * bmt)
  553. +{
  554. + int i;
  555. + int index = system_block_count;
  556. + unsigned short bad_index;
  557. + int mapped;
  558. +
  559. + // init everything in BMT struct
  560. + bmt->version = BMT_VERSION;
  561. + bmt->bad_count = 0;
  562. + bmt->mapped_count = 0;
  563. +
  564. + memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
  565. +
  566. + for (i = 0; i < bmt_block_count; i++, index++)
  567. + {
  568. + if (nand_block_bad_bmt(OFFSET(index)))
  569. + {
  570. + MSG(INIT, "Skip bad block: 0x%x\n", index);
  571. + // bmt->bad_count++;
  572. + continue;
  573. + }
  574. +
  575. + MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
  576. + nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
  577. + /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
  578. + {
  579. + MSG(INIT, "Error when read block %d\n", bmt_block_index);
  580. + continue;
  581. + } */
  582. +
  583. + if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
  584. + {
  585. + MSG(INIT, "get bad index: 0x%x\n", bad_index);
  586. + if (bad_index != 0xFFFF)
  587. + MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
  588. + continue;
  589. + }
  590. +
  591. + MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
  592. +
  593. + if (!nand_block_bad_bmt(OFFSET(bad_index)))
  594. + {
  595. + MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
  596. + continue; // no need to erase here, it will be erased later when trying to write BMT
  597. + }
  598. +
  599. + if ((mapped = is_block_mapped(bad_index)) >= 0)
  600. + {
  601. + MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
  602. + bmt->table[mapped].mapped_index = index; // use new one instead.
  603. + } else
  604. + {
  605. + // add mapping to BMT
  606. + bmt->table[bmt->mapped_count].bad_index = bad_index;
  607. + bmt->table[bmt->mapped_count].mapped_index = index;
  608. + bmt->mapped_count++;
  609. + }
  610. +
  611. + MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
  612. +
  613. + }
  614. +
  615. + MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
  616. + // dump_bmt_info(bmt);
  617. +
  618. + // fill NAND BMT buffer
  619. + memset(oob_buf, 0xFF, sizeof(oob_buf));
  620. + fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
  621. +
  622. + // write BMT back
  623. + if (!write_bmt_to_flash(dat_buf, oob_buf))
  624. + {
  625. + MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
  626. + }
  627. +
  628. + return bmt;
  629. +}
  630. +
  631. +/*******************************************************************
  632. +* [BMT Interface]
  633. +*
  634. +* Description:
  635. +* Init bmt from nand. Reconstruct if not found or data error
  636. +*
  637. +* Parameter:
  638. +* size: size of bmt and replace pool
  639. +*
  640. +* Return:
  641. +* NULL for failure, and a bmt struct for success
  642. +*******************************************************************/
  643. +bmt_struct *init_bmt(struct nand_chip * chip, int size)
  644. +{
  645. + struct mtk_nand_host *host;
  646. +
  647. + if (size > 0 && size < MAX_BMT_SIZE)
  648. + {
  649. + MSG(INIT, "Init bmt table, size: %d\n", size);
  650. + bmt_block_count = size;
  651. + } else
  652. + {
  653. + MSG(INIT, "Invalid bmt table size: %d\n", size);
  654. + return NULL;
  655. + }
  656. + nand_chip_bmt = chip;
  657. + system_block_count = chip->chipsize >> chip->phys_erase_shift;
  658. + total_block_count = bmt_block_count + system_block_count;
  659. + page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
  660. + host = (struct mtk_nand_host *)chip->priv;
  661. + mtd_bmt = &host->mtd;
  662. +
  663. + MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
  664. + MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
  665. +
  666. + // clear this flag; unmapped blocks in the pool will be erased on the first find_available_block()
  667. + pool_erased = 0;
  668. + memset(bmt.table, 0, size * sizeof(bmt_entry));
  669. + if ((bmt_block_index = load_bmt_data(system_block_count, size)))
  670. + {
  671. + MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
  672. + dump_bmt_info(&bmt);
  673. + return &bmt;
  674. + } else
  675. + {
  676. + MSG(INIT, "Load bmt data fail, need re-construct!\n");
  677. +#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT.
  678. + if (reconstruct_bmt(&bmt))
  679. + return &bmt;
  680. + else
  681. +#endif
  682. + return NULL;
  683. + }
  684. +}
  685. +
  686. +/*******************************************************************
  687. +* [BMT Interface]
  688. +*
  689. +* Description:
  690. +* Update BMT.
  691. +*
  692. +* Parameter:
  693. +* offset: update block/page offset.
  694. +* reason: update reason, see update_reason_t for reason.
  695. +* dat/oob: data and oob buffer for write fail.
  696. +*
  697. +* Return:
  698. +* Return true for success, and false for failure.
  699. +*******************************************************************/
  700. +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob)
  701. +{
  702. + int map_index;
  703. + int orig_bad_block = -1;
  704. + // int bmt_update_index;
  705. + int i;
  706. + int bad_index = offset / BLOCK_SIZE_BMT;
  707. +
  708. +#ifndef MTK_NAND_BMT
  709. + return false;
  710. +#endif
  711. + if (reason == UPDATE_WRITE_FAIL)
  712. + {
  713. + MSG(INIT, "Write fail, need to migrate\n");
  714. + if (!(map_index = migrate_from_bad(offset, dat, oob)))
  715. + {
  716. + MSG(INIT, "migrate fail\n");
  717. + return false;
  718. + }
  719. + } else
  720. + {
  721. + if (!(map_index = find_available_block(false)))
  722. + {
  723. + MSG(INIT, "Cannot find block in pool\n");
  724. + return false;
  725. + }
  726. + }
  727. +
  728. + // now let's update BMT
  729. + if (bad_index >= system_block_count) // mapped block become bad, find original bad block
  730. + {
  731. + for (i = 0; i < bmt_block_count; i++)
  732. + {
  733. + if (bmt.table[i].mapped_index == bad_index)
  734. + {
  735. + orig_bad_block = bmt.table[i].bad_index;
  736. + break;
  737. + }
  738. + }
  739. + // bmt.bad_count++;
  740. + MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
  741. +
  742. + bmt.table[i].mapped_index = map_index;
  743. + } else
  744. + {
  745. + bmt.table[bmt.mapped_count].mapped_index = map_index;
  746. + bmt.table[bmt.mapped_count].bad_index = bad_index;
  747. + bmt.mapped_count++;
  748. + }
  749. +
  750. + memset(oob_buf, 0xFF, sizeof(oob_buf));
  751. + fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
  752. + if (!write_bmt_to_flash(dat_buf, oob_buf))
  753. + return false;
  754. +
  755. + mark_block_bad_bmt(offset);
  756. +
  757. + return true;
  758. +}
  759. +
  760. +/*******************************************************************
  761. +* [BMT Interface]
  762. +*
  763. +* Description:
  764. +* Given a block index, return the mapped index if it's mapped, else
  765. +* return the given index.
  766. +*
  767. +* Parameter:
  768. +* index: a block index. This value cannot exceed
  769. +* system_block_count.
  770. +*
  771. +* Return: the mapped index, or the given index if no mapping exists.
  772. +*******************************************************************/
  773. +u16 get_mapping_block_index(int index)
  774. +{
  775. + int i;
  776. +#ifndef MTK_NAND_BMT
  777. + return index;
  778. +#endif
  779. + if (index > system_block_count)
  780. + {
  781. + return index;
  782. + }
  783. +
  784. + for (i = 0; i < bmt.mapped_count; i++)
  785. + {
  786. + if (bmt.table[i].bad_index == index)
  787. + {
  788. + return bmt.table[i].mapped_index;
  789. + }
  790. + }
  791. +
  792. + return index;
  793. +}
  794. +#ifdef __KERNEL_NAND__
  795. +EXPORT_SYMBOL_GPL(init_bmt);
  796. +EXPORT_SYMBOL_GPL(update_bmt);
  797. +EXPORT_SYMBOL_GPL(get_mapping_block_index);
  798. +
  799. +MODULE_LICENSE("GPL");
  800. +MODULE_AUTHOR("MediaTek");
  801. +MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
  802. +#endif
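Editor's note: the interface comments above describe how a NAND driver is expected to consume the BMT layer: init_bmt() once at probe time, get_mapping_block_index() before each block access, and update_bmt() when an erase or write fails. The sketch below is illustrative only; write_block_hw() is a hypothetical low-level helper, and the real call sites are in mtk_nand.c later in this patch.

    #include <linux/errno.h>
    #include <linux/mtd/nand.h>
    #include "bmt.h"

    /* Hypothetical low-level block writer, standing in for the driver's own. */
    extern int write_block_hw(u16 block, u8 *dat, u8 *oob);

    /* init_bmt(chip, pool_size) is assumed to have been called once at probe time. */
    static int example_write_block(struct nand_chip *chip, u32 block, u8 *dat, u8 *oob)
    {
            u32 block_size = 1 << chip->phys_erase_shift;
            u16 phys = get_mapping_block_index(block);        /* logical -> physical */

            if (write_block_hw(phys, dat, oob) != 0) {
                    /* update_bmt() takes a byte offset; bmt.c derives the block as
                     * offset / BLOCK_SIZE_BMT, migrates the data into the replace
                     * pool, rewrites the BMT and marks the failed block bad. */
                    if (!update_bmt(phys * block_size, UPDATE_WRITE_FAIL, dat, oob))
                            return -EIO;
            }
            return 0;
    }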
  803. --- /dev/null
  804. +++ b/drivers/mtd/nand/bmt.h
  805. @@ -0,0 +1,80 @@
  806. +#ifndef __BMT_H__
  807. +#define __BMT_H__
  808. +
  809. +#include "nand_def.h"
  810. +
  811. +#if defined(__PRELOADER_NAND__)
  812. +
  813. +#include "nand.h"
  814. +
  815. +#elif defined(__UBOOT_NAND__)
  816. +
  817. +#include <linux/mtd/nand.h>
  818. +#include "mtk_nand.h"
  819. +
  820. +#elif defined(__KERNEL_NAND__)
  821. +
  822. +#include <linux/mtd/mtd.h>
  823. +#include <linux/mtd/nand.h>
  824. +#include <linux/module.h>
  825. +#include "mtk_nand.h"
  826. +
  827. +#endif
  828. +
  829. +
  830. +#define MAX_BMT_SIZE (0x80)
  831. +#define BMT_VERSION (1) // initial version
  832. +
  833. +#define MAIN_SIGNATURE_OFFSET (0)
  834. +#define OOB_SIGNATURE_OFFSET (1)
  835. +#define OOB_INDEX_OFFSET (29)
  836. +#define OOB_INDEX_SIZE (2)
  837. +#define FAKE_INDEX (0xAAAA)
  838. +
  839. +typedef struct _bmt_entry_
  840. +{
  841. + u16 bad_index; // bad block index
  842. + u16 mapped_index; // mapping block index in the replace pool
  843. +} bmt_entry;
  844. +
  845. +typedef enum
  846. +{
  847. + UPDATE_ERASE_FAIL,
  848. + UPDATE_WRITE_FAIL,
  849. + UPDATE_UNMAPPED_BLOCK,
  850. + UPDATE_REASON_COUNT,
  851. +} update_reason_t;
  852. +
  853. +typedef struct
  854. +{
  855. + bmt_entry table[MAX_BMT_SIZE];
  856. + u8 version;
  857. + u8 mapped_count; // mapped block count in pool
  858. + u8 bad_count; // bad block count in pool. Not used in V1
  859. +} bmt_struct;
  860. +
  861. +/***************************************************************
  862. +* *
  863. +* Interface BMT need to use *
  864. +* *
  865. +***************************************************************/
  866. +extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
  867. +extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
  868. +extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page);
  869. +extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
  870. +extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
  871. +
  872. +
  873. +/***************************************************************
  874. +* *
  875. +* Different function interface for preloader/uboot/kernel *
  876. +* *
  877. +***************************************************************/
  878. +void set_bad_index_to_oob(u8 * oob, u16 index);
  879. +
  880. +
  881. +bmt_struct *init_bmt(struct nand_chip *nand, int size);
  882. +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob);
  883. +unsigned short get_mapping_block_index(int index);
  884. +
  885. +#endif // #ifndef __BMT_H__
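Editor's note: OOB_INDEX_OFFSET/OOB_INDEX_SIZE above define the reverse mapping that reconstruct_bmt() relies on: every used page of a replacement block stores the index of the original bad block as a 16-bit value in its spare area, and two 0xFF bytes there mean "page unused". A standalone illustration of that layout (the 64-byte spare size and the 0x0123 index are made up for the example):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EX_OOB_INDEX_OFFSET 29   /* same value as OOB_INDEX_OFFSET in bmt.h */
    #define EX_OOB_INDEX_SIZE   2    /* same value as OOB_INDEX_SIZE in bmt.h   */

    int main(void)
    {
            uint8_t oob[64];
            uint16_t bad_index = 0x0123, readback;

            memset(oob, 0xFF, sizeof(oob));   /* erased spare area looks "unused" */
            memcpy(oob + EX_OOB_INDEX_OFFSET, &bad_index, EX_OOB_INDEX_SIZE);
            memcpy(&readback, oob + EX_OOB_INDEX_OFFSET, EX_OOB_INDEX_SIZE);

            /* mirrors set_bad_index_to_oob()/get_bad_index_from_oob() in bmt.c */
            printf("stored bad index: 0x%04x\n", readback);
            return 0;
    }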
  886. --- /dev/null
  887. +++ b/drivers/mtd/nand/dev-nand.c
  888. @@ -0,0 +1,63 @@
  889. +#include <linux/init.h>
  890. +#include <linux/kernel.h>
  891. +#include <linux/platform_device.h>
  892. +
  893. +#include "mt6575_typedefs.h"
  894. +
  895. +#define RALINK_NAND_CTRL_BASE 0xBE003000
  896. +#define NFI_base RALINK_NAND_CTRL_BASE
  897. +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
  898. +#define NFIECC_base RALINK_NANDECC_CTRL_BASE
  899. +#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND
  900. +#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC
  901. +
  902. +#define SURFBOARDINT_NAND 22
  903. +#define SURFBOARDINT_NAND_ECC 23
  904. +
  905. +static struct resource MT7621_resource_nand[] = {
  906. + {
  907. + .start = NFI_base,
  908. + .end = NFI_base + 0x1A0,
  909. + .flags = IORESOURCE_MEM,
  910. + },
  911. + {
  912. + .start = NFIECC_base,
  913. + .end = NFIECC_base + 0x150,
  914. + .flags = IORESOURCE_MEM,
  915. + },
  916. + {
  917. + .start = MT7621_NFI_IRQ_ID,
  918. + .flags = IORESOURCE_IRQ,
  919. + },
  920. + {
  921. + .start = MT7621_NFIECC_IRQ_ID,
  922. + .flags = IORESOURCE_IRQ,
  923. + },
  924. +};
  925. +
  926. +static struct platform_device MT7621_nand_dev = {
  927. + .name = "MT7621-NAND",
  928. + .id = 0,
  929. + .num_resources = ARRAY_SIZE(MT7621_resource_nand),
  930. + .resource = MT7621_resource_nand,
  931. + .dev = {
  932. + .platform_data = &mt7621_nand_hw,
  933. + },
  934. +};
  935. +
  936. +
  937. +int __init mtk_nand_register(void)
  938. +{
  939. +
  940. + int retval = 0;
  941. +
  942. + retval = platform_device_register(&MT7621_nand_dev);
  943. + if (retval != 0) {
  944. + printk(KERN_ERR "register nand device fail\n");
  945. + return retval;
  946. + }
  947. +
  948. +
  949. + return retval;
  950. +}
  951. +arch_initcall(mtk_nand_register);
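Editor's note: dev-nand.c only registers the platform device and its MMIO/IRQ resources; the driver that binds to it by name ("MT7621-NAND") is added by mtk_nand.c and is not shown in this excerpt. A minimal, hypothetical sketch of what such a binding looks like, with the probe body elided:

    #include <linux/errno.h>
    #include <linux/ioport.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_nand_probe(struct platform_device *pdev)
    {
            struct resource *nfi = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            int irq = platform_get_irq(pdev, 0);

            if (!nfi || irq < 0)
                    return -ENODEV;

            /* ioremap the NFI range, request the IRQ, set up nand_chip/mtd_info... */
            return 0;
    }

    static struct platform_driver example_nand_driver = {
            .probe  = example_nand_probe,
            .driver = {
                    .name = "MT7621-NAND",   /* must match MT7621_nand_dev.name above */
            },
    };
    module_platform_driver(example_nand_driver);

The second memory resource and second IRQ registered above cover the ECC engine (NFIECC) in the same way.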
  952. --- /dev/null
  953. +++ b/drivers/mtd/nand/mt6575_typedefs.h
  954. @@ -0,0 +1,340 @@
  955. +/* Copyright Statement:
  956. + *
  957. + * This software/firmware and related documentation ("MediaTek Software") are
  958. + * protected under relevant copyright laws. The information contained herein
  959. + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
  960. + * Without the prior written permission of MediaTek inc. and/or its licensors,
  961. + * any reproduction, modification, use or disclosure of MediaTek Software,
  962. + * and information contained herein, in whole or in part, shall be strictly prohibited.
  963. + */
  964. +/* MediaTek Inc. (C) 2010. All rights reserved.
  965. + *
  966. + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
  967. + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
  968. + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
  969. + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
  970. + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
  971. + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
  972. + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
  973. + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
  974. + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
  975. + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
  976. + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
  977. + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
  978. + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
  979. + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
  980. + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
  981. + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
  982. + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
  983. + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
  984. + *
  985. + * The following software/firmware and/or related documentation ("MediaTek Software")
  986. + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
  987. + * applicable license agreements with MediaTek Inc.
  988. + */
  989. +
  990. +/*****************************************************************************
  991. +* Copyright Statement:
  992. +* --------------------
  993. +* This software is protected by Copyright and the information contained
  994. +* herein is confidential. The software may not be copied and the information
  995. +* contained herein may not be used or disclosed except with the written
  996. +* permission of MediaTek Inc. (C) 2008
  997. +*
  998. +* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
  999. +* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
  1000. +* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
  1001. +* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
  1002. +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
  1003. +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
  1004. +* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
  1005. +* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
  1006. +* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
  1007. +* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
  1008. +* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
  1009. +* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
  1010. +*
  1011. +* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
  1012. +* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
  1013. +* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
  1014. +* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
  1015. +* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
  1016. +*
  1017. +* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
  1018. +* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
  1019. +* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
  1020. +* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
  1021. +* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
  1022. +*
  1023. +*****************************************************************************/
  1024. +
  1025. +#ifndef _MT6575_TYPEDEFS_H
  1026. +#define _MT6575_TYPEDEFS_H
  1027. +
  1028. +#if defined (__KERNEL_NAND__)
  1029. +#include <linux/bug.h>
  1030. +#else
  1031. +#define true 1
  1032. +#define false 0
  1033. +#define bool u8
  1034. +#endif
  1035. +
  1036. +// ---------------------------------------------------------------------------
  1037. +// Basic Type Definitions
  1038. +// ---------------------------------------------------------------------------
  1039. +
  1040. +typedef volatile unsigned char *P_kal_uint8;
  1041. +typedef volatile unsigned short *P_kal_uint16;
  1042. +typedef volatile unsigned int *P_kal_uint32;
  1043. +
  1044. +typedef long LONG;
  1045. +typedef unsigned char UBYTE;
  1046. +typedef short SHORT;
  1047. +
  1048. +typedef signed char kal_int8;
  1049. +typedef signed short kal_int16;
  1050. +typedef signed int kal_int32;
  1051. +typedef long long kal_int64;
  1052. +typedef unsigned char kal_uint8;
  1053. +typedef unsigned short kal_uint16;
  1054. +typedef unsigned int kal_uint32;
  1055. +typedef unsigned long long kal_uint64;
  1056. +typedef char kal_char;
  1057. +
  1058. +typedef unsigned int *UINT32P;
  1059. +typedef volatile unsigned short *UINT16P;
  1060. +typedef volatile unsigned char *UINT8P;
  1061. +typedef unsigned char *U8P;
  1062. +
  1063. +typedef volatile unsigned char *P_U8;
  1064. +typedef volatile signed char *P_S8;
  1065. +typedef volatile unsigned short *P_U16;
  1066. +typedef volatile signed short *P_S16;
  1067. +typedef volatile unsigned int *P_U32;
  1068. +typedef volatile signed int *P_S32;
  1069. +typedef unsigned long long *P_U64;
  1070. +typedef signed long long *P_S64;
  1071. +
  1072. +typedef unsigned char U8;
  1073. +typedef signed char S8;
  1074. +typedef unsigned short U16;
  1075. +typedef signed short S16;
  1076. +typedef unsigned int U32;
  1077. +typedef signed int S32;
  1078. +typedef unsigned long long U64;
  1079. +typedef signed long long S64;
  1080. +//typedef unsigned char bool;
  1081. +
  1082. +typedef unsigned char UINT8;
  1083. +typedef unsigned short UINT16;
  1084. +typedef unsigned int UINT32;
  1085. +typedef unsigned short USHORT;
  1086. +typedef signed char INT8;
  1087. +typedef signed short INT16;
  1088. +typedef signed int INT32;
  1089. +typedef unsigned int DWORD;
  1090. +typedef void VOID;
  1091. +typedef unsigned char BYTE;
  1092. +typedef float FLOAT;
  1093. +
  1094. +typedef char *LPCSTR;
  1095. +typedef short *LPWSTR;
  1096. +
  1097. +
  1098. +// ---------------------------------------------------------------------------
  1099. +// Constants
  1100. +// ---------------------------------------------------------------------------
  1101. +
  1102. +#define IMPORT EXTERN
  1103. +#ifndef __cplusplus
  1104. + #define EXTERN extern
  1105. +#else
  1106. + #define EXTERN extern "C"
  1107. +#endif
  1108. +#define LOCAL static
  1109. +#define GLOBAL
  1110. +#define EXPORT GLOBAL
  1111. +
  1112. +#define EQ ==
  1113. +#define NEQ !=
  1114. +#define AND &&
  1115. +#define OR ||
  1116. +#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B)))
  1117. +
  1118. +#ifndef FALSE
  1119. + #define FALSE (0)
  1120. +#endif
  1121. +
  1122. +#ifndef TRUE
  1123. + #define TRUE (1)
  1124. +#endif
  1125. +
  1126. +#ifndef NULL
  1127. + #define NULL (0)
  1128. +#endif
  1129. +
  1130. +//enum boolean {false, true};
  1131. +enum {RX, TX, NONE};
  1132. +
  1133. +#ifndef BOOL
  1134. +typedef unsigned char BOOL;
  1135. +#endif
  1136. +
  1137. +typedef enum {
  1138. + KAL_FALSE = 0,
  1139. + KAL_TRUE = 1,
  1140. +} kal_bool;
  1141. +
  1142. +
  1143. +// ---------------------------------------------------------------------------
  1144. +// Type Casting
  1145. +// ---------------------------------------------------------------------------
  1146. +
  1147. +#define AS_INT32(x) (*(INT32 *)((void*)x))
  1148. +#define AS_INT16(x) (*(INT16 *)((void*)x))
  1149. +#define AS_INT8(x) (*(INT8 *)((void*)x))
  1150. +
  1151. +#define AS_UINT32(x) (*(UINT32 *)((void*)x))
  1152. +#define AS_UINT16(x) (*(UINT16 *)((void*)x))
  1153. +#define AS_UINT8(x) (*(UINT8 *)((void*)x))
  1154. +
  1155. +
  1156. +// ---------------------------------------------------------------------------
  1157. +// Register Manipulations
  1158. +// ---------------------------------------------------------------------------
  1159. +
  1160. +#define READ_REGISTER_UINT32(reg) \
  1161. + (*(volatile UINT32 * const)(reg))
  1162. +
  1163. +#define WRITE_REGISTER_UINT32(reg, val) \
  1164. + (*(volatile UINT32 * const)(reg)) = (val)
  1165. +
  1166. +#define READ_REGISTER_UINT16(reg) \
  1167. + (*(volatile UINT16 * const)(reg))
  1168. +
  1169. +#define WRITE_REGISTER_UINT16(reg, val) \
  1170. + (*(volatile UINT16 * const)(reg)) = (val)
  1171. +
  1172. +#define READ_REGISTER_UINT8(reg) \
  1173. + (*(volatile UINT8 * const)(reg))
  1174. +
  1175. +#define WRITE_REGISTER_UINT8(reg, val) \
  1176. + (*(volatile UINT8 * const)(reg)) = (val)
  1177. +
  1178. +#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x)))
  1179. +#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
  1180. +#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y))
  1181. +#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y))
  1182. +#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z))
  1183. +
  1184. +#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x)))
  1185. +#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
  1186. +#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y))
  1187. +#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y))
  1188. +#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z))
  1189. +
  1190. +#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x)))
  1191. +#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y))
  1192. +#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
  1193. +#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
  1194. +#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
  1195. +
  1196. +
  1197. +#define DRV_Reg8(addr) INREG8(addr)
  1198. +#define DRV_WriteReg8(addr, data) OUTREG8(addr, data)
  1199. +#define DRV_SetReg8(addr, data) SETREG8(addr, data)
  1200. +#define DRV_ClrReg8(addr, data) CLRREG8(addr, data)
  1201. +
  1202. +#define DRV_Reg16(addr) INREG16(addr)
  1203. +#define DRV_WriteReg16(addr, data) OUTREG16(addr, data)
  1204. +#define DRV_SetReg16(addr, data) SETREG16(addr, data)
  1205. +#define DRV_ClrReg16(addr, data) CLRREG16(addr, data)
  1206. +
  1207. +#define DRV_Reg32(addr) INREG32(addr)
  1208. +#define DRV_WriteReg32(addr, data) OUTREG32(addr, data)
  1209. +#define DRV_SetReg32(addr, data) SETREG32(addr, data)
  1210. +#define DRV_ClrReg32(addr, data) CLRREG32(addr, data)
  1211. +
  1212. +// !!! DEPRECATED, WILL BE REMOVED LATER !!!
  1213. +#define DRV_Reg(addr) DRV_Reg16(addr)
  1214. +#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data)
  1215. +#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data)
  1216. +#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data)
  1217. +
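+/*
+ * Usage sketch (editorial example, not part of the original header): the
+ * accessors above are plain volatile read/modify/write helpers.  The
+ * register name EXAMPLE_REG and bit mask EXAMPLE_BIT are hypothetical and
+ * only illustrate the intended pattern.
+ *
+ *   UINT32 val = DRV_Reg32(EXAMPLE_REG);    // read (alias of INREG32)
+ *   SETREG32(EXAMPLE_REG, EXAMPLE_BIT);     // reg |= bit
+ *   CLRREG32(EXAMPLE_REG, EXAMPLE_BIT);     // reg &= ~bit
+ *   MASKREG32(EXAMPLE_REG, 0xF0, 0x30);     // (reg & ~mask) | value
+ */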
  1218. +
  1219. +// ---------------------------------------------------------------------------
  1220. +// Compile-Time Deduction Macros
  1221. +// ---------------------------------------------------------------------------
  1222. +
  1223. +#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? (n) :
  1224. +#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
  1225. +#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
  1226. +#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
  1227. +#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
  1228. +#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
  1229. +
  1230. +#define MASK_OFFSET_ERROR (0xFFFFFFFF)
  1231. +
  1232. +#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
  1233. +
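+/*
+ * Editorial example (not in the original source): MASK_OFFSET() expands to
+ * a ternary chain that the compiler folds to a constant for constant input,
+ * returning the index of the lowest set bit:
+ *
+ *   MASK_OFFSET(0x01) == 0
+ *   MASK_OFFSET(0x40) == 6
+ *   MASK_OFFSET(0x00) == MASK_OFFSET_ERROR
+ */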
  1234. +
  1235. +// ---------------------------------------------------------------------------
  1236. +// Assertions
  1237. +// ---------------------------------------------------------------------------
  1238. +
  1239. +#ifndef ASSERT
  1240. + #define ASSERT(expr) BUG_ON(!(expr))
  1241. +#endif
  1242. +
  1243. +#ifndef NOT_IMPLEMENTED
  1244. + #define NOT_IMPLEMENTED() BUG_ON(1)
  1245. +#endif
  1246. +
  1247. +#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__)
  1248. +#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
  1249. +#define STATIC_ASSERT_XX(pred, line) \
  1250. + extern char assertion_failed_at_##line[(pred) ? 1 : -1]
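+/*
+ * Usage sketch (editorial, not from the original source): the negative
+ * array size makes compilation fail when the predicate is false, e.g.
+ *
+ *   STATIC_ASSERT(sizeof(UINT32) == 4);  // builds
+ *   STATIC_ASSERT(sizeof(UINT8) == 2);   // error: negative array size
+ */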
  1251. +
  1252. +// ---------------------------------------------------------------------------
  1253. +// Resolve Compiler Warnings
  1254. +// ---------------------------------------------------------------------------
  1255. +
  1256. +#define NOT_REFERENCED(x) { (x) = (x); }
  1257. +
  1258. +
  1259. +// ---------------------------------------------------------------------------
  1260. +// Utilities
  1261. +// ---------------------------------------------------------------------------
  1262. +
  1263. +#define MAXIMUM(A,B) (((A)>(B))?(A):(B))
  1264. +#define MINIMUM(A,B) (((A)<(B))?(A):(B))
  1265. +
  1266. +#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
  1267. +#define DVT_DELAYMACRO(u4Num) \
  1268. +{ \
  1269. + UINT32 u4Count = 0 ; \
  1270. + for (u4Count = 0; u4Count < u4Num; u4Count++ ); \
  1271. +}
  1272. +
  1273. +#define A68351B 0
  1274. +#define B68351B 1
  1275. +#define B68351D 2
  1276. +#define B68351E 3
  1277. +#define UNKNOWN_IC_VERSION 0xFF
  1278. +
  1279. +/* NAND driver */
  1280. +struct mtk_nand_host_hw {
  1281. + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
  1282. + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
  1283. + unsigned int nfi_cs_num; /* NFI_CS_NUM */
  1284. + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
  1285. + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
  1286. + unsigned int nand_ecc_size;
  1287. + unsigned int nand_ecc_bytes;
  1288. + unsigned int nand_ecc_mode;
  1289. +};
  1290. +extern struct mtk_nand_host_hw mt7621_nand_hw;
  1291. +extern unsigned int CFG_BLOCKSIZE;
  1292. +
  1293. +#endif // _MT6575_TYPEDEFS_H
  1294. +
  1295. --- /dev/null
  1296. +++ b/drivers/mtd/nand/mtk_nand.c
  1297. @@ -0,0 +1,2304 @@
  1298. +/******************************************************************************
  1299. + * mtk_nand.c - MTK NAND Flash Device Driver
  1300. + *
  1301. + * Copyright 2009-2012 MediaTek Co., Ltd.
  1302. + *
  1303. + * DESCRIPTION:
  1304. + *   This file provides NAND-related functions to the other drivers.
  1305. + *
  1306. + * Modification history
  1307. + * ----------------------------------------
  1308. + * v3.0, 11 Feb 2010, mtk
  1309. + * ----------------------------------------
  1310. + ******************************************************************************/
  1311. +#include "nand_def.h"
  1312. +#include <linux/slab.h>
  1313. +#include <linux/init.h>
  1314. +#include <linux/module.h>
  1315. +#include <linux/delay.h>
  1316. +#include <linux/errno.h>
  1317. +#include <linux/sched.h>
  1318. +#include <linux/types.h>
  1319. +#include <linux/wait.h>
  1320. +#include <linux/spinlock.h>
  1321. +#include <linux/interrupt.h>
  1322. +#include <linux/mtd/mtd.h>
  1323. +#include <linux/mtd/nand.h>
  1324. +#include <linux/mtd/partitions.h>
  1325. +#include <linux/mtd/nand_ecc.h>
  1326. +#include <linux/dma-mapping.h>
  1327. +#include <linux/jiffies.h>
  1328. +#include <linux/platform_device.h>
  1329. +#include <linux/proc_fs.h>
  1330. +#include <linux/time.h>
  1331. +#include <linux/mm.h>
  1332. +#include <asm/io.h>
  1333. +#include <asm/cacheflush.h>
  1334. +#include <asm/uaccess.h>
  1335. +#include <linux/miscdevice.h>
  1336. +#include "mtk_nand.h"
  1337. +#include "nand_device_list.h"
  1338. +
  1339. +#include "bmt.h"
  1340. +#include "partition.h"
  1341. +
  1342. +unsigned int CFG_BLOCKSIZE;
  1343. +
  1344. +static int shift_on_bbt = 0;
  1345. +extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag);
  1346. +extern int nand_bbt_get(struct mtd_info *mtd, int page);
  1347. +int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page);
  1348. +
  1349. +static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
  1350. +
  1351. +#define NAND_CMD_STATUS_MULTI 0x71
  1352. +
  1353. +void show_stack(struct task_struct *tsk, unsigned long *sp);
  1354. +extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
  1355. +extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
  1356. +
  1357. +struct mtk_nand_host mtk_nand_host; /* includes mtd_info and nand_chip structs */
  1358. +struct mtk_nand_host_hw mt7621_nand_hw = {
  1359. + .nfi_bus_width = 8,
  1360. + .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
  1361. + .nfi_cs_num = NFI_CS_NUM,
  1362. + .nand_sec_size = 512,
  1363. + .nand_sec_shift = 9,
  1364. + .nand_ecc_size = 2048,
  1365. + .nand_ecc_bytes = 32,
  1366. + .nand_ecc_mode = NAND_ECC_HW,
  1367. +};
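+/*
+ * Editorial note (not in the original source): with the values above, a
+ * 2048-byte page is handled as (2048 >> nand_sec_shift) = 4 sectors of
+ * nand_sec_size = 512 bytes; each sector carries its own spare/FDM bytes
+ * and is ECC-protected separately by the engine set up in ECC_Config().
+ */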
  1368. +
  1369. +
  1370. +/*******************************************************************************
  1371. + * Global Variable Definition
  1372. + *******************************************************************************/
  1373. +
  1374. +#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
  1375. + do { \
  1376. + DRV_WriteReg(NFI_CMD_REG16,cmd);\
  1377. + while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
  1378. + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
  1379. + DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
  1380. + DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
  1381. + while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
  1382. + } while (0)
  1383. +
  1384. +//-------------------------------------------------------------------------------
  1385. +static struct NAND_CMD g_kCMD;
  1386. +static u32 g_u4ChipVer;
  1387. +bool g_bInitDone;
  1388. +static bool g_bcmdstatus;
  1389. +static u32 g_value = 0;
  1390. +static int g_page_size;
  1391. +
  1392. +BOOL g_bHwEcc = true;
  1393. +
  1394. +
  1395. +static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue
  1396. +static u8 local_buffer[4096 + 512];
  1397. +
  1398. +extern void nand_release_device(struct mtd_info *mtd);
  1399. +extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
  1400. +
  1401. +#if defined(MTK_NAND_BMT)
  1402. +static bmt_struct *g_bmt;
  1403. +#endif
  1404. +struct mtk_nand_host *host;
  1405. +extern struct mtd_partition g_pasStatic_Partition[];
  1406. +int part_num = NUM_PARTITIONS;
  1407. +int manu_id;
  1408. +int dev_id;
  1409. +
  1410. +static u8 local_oob_buf[NAND_MAX_OOBSIZE];
  1411. +
  1412. +static u8 nand_badblock_offset = 0;
  1413. +
  1414. +void nand_enable_clock(void)
  1415. +{
  1416. + //enable_clock(MT65XX_PDN_PERI_NFI, "NAND");
  1417. +}
  1418. +
  1419. +void nand_disable_clock(void)
  1420. +{
  1421. + //disable_clock(MT65XX_PDN_PERI_NFI, "NAND");
  1422. +}
  1423. +
  1424. +static struct nand_ecclayout nand_oob_16 = {
  1425. + .eccbytes = 8,
  1426. + .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
  1427. + .oobfree = {{1, 6}, {0, 0}}
  1428. +};
  1429. +
  1430. +struct nand_ecclayout nand_oob_64 = {
  1431. + .eccbytes = 32,
  1432. + .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
  1433. + 40, 41, 42, 43, 44, 45, 46, 47,
  1434. + 48, 49, 50, 51, 52, 53, 54, 55,
  1435. + 56, 57, 58, 59, 60, 61, 62, 63},
  1436. + .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
  1437. +};
  1438. +
  1439. +struct nand_ecclayout nand_oob_128 = {
  1440. + .eccbytes = 64,
  1441. + .eccpos = {
  1442. + 64, 65, 66, 67, 68, 69, 70, 71,
  1443. + 72, 73, 74, 75, 76, 77, 78, 79,
  1444. + 80, 81, 82, 83, 84, 85, 86, 87,
  1445. + 88, 89, 90, 91, 92, 93, 94, 95,
  1446. + 96, 97, 98, 99, 100, 101, 102, 103,
  1447. + 104, 105, 106, 107, 108, 109, 110, 111,
  1448. + 112, 113, 114, 115, 116, 117, 118, 119,
  1449. + 120, 121, 122, 123, 124, 125, 126, 127},
  1450. + .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
  1451. +};
  1452. +
  1453. +flashdev_info devinfo;
  1454. +
  1455. +void dump_nfi(void)
  1456. +{
  1457. +}
  1458. +
  1459. +void dump_ecc(void)
  1460. +{
  1461. +}
  1462. +
  1463. +u32
  1464. +nand_virt_to_phys_add(u32 va)
  1465. +{
  1466. + u32 pageOffset = (va & (PAGE_SIZE - 1));
  1467. + pgd_t *pgd;
  1468. + pmd_t *pmd;
  1469. + pte_t *pte;
  1470. + u32 pa;
  1471. +
  1472. + if (virt_addr_valid(va))
  1473. + return __virt_to_phys(va);
  1474. +
  1475. + if (NULL == current) {
  1476. + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, current is NULL!\n");
  1477. + return 0;
  1478. + }
  1479. +
  1480. + if (NULL == current->mm) {
  1481. + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, current->mm is NULL! tgid=0x%x, name=%s\n", current->tgid, current->comm);
  1482. + return 0;
  1483. + }
  1484. +
  1485. + pgd = pgd_offset(current->mm, va); /* walk the current process's page tables */
  1486. + if (pgd_none(*pgd) || pgd_bad(*pgd)) {
  1487. + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
  1488. + return 0;
  1489. + }
  1490. +
  1491. + pmd = pmd_offset((pud_t *)pgd, va);
  1492. + if (pmd_none(*pmd) || pmd_bad(*pmd)) {
  1493. + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
  1494. + return 0;
  1495. + }
  1496. +
  1497. + pte = pte_offset_map(pmd, va);
  1498. + if (pte_present(*pte)) {
  1499. + pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
  1500. + return pa;
  1501. + }
  1502. +
  1503. + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
  1504. + return 0;
  1505. +}
  1506. +EXPORT_SYMBOL(nand_virt_to_phys_add);
  1507. +
  1508. +bool
  1509. +get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo)
  1510. +{
  1511. + u32 index;
  1512. + for (index = 0; gen_FlashTable[index].id != 0; index++) {
  1513. + if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) {
  1514. + pdevinfo->id = gen_FlashTable[index].id;
  1515. + pdevinfo->ext_id = gen_FlashTable[index].ext_id;
  1516. + pdevinfo->blocksize = gen_FlashTable[index].blocksize;
  1517. + pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
  1518. + pdevinfo->iowidth = gen_FlashTable[index].iowidth;
  1519. + pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
  1520. + pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
  1521. + pdevinfo->pagesize = gen_FlashTable[index].pagesize;
  1522. + pdevinfo->sparesize = gen_FlashTable[index].sparesize;
  1523. + pdevinfo->totalsize = gen_FlashTable[index].totalsize;
  1524. + memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
  1525. + printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id);
  1526. +
  1527. + goto find;
  1528. + }
  1529. + }
  1530. +
  1531. +find:
  1532. + if (0 == pdevinfo->id) {
  1533. + printk(KERN_INFO "Device not found, ID: %x\n", id);
  1534. + return false;
  1535. + } else {
  1536. + return true;
  1537. + }
  1538. +}
  1539. +
  1540. +static void
  1541. +ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
  1542. +{
  1543. + u32 u4ENCODESize;
  1544. + u32 u4DECODESize;
  1545. + u32 ecc_bit_cfg = ECC_CNFG_ECC4;
  1546. +
  1547. + switch(ecc_bit){
  1548. + case 4:
  1549. + ecc_bit_cfg = ECC_CNFG_ECC4;
  1550. + break;
  1551. + case 8:
  1552. + ecc_bit_cfg = ECC_CNFG_ECC8;
  1553. + break;
  1554. + case 10:
  1555. + ecc_bit_cfg = ECC_CNFG_ECC10;
  1556. + break;
  1557. + case 12:
  1558. + ecc_bit_cfg = ECC_CNFG_ECC12;
  1559. + break;
  1560. + default:
  1561. + break;
  1562. + }
  1563. + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
  1564. + do {
  1565. + } while (!DRV_Reg16(ECC_DECIDLE_REG16));
  1566. +
  1567. + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
  1568. + do {
  1569. + } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
  1570. +
  1571. + /* setup FDM register base */
  1572. + DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
  1573. +
  1574. + /* Sector + FDM */
  1575. + u4ENCODESize = (hw->nand_sec_size + 8) << 3;
  1576. + /* Sector + FDM + YAFFS2 meta data bits */
  1577. + u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13;
  1578. +
  1579. + /* configure ECC decoder && encoder */
  1580. + DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
  1581. +
  1582. + DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
  1583. + NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
  1584. +}
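+/*
+ * Worked example (editorial, derived only from the code above): with the
+ * default 512-byte sector and 4-bit correction,
+ *
+ *   u4ENCODESize = (512 + 8) * 8          = 4160 bits  (sector + FDM)
+ *   u4DECODESize = (512 + 8) * 8 + 4 * 13 = 4212 bits  (plus parity)
+ *
+ * i.e. the decoder message length is the encoder length plus ecc_bit * 13
+ * parity bits, 13 bits being needed per correctable error at this
+ * codeword size.
+ */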
  1585. +
  1586. +static void
  1587. +ECC_Decode_Start(void)
  1588. +{
  1589. + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
  1590. + ;
  1591. + DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
  1592. +}
  1593. +
  1594. +static void
  1595. +ECC_Decode_End(void)
  1596. +{
  1597. + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
  1598. + ;
  1599. + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
  1600. +}
  1601. +
  1602. +static void
  1603. +ECC_Encode_Start(void)
  1604. +{
  1605. + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
  1606. + ;
  1607. + mb();
  1608. + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
  1609. +}
  1610. +
  1611. +static void
  1612. +ECC_Encode_End(void)
  1613. +{
  1614. + /* wait for device returning idle */
  1615. + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
  1616. + mb();
  1617. + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
  1618. +}
  1619. +
  1620. +static bool
  1621. +mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
  1622. +{
  1623. + bool bRet = true;
  1624. + u16 u2SectorDoneMask = 1 << u4SecIndex;
  1625. + u32 u4ErrorNumDebug, i, u4ErrNum;
  1626. + u32 timeout = 0xFFFF;
  1627. + // int el;
  1628. + u32 au4ErrBitLoc[6];
  1629. + u32 u4ErrByteLoc, u4BitOffset;
  1630. + u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
  1631. +
  1632. + // Wait for decode done
  1633. + while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
  1634. + timeout--;
  1635. + if (0 == timeout)
  1636. + return false;
  1637. + }
  1638. + /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
  1639. + memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
  1640. + u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
  1641. + u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2);
  1642. + u4ErrNum &= 0xF;
  1643. +
  1644. + if (u4ErrNum) {
  1645. + if (0xF == u4ErrNum) {
  1646. + mtd->ecc_stats.failed++;
  1647. + bRet = false;
  1648. + //printk(KERN_ERR"UnCorrectable at PageAddr=%d\n", u4PageAddr);
  1649. + } else {
  1650. + for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
  1651. + au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
  1652. + u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
  1653. + if (u4ErrBitLoc1th < 0x1000) {
  1654. + u4ErrByteLoc = u4ErrBitLoc1th / 8;
  1655. + u4BitOffset = u4ErrBitLoc1th % 8;
  1656. + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
  1657. + mtd->ecc_stats.corrected++;
  1658. + } else {
  1659. + mtd->ecc_stats.failed++;
  1660. + }
  1661. + u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
  1662. + if (0 != u4ErrBitLoc2nd) {
  1663. + if (u4ErrBitLoc2nd < 0x1000) {
  1664. + u4ErrByteLoc = u4ErrBitLoc2nd / 8;
  1665. + u4BitOffset = u4ErrBitLoc2nd % 8;
  1666. + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
  1667. + mtd->ecc_stats.corrected++;
  1668. + } else {
  1669. + mtd->ecc_stats.failed++;
  1670. + //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
  1671. + }
  1672. + }
  1673. + }
  1674. + }
  1675. + if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
  1676. + bRet = false;
  1677. + }
  1678. + return bRet;
  1679. +}
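+/*
+ * Editorial note (not in the original source): each ECC_DECELx register
+ * packs two 13-bit error locations (bits 12:0 and 28:16), which is why the
+ * loop above reads (u4ErrNum + 1) / 2 words and splits each into the
+ * "1th"/"2nd" halves; a location >= 0x1000 (4096 bits = 512 bytes) lies
+ * outside the sector data and is counted as uncorrectable here.
+ */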
  1680. +
  1681. +static bool
  1682. +mtk_nand_RFIFOValidSize(u16 u2Size)
  1683. +{
  1684. + u32 timeout = 0xFFFF;
  1685. + while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
  1686. + timeout--;
  1687. + if (0 == timeout)
  1688. + return false;
  1689. + }
  1690. + return true;
  1691. +}
  1692. +
  1693. +static bool
  1694. +mtk_nand_WFIFOValidSize(u16 u2Size)
  1695. +{
  1696. + u32 timeout = 0xFFFF;
  1697. +
  1698. + while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
  1699. + timeout--;
  1700. + if (0 == timeout)
  1701. + return false;
  1702. + }
  1703. + return true;
  1704. +}
  1705. +
  1706. +static bool
  1707. +mtk_nand_status_ready(u32 u4Status)
  1708. +{
  1709. + u32 timeout = 0xFFFF;
  1710. +
  1711. + while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
  1712. + timeout--;
  1713. + if (0 == timeout)
  1714. + return false;
  1715. + }
  1716. + return true;
  1717. +}
  1718. +
  1719. +static bool
  1720. +mtk_nand_reset(void)
  1721. +{
  1722. + int timeout = 0xFFFF;
  1723. + if (DRV_Reg16(NFI_MASTERSTA_REG16)) {
  1724. + mb();
  1725. + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
  1726. + while (DRV_Reg16(NFI_MASTERSTA_REG16)) {
  1727. + timeout--;
  1728. + if (!timeout)
  1729. + MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
  1730. + }
  1731. + }
  1732. + /* issue reset operation */
  1733. + mb();
  1734. + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
  1735. +
  1736. + return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
  1737. +}
  1738. +
  1739. +static void
  1740. +mtk_nand_set_mode(u16 u2OpMode)
  1741. +{
  1742. + u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
  1743. + u2Mode &= ~CNFG_OP_MODE_MASK;
  1744. + u2Mode |= u2OpMode;
  1745. + DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
  1746. +}
  1747. +
  1748. +static void
  1749. +mtk_nand_set_autoformat(bool bEnable)
  1750. +{
  1751. + if (bEnable)
  1752. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
  1753. + else
  1754. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
  1755. +}
  1756. +
  1757. +static void
  1758. +mtk_nand_configure_fdm(u16 u2FDMSize)
  1759. +{
  1760. + NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
  1761. + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
  1762. + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
  1763. +}
  1764. +
  1765. +static void
  1766. +mtk_nand_configure_lock(void)
  1767. +{
  1768. + u32 u4WriteColNOB = 2;
  1769. + u32 u4WriteRowNOB = 3;
  1770. + u32 u4EraseColNOB = 0;
  1771. + u32 u4EraseRowNOB = 3;
  1772. + DRV_WriteReg16(NFI_LOCKANOB_REG16,
  1773. + (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
  1774. +
  1775. + if (CHIPVER_ECO_1 == g_u4ChipVer) {
  1776. + int i;
  1777. + for (i = 0; i < 16; ++i) {
  1778. + DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
  1779. + DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
  1780. + }
  1781. + //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
  1782. + DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
  1783. + DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
  1784. + }
  1785. +}
  1786. +
  1787. +static bool
  1788. +mtk_nand_pio_ready(void)
  1789. +{
  1790. + int count = 0;
  1791. + while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
  1792. + count++;
  1793. + if (count > 0xffff) {
  1794. + printk("PIO_DIRDY timeout\n");
  1795. + return false;
  1796. + }
  1797. + }
  1798. +
  1799. + return true;
  1800. +}
  1801. +
  1802. +static bool
  1803. +mtk_nand_set_command(u16 command)
  1804. +{
  1805. + mb();
  1806. + DRV_WriteReg16(NFI_CMD_REG16, command);
  1807. + return mtk_nand_status_ready(STA_CMD_STATE);
  1808. +}
  1809. +
  1810. +static bool
  1811. +mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
  1812. +{
  1813. + mb();
  1814. + DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
  1815. + DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
  1816. + DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
  1817. + return mtk_nand_status_ready(STA_ADDR_STATE);
  1818. +}
  1819. +
  1820. +static bool
  1821. +mtk_nand_check_RW_count(u16 u2WriteSize)
  1822. +{
  1823. + u32 timeout = 0xFFFF;
  1824. + u16 u2SecNum = u2WriteSize >> 9;
  1825. +
  1826. + while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) {
  1827. + timeout--;
  1828. + if (0 == timeout) {
  1829. + printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
  1830. + return false;
  1831. + }
  1832. + }
  1833. + return true;
  1834. +}
  1835. +
  1836. +static bool
  1837. +mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf)
  1838. +{
  1839. + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
  1840. + bool bRet = false;
  1841. + u16 sec_num = 1 << (nand->page_shift - 9);
  1842. + u32 col_addr = u4ColAddr;
  1843. + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
  1844. + if (nand->options & NAND_BUSWIDTH_16)
  1845. + col_addr /= 2;
  1846. +
  1847. + if (!mtk_nand_reset())
  1848. + goto cleanup;
  1849. + if (g_bHwEcc) {
  1850. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1851. + } else {
  1852. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1853. + }
  1854. +
  1855. + mtk_nand_set_mode(CNFG_OP_READ);
  1856. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
  1857. + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
  1858. +
  1859. + if (full) {
  1860. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  1861. +
  1862. + if (g_bHwEcc)
  1863. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1864. + else
  1865. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1866. + } else {
  1867. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1868. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  1869. + }
  1870. +
  1871. + mtk_nand_set_autoformat(full);
  1872. + if (full)
  1873. + if (g_bHwEcc)
  1874. + ECC_Decode_Start();
  1875. + if (!mtk_nand_set_command(NAND_CMD_READ0))
  1876. + goto cleanup;
  1877. + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
  1878. + goto cleanup;
  1879. + if (!mtk_nand_set_command(NAND_CMD_READSTART))
  1880. + goto cleanup;
  1881. + if (!mtk_nand_status_ready(STA_NAND_BUSY))
  1882. + goto cleanup;
  1883. +
  1884. + bRet = true;
  1885. +
  1886. +cleanup:
  1887. + return bRet;
  1888. +}
  1889. +
  1890. +static bool
  1891. +mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
  1892. +{
  1893. + bool bRet = false;
  1894. + u32 sec_num = 1 << (nand->page_shift - 9);
  1895. + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
  1896. + if (nand->options & NAND_BUSWIDTH_16)
  1897. + col_addr /= 2;
  1898. +
  1899. + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
  1900. + if (!mtk_nand_reset())
  1901. + return false;
  1902. +
  1903. + mtk_nand_set_mode(CNFG_OP_PRGM);
  1904. +
  1905. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
  1906. +
  1907. + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
  1908. +
  1909. + if (full) {
  1910. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  1911. + if (g_bHwEcc)
  1912. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1913. + else
  1914. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1915. + } else {
  1916. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  1917. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  1918. + }
  1919. +
  1920. + mtk_nand_set_autoformat(full);
  1921. +
  1922. + if (full)
  1923. + if (g_bHwEcc)
  1924. + ECC_Encode_Start();
  1925. +
  1926. + if (!mtk_nand_set_command(NAND_CMD_SEQIN))
  1927. + goto cleanup;
  1928. + // FIXME: handle any kind of address cycle
  1929. + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
  1930. + goto cleanup;
  1931. +
  1932. + if (!mtk_nand_status_ready(STA_NAND_BUSY))
  1933. + goto cleanup;
  1934. +
  1935. + bRet = true;
  1936. +
  1937. +cleanup:
  1938. + return bRet;
  1939. +}
  1940. +
  1941. +static bool
  1942. +mtk_nand_check_dececc_done(u32 u4SecNum)
  1943. +{
  1944. + u32 timeout, dec_mask;
  1945. +
  1946. + timeout = 0xffff;
  1947. + dec_mask = (1 << u4SecNum) - 1;
  1948. + while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0)
  1949. + timeout--;
  1950. + if (timeout == 0) {
  1951. + MSG(VERIFY, "ECC_DECDONE: timeout\n");
  1952. + return false;
  1953. + }
  1954. + return true;
  1955. +}
  1956. +
  1957. +static bool
  1958. +mtk_nand_mcu_read_data(u8 * buf, u32 length)
  1959. +{
  1960. + int timeout = 0xffff;
  1961. + u32 i;
  1962. + u32 *buf32 = (u32 *) buf;
  1963. + if ((u32) buf % 4 || length % 4)
  1964. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  1965. + else
  1966. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  1967. +
  1968. + //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
  1969. + mb();
  1970. + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
  1971. +
  1972. + if ((u32) buf % 4 || length % 4) {
  1973. + for (i = 0; (i < (length)) && (timeout > 0);) {
  1974. + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  1975. + *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
  1976. + i++;
  1977. + } else {
  1978. + timeout--;
  1979. + }
  1980. + if (0 == timeout) {
  1981. + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
  1982. + dump_nfi();
  1983. + return false;
  1984. + }
  1985. + }
  1986. + } else {
  1987. + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
  1988. + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  1989. + *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
  1990. + i++;
  1991. + } else {
  1992. + timeout--;
  1993. + }
  1994. + if (0 == timeout) {
  1995. + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
  1996. + dump_nfi();
  1997. + return false;
  1998. + }
  1999. + }
  2000. + }
  2001. + return true;
  2002. +}
  2003. +
  2004. +static bool
  2005. +mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
  2006. +{
  2007. + return mtk_nand_mcu_read_data(pDataBuf, u4Size);
  2008. +}
  2009. +
  2010. +static bool
  2011. +mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
  2012. +{
  2013. + u32 timeout = 0xFFFF;
  2014. + u32 i;
  2015. + u32 *pBuf32;
  2016. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2017. + mb();
  2018. + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
  2019. + pBuf32 = (u32 *) buf;
  2020. +
  2021. + if ((u32) buf % 4 || length % 4)
  2022. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2023. + else
  2024. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2025. +
  2026. + if ((u32) buf % 4 || length % 4) {
  2027. + for (i = 0; (i < (length)) && (timeout > 0);) {
  2028. + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  2029. + DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
  2030. + i++;
  2031. + } else {
  2032. + timeout--;
  2033. + }
  2034. + if (0 == timeout) {
  2035. + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
  2036. + dump_nfi();
  2037. + return false;
  2038. + }
  2039. + }
  2040. + } else {
  2041. + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
  2042. + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  2043. + DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
  2044. + i++;
  2045. + } else {
  2046. + timeout--;
  2047. + }
  2048. + if (0 == timeout) {
  2049. + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
  2050. + dump_nfi();
  2051. + return false;
  2052. + }
  2053. + }
  2054. + }
  2055. +
  2056. + return true;
  2057. +}
  2058. +
  2059. +static bool
  2060. +mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
  2061. +{
  2062. + return mtk_nand_mcu_write_data(mtd, buf, size);
  2063. +}
  2064. +
  2065. +static void
  2066. +mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
  2067. +{
  2068. + u32 i;
  2069. + u32 *pBuf32 = (u32 *) pDataBuf;
  2070. +
  2071. + if (pBuf32) {
  2072. + for (i = 0; i < u4SecNum; ++i) {
  2073. + *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
  2074. + *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
  2075. + }
  2076. + }
  2077. +}
  2078. +
  2079. +static u8 fdm_buf[64];
  2080. +static void
  2081. +mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
  2082. +{
  2083. + u32 i, j;
  2084. + u8 checksum = 0;
  2085. + bool empty = true;
  2086. + struct nand_oobfree *free_entry;
  2087. + u32 *pBuf32;
  2088. +
  2089. + memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
  2090. +
  2091. + free_entry = chip->ecc.layout->oobfree;
  2092. + for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
  2093. + for (j = 0; j < free_entry[i].length; j++) {
  2094. + if (pDataBuf[free_entry[i].offset + j] != 0xFF)
  2095. + empty = false;
  2096. + checksum ^= pDataBuf[free_entry[i].offset + j];
  2097. + }
  2098. + }
  2099. +
  2100. + if (!empty) {
  2101. + fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
  2102. + }
  2103. +
  2104. + pBuf32 = (u32 *) fdm_buf;
  2105. + for (i = 0; i < u4SecNum; ++i) {
  2106. + DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
  2107. + DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
  2108. + }
  2109. +}
  2110. +
  2111. +static void
  2112. +mtk_nand_stop_read(void)
  2113. +{
  2114. + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
  2115. + mtk_nand_reset();
  2116. + if (g_bHwEcc)
  2117. + ECC_Decode_End();
  2118. + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
  2119. +}
  2120. +
  2121. +static void
  2122. +mtk_nand_stop_write(void)
  2123. +{
  2124. + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
  2125. + if (g_bHwEcc)
  2126. + ECC_Encode_End();
  2127. + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
  2128. +}
  2129. +
  2130. +bool
  2131. +mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
  2132. +{
  2133. + u8 *buf;
  2134. + bool bRet = true;
  2135. + struct nand_chip *nand = mtd->priv;
  2136. + u32 u4SecNum = u4PageSize >> 9;
  2137. +
  2138. + if (((u32) pPageBuf % 16) && local_buffer_16_align)
  2139. + buf = local_buffer_16_align;
  2140. + else
  2141. + buf = pPageBuf;
  2142. + if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) {
  2143. + int j;
  2144. + for (j = 0 ; j < u4SecNum; j++) {
  2145. + if (!mtk_nand_read_page_data(mtd, buf+j*512, 512))
  2146. + bRet = false;
  2147. + if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1))
  2148. + bRet = false;
  2149. + if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr))
  2150. + bRet = false;
  2151. + }
  2152. + if (!mtk_nand_status_ready(STA_NAND_BUSY))
  2153. + bRet = false;
  2154. +
  2155. + mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
  2156. + mtk_nand_stop_read();
  2157. + }
  2158. +
  2159. + if (buf == local_buffer_16_align)
  2160. + memcpy(pPageBuf, buf, u4PageSize);
  2161. +
  2162. + return bRet;
  2163. +}
  2164. +
  2165. +int
  2166. +mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
  2167. +{
  2168. + struct nand_chip *chip = mtd->priv;
  2169. + u32 u4SecNum = u4PageSize >> 9;
  2170. + u8 *buf;
  2171. + u8 status;
  2172. +
  2173. + MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
  2174. +
  2175. + if (((u32) pPageBuf % 16) && local_buffer_16_align) {
  2176. + printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
  2177. + memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
  2178. + buf = local_buffer_16_align;
  2179. + } else
  2180. + buf = pPageBuf;
  2181. +
  2182. + if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) {
  2183. + mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
  2184. + (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
  2185. + (void)mtk_nand_check_RW_count(u4PageSize);
  2186. + mtk_nand_stop_write();
  2187. + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
  2188. + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
  2189. + }
  2190. +
  2191. + status = chip->waitfunc(mtd, chip);
  2192. + if (status & NAND_STATUS_FAIL)
  2193. + return -EIO;
  2194. + return 0;
  2195. +}
  2196. +
  2197. +static int
  2198. +get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk)
  2199. +{
  2200. + struct nand_chip *chip = mtd->priv;
  2201. + int i;
  2202. +
  2203. + *start_blk = 0;
  2204. + for (i = 0; i <= part_num; i++)
  2205. + {
  2206. + if (i == part_num)
  2207. + {
  2208. + // try the remaining space after the last listed partition
  2209. + *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1;
  2210. + if (*start_blk <= *end_blk)
  2211. + {
  2212. + if ((block >= *start_blk) && (block <= *end_blk))
  2213. + break;
  2214. + }
  2215. + }
  2216. + // skip entries that span the whole device (MTDPART_SIZ_FULL)
  2217. + else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL)
  2218. + {
  2219. + continue;
  2220. + }
  2221. + *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1;
  2222. + if ((block >= *start_blk) && (block <= *end_blk))
  2223. + break;
  2224. + *start_blk = *end_blk + 1;
  2225. + }
  2226. + if (*start_blk > *end_blk)
  2227. + {
  2228. + return -1;
  2229. + }
  2230. + return 0;
  2231. +}
  2232. +
  2233. +static int
  2234. +block_remap(struct mtd_info *mtd, int block)
  2235. +{
  2236. + struct nand_chip *chip = mtd->priv;
  2237. + int start_blk, end_blk;
  2238. + int j, block_offset;
  2239. + int bad_block = 0;
  2240. +
  2241. + if (chip->bbt == NULL) {
  2242. + printk("ERROR!! no bbt table for block_remap\n");
  2243. + return -1;
  2244. + }
  2245. +
  2246. + if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) {
  2247. + printk("ERROR!! can not find start_blk and end_blk\n");
  2248. + return -1;
  2249. + }
  2250. +
  2251. + block_offset = block - start_blk;
  2252. + for (j = start_blk; j <= end_blk;j++) {
  2253. + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) {
  2254. + if (!block_offset)
  2255. + break;
  2256. + block_offset--;
  2257. + } else {
  2258. + bad_block++;
  2259. + }
  2260. + }
  2261. + if (j <= end_blk) {
  2262. + return j;
  2263. + } else {
  2264. + // remap to the bad block
  2265. + for (j = end_blk; bad_block > 0; j--)
  2266. + {
  2267. + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0)
  2268. + {
  2269. + bad_block--;
  2270. + if (bad_block <= block_offset)
  2271. + return j;
  2272. + }
  2273. + }
  2274. + }
  2275. +
  2276. + printk("Error!! block_remap error\n");
  2277. + return -1;
  2278. +}
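+/*
+ * Editorial note on the bbt lookup above (not from the original source):
+ * nand_chip->bbt keeps 2 status bits per block, four blocks per byte, so
+ * for block j
+ *
+ *   state = (chip->bbt[j >> 2] >> ((j << 1) & 0x6)) & 0x3;
+ *   // 0x0 = good block, non-zero = bad or reserved
+ *
+ * block_remap() therefore walks the partition counting only good blocks
+ * until the requested logical offset is reached.
+ */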
  2279. +
  2280. +int
  2281. +check_block_remap(struct mtd_info *mtd, int block)
  2282. +{
  2283. + if (shift_on_bbt)
  2284. + return block_remap(mtd, block);
  2285. + else
  2286. + return block;
  2287. +}
  2288. +EXPORT_SYMBOL(check_block_remap);
  2289. +
  2290. +
  2291. +static int
  2292. +write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk)
  2293. +{
  2294. + struct nand_chip *chip = mtd->priv;
  2295. + int i, j, to_page = 0, first_page;
  2296. + char *buf, *oob;
  2297. + int start_blk = 0, end_blk;
  2298. + int mapped_block;
  2299. + int page_per_block_bit = chip->phys_erase_shift - chip->page_shift;
  2300. + int block = page >> page_per_block_bit;
  2301. +
  2302. + // find next available block in the same MTD partition
  2303. + mapped_block = block_remap(mtd, block);
  2304. + if (mapped_block == -1)
  2305. + return NAND_STATUS_FAIL;
  2306. +
  2307. + get_start_end_block(mtd, block, &start_blk, &end_blk);
  2308. +
  2309. + buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA);
  2310. + if (buf == NULL)
  2311. + return -1;
  2312. +
  2313. + oob = buf + mtd->writesize;
  2314. + for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) {
  2315. + if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) {
  2316. + int status;
  2317. + status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit);
  2318. + if (status & NAND_STATUS_FAIL) {
  2319. + mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift);
  2320. + nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3);
  2321. + } else {
  2322. + /* good block */
  2323. + to_page = (*to_blk) << page_per_block_bit;
  2324. + break;
  2325. + }
  2326. + }
  2327. + }
  2328. +
  2329. + if (!to_page) {
  2330. + kfree(buf);
  2331. + return -1;
  2332. + }
  2333. +
  2334. + first_page = (page >> page_per_block_bit) << page_per_block_bit;
  2335. + for (i = 0; i < (1 << page_per_block_bit); i++) {
  2336. + if ((first_page + i) != page) {
  2337. + mtk_nand_read_oob_hw(mtd, chip, (first_page+i));
  2338. + for (j = 0; j < mtd->oobsize; j++)
  2339. + if (chip->oob_poi[j] != (unsigned char)0xff)
  2340. + break;
  2341. + if (j < mtd->oobsize) {
  2342. + mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob);
  2343. + memset(oob, 0xff, mtd->oobsize);
  2344. + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) {
  2345. + int ret, new_blk = 0;
  2346. + nand_bbt_set(mtd, to_page, 0x3);
  2347. + ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk);
  2348. + if (ret) {
  2349. + kfree(buf);
  2350. + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
  2351. + return ret;
  2352. + }
  2353. + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
  2354. + *to_blk = new_blk;
  2355. + to_page = ((*to_blk) << page_per_block_bit);
  2356. + }
  2357. + }
  2358. + } else {
  2359. + memset(chip->oob_poi, 0xff, mtd->oobsize);
  2360. + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) {
  2361. + int ret, new_blk = 0;
  2362. + nand_bbt_set(mtd, to_page, 0x3);
  2363. + ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk);
  2364. + if (ret) {
  2365. + kfree(buf);
  2366. + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
  2367. + return ret;
  2368. + }
  2369. + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
  2370. + *to_blk = new_blk;
  2371. + to_page = ((*to_blk) << page_per_block_bit);
  2372. + }
  2373. + }
  2374. + }
  2375. +
  2376. + kfree(buf);
  2377. +
  2378. + return 0;
  2379. +}
  2380. +
  2381. +static int
  2382. +mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset,
  2383. + int data_len, const u8 * buf, int oob_required, int page, int cached, int raw)
  2384. +{
  2385. + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  2386. + int block = page / page_per_block;
  2387. + u16 page_in_block = page % page_per_block;
  2388. + int mapped_block = block;
  2389. +
  2390. +#if defined(MTK_NAND_BMT)
  2391. + mapped_block = get_mapping_block_index(block);
  2392. + // write bad index into oob
  2393. + if (mapped_block != block)
  2394. + set_bad_index_to_oob(chip->oob_poi, block);
  2395. + else
  2396. + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
  2397. +#else
  2398. + if (shift_on_bbt) {
  2399. + mapped_block = block_remap(mtd, block);
  2400. + if (mapped_block == -1)
  2401. + return NAND_STATUS_FAIL;
  2402. + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
  2403. + return NAND_STATUS_FAIL;
  2404. + }
  2405. +#endif
  2406. + do {
  2407. + if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) {
  2408. + MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
  2409. +#if defined(MTK_NAND_BMT)
  2410. + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
  2411. + MSG(INIT, "Update BMT success\n");
  2412. + return 0;
  2413. + } else {
  2414. + MSG(INIT, "Update BMT fail\n");
  2415. + return -EIO;
  2416. + }
  2417. +#else
  2418. + {
  2419. + int new_blk;
  2420. + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
  2421. + if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0)
  2422. + {
  2423. + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
  2424. + return NAND_STATUS_FAIL;
  2425. + }
  2426. + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
  2427. + break;
  2428. + }
  2429. +#endif
  2430. + } else
  2431. + break;
  2432. + } while(1);
  2433. +
  2434. + return 0;
  2435. +}
  2436. +
  2437. +static void
  2438. +mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
  2439. +{
  2440. + struct nand_chip *nand = mtd->priv;
  2441. +
  2442. + switch (command) {
  2443. + case NAND_CMD_SEQIN:
  2444. + memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
  2445. + g_kCMD.pDataBuf = NULL;
  2446. + g_kCMD.u4RowAddr = page_addr;
  2447. + g_kCMD.u4ColAddr = column;
  2448. + break;
  2449. +
  2450. + case NAND_CMD_PAGEPROG:
  2451. + if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) {
  2452. + u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
  2453. + mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
  2454. + g_kCMD.u4RowAddr = (u32) - 1;
  2455. + g_kCMD.u4OOBRowAddr = (u32) - 1;
  2456. + }
  2457. + break;
  2458. +
  2459. + case NAND_CMD_READOOB:
  2460. + g_kCMD.u4RowAddr = page_addr;
  2461. + g_kCMD.u4ColAddr = column + mtd->writesize;
  2462. + break;
  2463. +
  2464. + case NAND_CMD_READ0:
  2465. + g_kCMD.u4RowAddr = page_addr;
  2466. + g_kCMD.u4ColAddr = column;
  2467. + break;
  2468. +
  2469. + case NAND_CMD_ERASE1:
  2470. + nand->state=FL_ERASING;
  2471. + (void)mtk_nand_reset();
  2472. + mtk_nand_set_mode(CNFG_OP_ERASE);
  2473. + (void)mtk_nand_set_command(NAND_CMD_ERASE1);
  2474. + (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
  2475. + break;
  2476. +
  2477. + case NAND_CMD_ERASE2:
  2478. + (void)mtk_nand_set_command(NAND_CMD_ERASE2);
  2479. + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
  2480. + ;
  2481. + break;
  2482. +
  2483. + case NAND_CMD_STATUS:
  2484. + (void)mtk_nand_reset();
  2485. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2486. + mtk_nand_set_mode(CNFG_OP_SRD);
  2487. + mtk_nand_set_mode(CNFG_READ_EN);
  2488. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  2489. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  2490. + (void)mtk_nand_set_command(NAND_CMD_STATUS);
  2491. + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
  2492. + mb();
  2493. + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
  2494. + g_bcmdstatus = true;
  2495. + break;
  2496. +
  2497. + case NAND_CMD_RESET:
  2498. + (void)mtk_nand_reset();
  2499. + DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN);
  2500. + (void)mtk_nand_set_command(NAND_CMD_RESET);
  2501. + DRV_WriteReg16(NFI_BASE+0x44, 0xF1);
  2502. + while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN))
  2503. + ;
  2504. + break;
  2505. +
  2506. + case NAND_CMD_READID:
  2507. + mtk_nand_reset();
  2508. + /* Disable HW ECC */
  2509. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  2510. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  2511. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
  2512. + (void)mtk_nand_reset();
  2513. + mb();
  2514. + mtk_nand_set_mode(CNFG_OP_SRD);
  2515. + (void)mtk_nand_set_command(NAND_CMD_READID);
  2516. + (void)mtk_nand_set_address(0, 0, 1, 0);
  2517. + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
  2518. + while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
  2519. + ;
  2520. + break;
  2521. +
  2522. + default:
  2523. + BUG();
  2524. + break;
  2525. + }
  2526. +}
  2527. +
  2528. +static void
  2529. +mtk_nand_select_chip(struct mtd_info *mtd, int chip)
  2530. +{
  2531. + if ((chip == -1) && (false == g_bInitDone)) {
  2532. + struct nand_chip *nand = mtd->priv;
  2533. + struct mtk_nand_host *host = nand->priv;
  2534. + struct mtk_nand_host_hw *hw = host->hw;
  2535. + u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512);
  2536. + u32 ecc_bit = 4;
  2537. + u32 spare_bit = PAGEFMT_SPARE_16;
  2538. +
  2539. + if (spare_per_sector >= 28) {
  2540. + spare_bit = PAGEFMT_SPARE_28;
  2541. + ecc_bit = 12;
  2542. + spare_per_sector = 28;
  2543. + } else if (spare_per_sector >= 27) {
  2544. + spare_bit = PAGEFMT_SPARE_27;
  2545. + ecc_bit = 8;
  2546. + spare_per_sector = 27;
  2547. + } else if (spare_per_sector >= 26) {
  2548. + spare_bit = PAGEFMT_SPARE_26;
  2549. + ecc_bit = 8;
  2550. + spare_per_sector = 26;
  2551. + } else if (spare_per_sector >= 16) {
  2552. + spare_bit = PAGEFMT_SPARE_16;
  2553. + ecc_bit = 4;
  2554. + spare_per_sector = 16;
  2555. + } else {
  2556. + MSG(INIT, "[NAND]: NFI does not support oobsize: %x\n", spare_per_sector);
  2557. + ASSERT(0);
  2558. + }
  2559. + mtd->oobsize = spare_per_sector*(mtd->writesize/512);
  2560. + MSG(INIT, "[NAND] select ecc bit: %d, sparesize: %d, spare_per_sector: %d\n", ecc_bit, mtd->oobsize, spare_per_sector);
  2561. + /* Setup PageFormat */
  2562. + if (4096 == mtd->writesize) {
  2563. + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
  2564. + nand->cmdfunc = mtk_nand_command_bp;
  2565. + } else if (2048 == mtd->writesize) {
  2566. + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
  2567. + nand->cmdfunc = mtk_nand_command_bp;
  2568. + }
  2569. + ECC_Config(hw,ecc_bit);
  2570. + g_bInitDone = true;
  2571. + }
  2572. + switch (chip) {
  2573. + case -1:
  2574. + break;
  2575. + case 0:
  2576. + case 1:
  2577. + /* Jun Shen, 2011.04.13 */
  2578. + /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */
  2579. + DRV_WriteReg16(NFI_CSEL_REG16, chip);
  2580. + /* Jun Shen, 2011.04.13 */
  2581. + break;
  2582. + }
  2583. +}
  2584. +
  2585. +static uint8_t
  2586. +mtk_nand_read_byte(struct mtd_info *mtd)
  2587. +{
  2588. + uint8_t retval = 0;
  2589. +
  2590. + if (!mtk_nand_pio_ready()) {
  2591. + printk("pio ready timeout\n");
  2592. + retval = false;
  2593. + }
  2594. +
  2595. + if (g_bcmdstatus) {
  2596. + retval = DRV_Reg8(NFI_DATAR_REG32);
  2597. + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
  2598. + mtk_nand_reset();
  2599. + if (g_bHwEcc) {
  2600. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  2601. + } else {
  2602. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  2603. + }
  2604. + g_bcmdstatus = false;
  2605. + } else
  2606. + retval = DRV_Reg8(NFI_DATAR_REG32);
  2607. +
  2608. + return retval;
  2609. +}
  2610. +
  2611. +static void
  2612. +mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
  2613. +{
  2614. + struct nand_chip *nand = (struct nand_chip *)mtd->priv;
  2615. + struct NAND_CMD *pkCMD = &g_kCMD;
  2616. + u32 u4ColAddr = pkCMD->u4ColAddr;
  2617. + u32 u4PageSize = mtd->writesize;
  2618. +
  2619. + if (u4ColAddr < u4PageSize) {
  2620. + if ((u4ColAddr == 0) && (len >= u4PageSize)) {
  2621. + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
  2622. + if (len > u4PageSize) {
  2623. + u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
  2624. + memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
  2625. + }
  2626. + } else {
  2627. + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
  2628. + memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
  2629. + }
  2630. + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
  2631. + } else {
  2632. + u32 u4Offset = u4ColAddr - u4PageSize;
  2633. + u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
  2634. + if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
  2635. + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
  2636. + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
  2637. + }
  2638. + memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
  2639. + }
  2640. + pkCMD->u4ColAddr += len;
  2641. +}
  2642. +
  2643. +static void
  2644. +mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
  2645. +{
  2646. + struct NAND_CMD *pkCMD = &g_kCMD;
  2647. + u32 u4ColAddr = pkCMD->u4ColAddr;
  2648. + u32 u4PageSize = mtd->writesize;
  2649. + int i4Size, i;
  2650. +
  2651. + if (u4ColAddr >= u4PageSize) {
  2652. + u32 u4Offset = u4ColAddr - u4PageSize;
  2653. + u8 *pOOB = pkCMD->au1OOB + u4Offset;
  2654. + i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
  2655. + for (i = 0; i < i4Size; i++) {
  2656. + pOOB[i] &= buf[i];
  2657. + }
  2658. + } else {
  2659. + pkCMD->pDataBuf = (u8 *) buf;
  2660. + }
  2661. +
  2662. + pkCMD->u4ColAddr += len;
  2663. +}
  2664. +
  2665. +static int
  2666. +mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required)
  2667. +{
  2668. + mtk_nand_write_buf(mtd, buf, mtd->writesize);
  2669. + mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
  2670. + return 0;
  2671. +}
  2672. +
  2673. +static int
  2674. +mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page)
  2675. +{
  2676. + struct NAND_CMD *pkCMD = &g_kCMD;
  2677. + u32 u4ColAddr = pkCMD->u4ColAddr;
  2678. + u32 u4PageSize = mtd->writesize;
  2679. +
  2680. + if (u4ColAddr == 0) {
  2681. + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
  2682. + pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
  2683. + }
  2684. +
  2685. + return 0;
  2686. +}
  2687. +
  2688. +static int
  2689. +mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
  2690. +{
  2691. + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  2692. + int block = page / page_per_block;
  2693. + u16 page_in_block = page % page_per_block;
  2694. + int mapped_block = block;
  2695. +
  2696. +#if defined (MTK_NAND_BMT)
  2697. + mapped_block = get_mapping_block_index(block);
  2698. + return mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
  2699. + mtd->writesize, buf, chip->oob_poi)
  2700. + ? 0 : -EIO;
  2701. +#else
  2702. + if (shift_on_bbt) {
  2703. + mapped_block = block_remap(mtd, block);
  2704. + if (mapped_block == -1)
  2705. + return NAND_STATUS_FAIL;
  2706. + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
  2707. + return NAND_STATUS_FAIL;
  2708. + }
  2709. +
  2710. + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi))
  2711. + return 0;
  2712. + else
  2713. + return -EIO;
  2714. +#endif
  2715. +}
  2716. +
  2717. +int
  2718. +mtk_nand_erase_hw(struct mtd_info *mtd, int page)
  2719. +{
  2720. + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
  2721. +
  2722. + chip->erase_cmd(mtd, page);
  2723. +
  2724. + return chip->waitfunc(mtd, chip);
  2725. +}
  2726. +
  2727. +static int
  2728. +mtk_nand_erase(struct mtd_info *mtd, int page)
  2729. +{
  2730. + // get mapping
  2731. + struct nand_chip *chip = mtd->priv;
  2732. + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  2733. + int page_in_block = page % page_per_block;
  2734. + int block = page / page_per_block;
  2735. + int mapped_block = block;
  2736. +
  2737. +#if defined(MTK_NAND_BMT)
  2738. + mapped_block = get_mapping_block_index(block);
  2739. +#else
  2740. + if (shift_on_bbt) {
  2741. + mapped_block = block_remap(mtd, block);
  2742. + if (mapped_block == -1)
  2743. + return NAND_STATUS_FAIL;
  2744. + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
  2745. + return NAND_STATUS_FAIL;
  2746. + }
  2747. +#endif
  2748. +
  2749. + do {
  2750. + int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
  2751. +
  2752. + if (status & NAND_STATUS_FAIL) {
  2753. +#if defined (MTK_NAND_BMT)
  2754. + if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
  2755. + UPDATE_ERASE_FAIL, NULL, NULL))
  2756. + {
  2757. + MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
  2758. + return 0;
  2759. + } else {
  2760. + MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
  2761. + return NAND_STATUS_FAIL;
  2762. + }
  2763. +#else
  2764. + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
  2765. + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
  2766. + if (shift_on_bbt) {
  2767. + mapped_block = block_remap(mtd, block);
  2768. + if (mapped_block == -1)
  2769. + return NAND_STATUS_FAIL;
  2770. + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
  2771. + return NAND_STATUS_FAIL;
  2772. + } else
  2773. + return NAND_STATUS_FAIL;
  2774. +#endif
  2775. + } else
  2776. + break;
  2777. + } while(1);
  2778. +
  2779. + return 0;
  2780. +}
  2781. +
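+/*
+ * Raw spare-area read.  On flash, every 512-byte sector is followed by
+ * spare_per_sector spare bytes, so sector n's spare starts at column
+ * NAND_SECTOR_SIZE + n * (NAND_SECTOR_SIZE + spare_per_sector).  When more
+ * than one sector's spare is requested and the chip advertises RAMDOM_READ,
+ * the 05h/E0h random-data-output sequence is used instead of re-issuing a
+ * full read for every sector.
+ */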
  2782. +static int
  2783. +mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
  2784. +{
  2785. + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
  2786. + u32 col_addr = 0;
  2787. + u32 sector = 0;
  2788. + int res = 0;
  2789. + u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
  2790. + int randomread = 0;
  2791. + int read_len = 0;
  2792. + int sec_num = 1<<(chip->page_shift-9);
  2793. + int spare_per_sector = mtd->oobsize/sec_num;
  2794. +
  2795. + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
  2796. + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
  2797. + return -EINVAL;
  2798. + }
  2799. + if (len > spare_per_sector)
  2800. + randomread = 1;
  2801. + if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) {
  2802. + while (len > 0) {
  2803. + read_len = min(len, spare_per_sector);
2804. + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // column of this sector's spare area
  2805. + if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) {
2806. + printk(KERN_WARNING "mtk_nand_ready_for_read failed\n");
  2807. + res = -EIO;
  2808. + goto error;
  2809. + }
  2810. + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2811. + printk(KERN_WARNING "mtk_nand_mcu_read_data failed\n");
  2812. + res = -EIO;
  2813. + goto error;
  2814. + }
  2815. + mtk_nand_check_RW_count(read_len);
  2816. + mtk_nand_stop_read();
  2817. + sector++;
  2818. + len -= read_len;
  2819. + }
  2820. + } else {
  2821. + col_addr = NAND_SECTOR_SIZE;
  2822. + if (chip->options & NAND_BUSWIDTH_16)
  2823. + col_addr /= 2;
  2824. + if (!mtk_nand_reset())
  2825. + goto error;
  2826. + mtk_nand_set_mode(0x6000);
  2827. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
  2828. + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
  2829. +
  2830. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
  2831. + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  2832. +
  2833. + mtk_nand_set_autoformat(false);
  2834. +
  2835. + if (!mtk_nand_set_command(NAND_CMD_READ0))
  2836. + goto error;
2837. + // FIXME: handle any address-cycle configuration, not just the 2 column + (addr_cycle - 2) row split assumed here
  2838. + if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
  2839. + goto error;
  2840. + if (!mtk_nand_set_command(NAND_CMD_READSTART))
  2841. + goto error;
  2842. + if (!mtk_nand_status_ready(STA_NAND_BUSY))
  2843. + goto error;
  2844. + read_len = min(len, spare_per_sector);
  2845. + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2846. + printk(KERN_WARNING "mtk_nand_mcu_read_data failed on first sector spare\n");
  2847. + res = -EIO;
  2848. + goto error;
  2849. + }
  2850. + sector++;
  2851. + len -= read_len;
  2852. + mtk_nand_stop_read();
  2853. + while (len > 0) {
  2854. + read_len = min(len, spare_per_sector);
  2855. + if (!mtk_nand_set_command(0x05))
  2856. + goto error;
  2857. + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector);
  2858. + if (chip->options & NAND_BUSWIDTH_16)
  2859. + col_addr /= 2;
  2860. + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
  2861. + DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
  2862. + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
  2863. + if (!mtk_nand_status_ready(STA_ADDR_STATE))
  2864. + goto error;
  2865. + if (!mtk_nand_set_command(0xE0))
  2866. + goto error;
  2867. + if (!mtk_nand_status_ready(STA_NAND_BUSY))
  2868. + goto error;
  2869. + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2870. + printk(KERN_WARNING "mtk_nand_mcu_read_data failed\n");
  2871. + res = -EIO;
  2872. + goto error;
  2873. + }
  2874. + mtk_nand_stop_read();
  2875. + sector++;
  2876. + len -= read_len;
  2877. + }
  2878. + }
  2879. +error:
  2880. + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
  2881. + return res;
  2882. +}
  2883. +
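+/*
+ * Raw spare-area write: for each 512-byte sector the controller seeks to
+ * that sector's spare column, PIO-writes up to spare_per_sector bytes,
+ * then issues PAGEPROG and waits for the chip to report ready.
+ */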
  2884. +static int
  2885. +mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
  2886. +{
  2887. + struct nand_chip *chip = mtd->priv;
  2888. + u32 col_addr = 0;
  2889. + u32 sector = 0;
  2890. + int write_len = 0;
  2891. + int status;
  2892. + int sec_num = 1<<(chip->page_shift-9);
  2893. + int spare_per_sector = mtd->oobsize/sec_num;
  2894. +
  2895. + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
  2896. + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
  2897. + return -EINVAL;
  2898. + }
  2899. +
  2900. + while (len > 0) {
  2901. + write_len = min(len, spare_per_sector);
  2902. + col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE;
  2903. + if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
  2904. + return -EIO;
  2905. + if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
  2906. + return -EIO;
  2907. + (void)mtk_nand_check_RW_count(write_len);
  2908. + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
  2909. + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
  2910. + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
  2911. + ;
  2912. + status = chip->waitfunc(mtd, chip);
  2913. + if (status & NAND_STATUS_FAIL) {
  2914. + printk(KERN_INFO "status: %d\n", status);
  2915. + return -EIO;
  2916. + }
  2917. + len -= write_len;
  2918. + sector++;
  2919. + }
  2920. +
  2921. + return 0;
  2922. +}
  2923. +
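+/*
+ * Repack chip->oob_poi from the logical MTD layout (free/FDM bytes first,
+ * ECC bytes at ecc.layout->eccpos) into the physical per-sector spare
+ * layout, where each sector's spare holds OOB_AVAI_PER_SECTOR FDM bytes
+ * followed by its ECC bytes, before writing it out raw.
+ */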
  2924. +static int
  2925. +mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
  2926. +{
  2927. + int i, iter;
  2928. + int sec_num = 1<<(chip->page_shift-9);
  2929. + int spare_per_sector = mtd->oobsize/sec_num;
  2930. +
  2931. + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
  2932. +
  2933. + // copy ecc data
  2934. + for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
  2935. + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
  2936. + local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]];
  2937. + }
  2938. +
  2939. + // copy FDM data
  2940. + for (i = 0; i < sec_num; i++)
  2941. + memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
  2942. +
  2943. + return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
  2944. +}
  2945. +
  2946. +static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
  2947. +{
  2948. + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  2949. + int block = page / page_per_block;
  2950. + u16 page_in_block = page % page_per_block;
  2951. + int mapped_block = block;
  2952. +
  2953. +#if defined(MTK_NAND_BMT)
  2954. + mapped_block = get_mapping_block_index(block);
  2955. + // write bad index into oob
  2956. + if (mapped_block != block)
  2957. + set_bad_index_to_oob(chip->oob_poi, block);
  2958. + else
  2959. + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
  2960. +#else
  2961. + if (shift_on_bbt)
  2962. + {
  2963. + mapped_block = block_remap(mtd, block);
  2964. + if (mapped_block == -1)
  2965. + return NAND_STATUS_FAIL;
  2966. + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
  2967. + return NAND_STATUS_FAIL;
  2968. + }
  2969. +#endif
  2970. + do {
  2971. + if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
  2972. + MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
  2973. +#if defined(MTK_NAND_BMT)
  2974. + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
  2975. + UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
  2976. + {
  2977. + MSG(INIT, "Update BMT success\n");
  2978. + return 0;
  2979. + } else {
  2980. + MSG(INIT, "Update BMT fail\n");
  2981. + return -EIO;
  2982. + }
  2983. +#else
  2984. + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
  2985. + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
  2986. + if (shift_on_bbt) {
  2987. + mapped_block = block_remap(mtd, mapped_block);
  2988. + if (mapped_block == -1)
  2989. + return NAND_STATUS_FAIL;
  2990. + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
  2991. + return NAND_STATUS_FAIL;
  2992. + } else {
  2993. + return NAND_STATUS_FAIL;
  2994. + }
  2995. +#endif
  2996. + } else
  2997. + break;
  2998. + } while (1);
  2999. +
  3000. + return 0;
  3001. +}
  3002. +
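+/*
+ * Mark a block bad in flash by writing 0x00 into the first spare byte of
+ * the block's first page (the conventional bad-block marker position).
+ */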
  3003. +int
  3004. +mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
  3005. +{
  3006. + struct nand_chip *chip = mtd->priv;
  3007. + int block = (int)offset >> chip->phys_erase_shift;
  3008. + int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
  3009. + u8 buf[8];
  3010. +
  3011. + memset(buf, 0xFF, 8);
  3012. + buf[0] = 0;
  3013. + return mtk_nand_write_oob_raw(mtd, buf, page, 8);
  3014. +}
  3015. +
  3016. +static int
  3017. +mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
  3018. +{
  3019. + struct nand_chip *chip = mtd->priv;
  3020. + int block = (int)offset >> chip->phys_erase_shift;
  3021. + int ret;
  3022. + int mapped_block = block;
  3023. +
  3024. + nand_get_device(chip, mtd, FL_WRITING);
  3025. +
  3026. +#if defined(MTK_NAND_BMT)
  3027. + mapped_block = get_mapping_block_index(block);
  3028. + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
  3029. +#else
  3030. + if (shift_on_bbt) {
  3031. + mapped_block = block_remap(mtd, block);
  3032. + if (mapped_block == -1) {
  3033. + printk("NAND mark bad failed\n");
  3034. + nand_release_device(mtd);
  3035. + return NAND_STATUS_FAIL;
  3036. + }
  3037. + }
  3038. + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
  3039. +#endif
  3040. + nand_release_device(mtd);
  3041. +
  3042. + return ret;
  3043. +}
  3044. +
  3045. +int
  3046. +mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
  3047. +{
  3048. + int i;
  3049. + u8 iter = 0;
  3050. +
  3051. + int sec_num = 1<<(chip->page_shift-9);
  3052. + int spare_per_sector = mtd->oobsize/sec_num;
  3053. +
  3054. + if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
3055. + printk(KERN_ERR "[%s] mtk_nand_read_oob_raw failed\n", __FUNCTION__);
  3056. + return -EIO;
  3057. + }
  3058. +
3059. + // repack from the on-flash spare layout to the logical OOB layout
  3060. + /*********************************************************/
  3061. + /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
  3062. + /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
  3063. + /*********************************************************/
  3064. +
  3065. + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
  3066. + // copy ecc data
  3067. + for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
  3068. + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
  3069. + chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
  3070. + }
  3071. +
  3072. + // copy FDM data
  3073. + for (i = 0; i < sec_num; i++) {
  3074. + memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
  3075. + }
  3076. +
  3077. + return 0;
  3078. +}
  3079. +
  3080. +static int
  3081. +mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
  3082. +{
  3083. + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  3084. + int block = page / page_per_block;
  3085. + u16 page_in_block = page % page_per_block;
  3086. + int mapped_block = block;
  3087. +
  3088. +#if defined (MTK_NAND_BMT)
  3089. + mapped_block = get_mapping_block_index(block);
  3090. + mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
  3091. +#else
  3092. + if (shift_on_bbt) {
  3093. + mapped_block = block_remap(mtd, block);
  3094. + if (mapped_block == -1)
  3095. + return NAND_STATUS_FAIL;
3096. + // still allow reading the OOB of a block marked bad
  3097. + }
  3098. + if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0)
  3099. + return -1;
  3100. +#endif
  3101. + return 0;
  3102. +}
  3103. +
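+/*
+ * A block is considered bad if the first spare byte of its first page is
+ * not 0xFF, matching the marker written by mtk_nand_block_markbad_hw().
+ */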
  3104. +int
  3105. +mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
  3106. +{
  3107. + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
  3108. + int page_addr = (int)(ofs >> chip->page_shift);
  3109. + unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  3110. + unsigned char oob_buf[8];
  3111. +
  3112. + page_addr &= ~(page_per_block - 1);
  3113. + if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) {
3114. + printk(KERN_WARNING "mtk_nand_read_oob_raw failed\n");
  3115. + return 1;
  3116. + }
  3117. +
  3118. + if (oob_buf[0] != 0xff) {
  3119. + printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]);
  3120. + // dump_nfi();
  3121. + return 1;
  3122. + }
  3123. +
  3124. + return 0;
  3125. +}
  3126. +
  3127. +static int
  3128. +mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
  3129. +{
  3130. + int chipnr = 0;
  3131. + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
  3132. + int block = (int)ofs >> chip->phys_erase_shift;
  3133. + int mapped_block = block;
  3134. + int ret;
  3135. +
  3136. + if (getchip) {
  3137. + chipnr = (int)(ofs >> chip->chip_shift);
  3138. + nand_get_device(chip, mtd, FL_READING);
  3139. + /* Select the NAND device */
  3140. + chip->select_chip(mtd, chipnr);
  3141. + }
  3142. +
  3143. +#if defined(MTK_NAND_BMT)
  3144. + mapped_block = get_mapping_block_index(block);
  3145. +#else
  3146. + if (shift_on_bbt) {
  3147. + mapped_block = block_remap(mtd, block);
  3148. + if (mapped_block == -1) {
  3149. + if (getchip)
  3150. + nand_release_device(mtd);
  3151. + return NAND_STATUS_FAIL;
  3152. + }
  3153. + }
  3154. +#endif
  3155. +
  3156. + ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift);
  3157. +#if defined (MTK_NAND_BMT)
  3158. + if (ret) {
  3159. + MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
  3160. + if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
  3161. + MSG(INIT, "Update BMT success\n");
  3162. + ret = 0;
  3163. + } else {
  3164. + MSG(INIT, "Update BMT fail\n");
  3165. + ret = 1;
  3166. + }
  3167. + }
  3168. +#endif
  3169. +
  3170. + if (getchip)
  3171. + nand_release_device(mtd);
  3172. +
  3173. + return ret;
  3174. +}
  3175. +
  3176. +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
  3177. +char gacBuf[4096 + 288];
  3178. +
  3179. +static int
  3180. +mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
  3181. +{
  3182. + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
  3183. + struct NAND_CMD *pkCMD = &g_kCMD;
  3184. + u32 u4PageSize = mtd->writesize;
  3185. + u32 *pSrc, *pDst;
  3186. + int i;
  3187. +
  3188. + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
  3189. +
  3190. + pSrc = (u32 *) buf;
  3191. + pDst = (u32 *) gacBuf;
  3192. + len = len / sizeof(u32);
  3193. + for (i = 0; i < len; ++i) {
  3194. + if (*pSrc != *pDst) {
  3195. + MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
  3196. + return -1;
  3197. + }
  3198. + pSrc++;
  3199. + pDst++;
  3200. + }
  3201. +
  3202. + pSrc = (u32 *) chip->oob_poi;
  3203. + pDst = (u32 *) (gacBuf + u4PageSize);
  3204. +
  3205. + if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
3206. + // TODO: the last two OOB words (pSrc[6], pSrc[7]) are not compared -- ask the hardware designer why
  3208. + MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
  3209. + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
  3210. + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
  3211. + return -1;
  3212. + }
  3213. + return 0;
  3214. +}
  3215. +#endif
  3216. +
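+/*
+ * Controller bring-up: the write to RALINK_SYSCTL_BASE+0x60 sets two 2-bit
+ * fields to 0x2, which appears to be the pin-sharing selection for the
+ * NAND interface on MT7621 (assumption -- not documented here).  The NFI
+ * clock is then enabled, access timing programmed, the state machine
+ * reset, and HW ECC configured (ECC_Config(hw, 4) presumably selects 4-bit
+ * correction, with an 8-byte FDM region per sector).
+ */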
  3217. +static void
  3218. +mtk_nand_init_hw(struct mtk_nand_host *host) {
  3219. + struct mtk_nand_host_hw *hw = host->hw;
  3220. + u32 data;
  3221. +
  3222. + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
  3223. + data &= ~((0x3<<18)|(0x3<<16));
  3224. + data |= ((0x2<<18) |(0x2<<16));
  3225. + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
  3226. +
  3227. + MSG(INIT, "Enable NFI Clock\n");
  3228. + nand_enable_clock();
  3229. +
  3230. + g_bInitDone = false;
  3231. + g_kCMD.u4OOBRowAddr = (u32) - 1;
  3232. +
  3233. + /* Set default NFI access timing control */
  3234. + DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
  3235. + DRV_WriteReg16(NFI_CNFG_REG16, 0);
  3236. + DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
  3237. +
3238. + /* Reset the state machine and flush the data FIFO */
  3239. + (void)mtk_nand_reset();
  3240. +
  3241. + /* Set the ECC engine */
  3242. + if (hw->nand_ecc_mode == NAND_ECC_HW) {
  3243. + MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
  3244. + if (g_bHwEcc)
3245. + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  3246. + ECC_Config(host->hw,4);
  3247. + mtk_nand_configure_fdm(8);
  3248. + mtk_nand_configure_lock();
  3249. + }
  3250. +
  3251. + NFI_SET_REG16(NFI_IOCON_REG16, 0x47);
  3252. +}
  3253. +
  3254. +static int mtk_nand_dev_ready(struct mtd_info *mtd)
  3255. +{
  3256. + return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
  3257. +}
  3258. +
3259. +#define FACT_BBT_BLOCK_NUM 32 // scan the last 32 blocks for the factory BBT
  3260. +#define FACT_BBT_OOB_SIGNATURE 1
  3261. +#define FACT_BBT_SIGNATURE_LEN 7
  3262. +const u8 oob_signature[] = "mtknand";
  3263. +static u8 *fact_bbt = 0;
  3264. +static u32 bbt_size = 0;
  3265. +
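+/*
+ * Factory bad-block table: the last FACT_BBT_BLOCK_NUM blocks of the chip
+ * are scanned for a page whose OOB carries the "mtknand" signature; that
+ * page's data is a bitmap with 2 bits per block (bbt_size = blocks / 4
+ * bytes), which the probe later ORs into the in-memory BBT.
+ */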
  3266. +static int
  3267. +read_fact_bbt(struct mtd_info *mtd, unsigned int page)
  3268. +{
  3269. + struct nand_chip *chip = mtd->priv;
  3270. +
  3271. + // read oob
  3272. + if (mtk_nand_read_oob_hw(mtd, chip, page)==0)
  3273. + {
  3274. + if (chip->oob_poi[nand_badblock_offset] != 0xFF)
  3275. + {
  3276. + printk("Bad Block on Page %x\n", page);
  3277. + return -1;
  3278. + }
  3279. + if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0)
  3280. + {
3281. + printk("factory BBT signature mismatch at page %x\n", page);
  3282. + return -1;
  3283. + }
  3284. + if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi))
  3285. + {
  3286. + printk("Signature matched and data read!\n");
  3287. + memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize);
  3288. + return 0;
  3289. + }
  3290. +
  3291. + }
3292. + printk("read_fact_bbt failed at page %x\n", page);
  3293. + return -1;
  3294. +}
  3295. +
  3296. +static int
  3297. +load_fact_bbt(struct mtd_info *mtd)
  3298. +{
  3299. + struct nand_chip *chip = mtd->priv;
  3300. + int i;
  3301. + u32 total_block;
  3302. +
  3303. + total_block = 1 << (chip->chip_shift - chip->phys_erase_shift);
  3304. + bbt_size = total_block >> 2;
  3305. +
  3306. + if ((!fact_bbt) && (bbt_size))
  3307. + fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL);
  3308. + if (!fact_bbt)
  3309. + return -1;
  3310. +
  3311. + for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--)
  3312. + {
  3313. + if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0)
  3314. + {
  3315. + printk("load_fact_bbt success %d\n", i);
  3316. + return 0;
  3317. + }
  3318. +
  3319. + }
  3320. + printk("load_fact_bbt failed\n");
  3321. + return -1;
  3322. +}
  3323. +
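+/*
+ * Probe flow: select the NAND pin function, allocate the host, hook the
+ * MTK-specific nand_chip callbacks, bring up the controller, reset the
+ * chip and read its ID bytes.  If the ID is not found in the MTK device
+ * table, the geometry falls back to values derived from the chip_mode
+ * bootstrap straps.  Finally nand_scan() is run, the BMT pool (if enabled)
+ * is reserved from the usable size, partitions are registered and the
+ * factory BBT is merged into the in-memory BBT.
+ */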
  3324. +static int
  3325. +mtk_nand_probe(struct platform_device *pdev)
  3326. +{
  3327. + struct mtd_part_parser_data ppdata;
  3328. + struct mtk_nand_host_hw *hw;
  3329. + struct mtd_info *mtd;
  3330. + struct nand_chip *nand_chip;
  3331. + u8 ext_id1, ext_id2, ext_id3;
  3332. + int err = 0;
  3333. + int id;
  3334. + u32 ext_id;
  3335. + int i;
  3336. + u32 data;
  3337. +
  3338. + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
  3339. + data &= ~((0x3<<18)|(0x3<<16));
  3340. + data |= ((0x2<<18) |(0x2<<16));
  3341. + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
  3342. +
3343. + hw = &mt7621_nand_hw;
  3344. + BUG_ON(!hw);
  3345. + /* Allocate memory for the device structure (and zero it) */
  3346. + host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
  3347. + if (!host) {
  3348. + MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
  3349. + return -ENOMEM;
  3350. + }
  3351. +
3352. + /* Align the static buffer to a 16-byte boundary */
3353. + local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16);
3354. + printk(KERN_INFO "Using 16-byte aligned buffer: %p\n", local_buffer_16_align);
  3355. + host->hw = hw;
  3356. +
  3357. + /* init mtd data structure */
  3358. + nand_chip = &host->nand_chip;
  3359. + nand_chip->priv = host; /* link the private data structures */
  3360. +
  3361. + mtd = &host->mtd;
  3362. + mtd->priv = nand_chip;
  3363. + mtd->owner = THIS_MODULE;
  3364. + mtd->name = "MT7621-NAND";
  3365. +
  3366. + hw->nand_ecc_mode = NAND_ECC_HW;
  3367. +
  3368. + /* Set address of NAND IO lines */
  3369. + nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
  3370. + nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
  3371. + nand_chip->chip_delay = 20; /* 20us command delay time */
  3372. + nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
  3373. + nand_chip->ecc.strength = 1;
  3374. + nand_chip->read_byte = mtk_nand_read_byte;
  3375. + nand_chip->read_buf = mtk_nand_read_buf;
  3376. + nand_chip->write_buf = mtk_nand_write_buf;
  3377. +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
  3378. + nand_chip->verify_buf = mtk_nand_verify_buf;
  3379. +#endif
  3380. + nand_chip->select_chip = mtk_nand_select_chip;
  3381. + nand_chip->dev_ready = mtk_nand_dev_ready;
  3382. + nand_chip->cmdfunc = mtk_nand_command_bp;
  3383. + nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
  3384. + nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
  3385. +
  3386. + nand_chip->ecc.layout = &nand_oob_64;
  3387. + nand_chip->ecc.size = hw->nand_ecc_size; //2048
  3388. + nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
  3389. +
3390. + // BMT / bad-block shifting needs these nand_base callbacks overridden
  3391. + nand_chip->write_page = mtk_nand_write_page;
  3392. + nand_chip->ecc.write_oob = mtk_nand_write_oob;
  3393. + nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
  3394. + // nand_chip->erase = mtk_nand_erase;
  3395. + // nand_chip->read_page = mtk_nand_read_page;
  3396. + nand_chip->ecc.read_oob = mtk_nand_read_oob;
  3397. + nand_chip->block_bad = mtk_nand_block_bad;
  3398. +
3399. + /* Bring up the NFI controller */
  3400. + mtk_nand_init_hw(host);
  3401. + /* Select the device */
  3402. + nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
  3403. +
  3404. + /*
  3405. + * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
  3406. + * after power-up
  3407. + */
  3408. + nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
  3409. +
  3410. + memset(&devinfo, 0 , sizeof(flashdev_info));
  3411. +
  3412. + /* Send the command for reading device ID */
  3413. +
  3414. + nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
  3415. +
  3416. + /* Read manufacturer and device IDs */
  3417. + manu_id = nand_chip->read_byte(mtd);
  3418. + dev_id = nand_chip->read_byte(mtd);
  3419. + id = dev_id | (manu_id << 8);
  3420. + ext_id1 = nand_chip->read_byte(mtd);
  3421. + ext_id2 = nand_chip->read_byte(mtd);
  3422. + ext_id3 = nand_chip->read_byte(mtd);
  3423. + ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
  3424. + if (!get_device_info(id, ext_id, &devinfo)) {
  3425. + u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F;
3426. + MSG(INIT, "Device not in MTK table, falling back to bootstrap defaults\r\n");
  3427. + memset(&devinfo, 0 , sizeof(flashdev_info));
  3428. + MSG(INIT, "chip_mode=%08X\n",chip_mode);
  3429. +
  3430. + /* apply bootstrap first */
  3431. + devinfo.addr_cycle = 5;
  3432. + devinfo.iowidth = 8;
  3433. +
  3434. + switch (chip_mode) {
  3435. + case 10:
  3436. + devinfo.pagesize = 2048;
  3437. + devinfo.sparesize = 128;
  3438. + devinfo.totalsize = 128;
  3439. + devinfo.blocksize = 128;
  3440. + break;
  3441. + case 11:
  3442. + devinfo.pagesize = 4096;
  3443. + devinfo.sparesize = 128;
  3444. + devinfo.totalsize = 1024;
  3445. + devinfo.blocksize = 256;
  3446. + break;
  3447. + case 12:
  3448. + devinfo.pagesize = 4096;
  3449. + devinfo.sparesize = 224;
  3450. + devinfo.totalsize = 2048;
  3451. + devinfo.blocksize = 512;
  3452. + break;
  3453. + default:
  3454. + case 1:
  3455. + devinfo.pagesize = 2048;
  3456. + devinfo.sparesize = 64;
  3457. + devinfo.totalsize = 128;
  3458. + devinfo.blocksize = 128;
  3459. + break;
  3460. + }
  3461. +
  3462. + devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING;
  3463. + devinfo.devciename[0] = 'U';
  3464. + devinfo.advancedmode = 0;
  3465. + }
  3466. + mtd->writesize = devinfo.pagesize;
  3467. + mtd->erasesize = (devinfo.blocksize<<10);
  3468. + mtd->oobsize = devinfo.sparesize;
  3469. +
  3470. + nand_chip->chipsize = (devinfo.totalsize<<20);
  3471. + nand_chip->page_shift = ffs(mtd->writesize) - 1;
  3472. + nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1;
  3473. + nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
3474. + nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;
  3475. + nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize;
  3476. + nand_chip->badblockpos = 0;
  3477. +
  3478. + if (devinfo.pagesize == 4096)
  3479. + nand_chip->ecc.layout = &nand_oob_128;
  3480. + else if (devinfo.pagesize == 2048)
  3481. + nand_chip->ecc.layout = &nand_oob_64;
  3482. + else if (devinfo.pagesize == 512)
  3483. + nand_chip->ecc.layout = &nand_oob_16;
  3484. +
  3485. + nand_chip->ecc.layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE);
  3486. + for (i = 0; i < nand_chip->ecc.layout->eccbytes; i++)
  3487. + nand_chip->ecc.layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i;
  3488. +
3489. + MSG(INIT, "NAND device ID: 0x%x\r\n", id);
  3490. + hw->nfi_bus_width = devinfo.iowidth;
  3491. + DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
  3492. +
  3493. + /* 16-bit bus width */
  3494. + if (hw->nfi_bus_width == 16) {
  3495. + MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
  3496. + nand_chip->options |= NAND_BUSWIDTH_16;
  3497. + }
  3498. + mtd->oobsize = devinfo.sparesize;
  3499. + hw->nfi_cs_num = 1;
  3500. +
3501. + /* Scan to find existence of the device */
  3502. + if (nand_scan(mtd, hw->nfi_cs_num)) {
  3503. + MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
  3504. + err = -ENXIO;
  3505. + goto out;
  3506. + }
  3507. +
  3508. + g_page_size = mtd->writesize;
  3509. + platform_set_drvdata(pdev, host);
  3510. + if (hw->nfi_bus_width == 16) {
  3511. + NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
  3512. + }
  3513. +
  3514. + nand_chip->select_chip(mtd, 0);
  3515. +#if defined(MTK_NAND_BMT)
  3516. + nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
  3517. +#endif
  3518. + mtd->size = nand_chip->chipsize;
  3519. +
  3520. + CFG_BLOCKSIZE = mtd->erasesize;
  3521. +
  3522. +#if defined(MTK_NAND_BMT)
  3523. + if (!g_bmt) {
  3524. + if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) {
  3525. + MSG(INIT, "Error: init bmt failed\n");
  3526. + return 0;
  3527. + }
  3528. + }
  3529. +#endif
  3530. +
  3531. + ppdata.of_node = pdev->dev.of_node;
  3532. + err = mtd_device_parse_register(mtd, probe_types, &ppdata,
  3533. + NULL, 0);
  3534. + if (!err) {
  3535. + MSG(INIT, "[mtk_nand] probe successfully!\n");
  3536. + nand_disable_clock();
  3537. + shift_on_bbt = 1;
  3538. + if (load_fact_bbt(mtd) == 0) {
  3539. + int i;
  3540. + for (i = 0; i < 0x100; i++)
  3541. + nand_chip->bbt[i] |= fact_bbt[i];
  3542. + }
  3543. +
  3544. + return err;
  3545. + }
  3546. +
  3547. +out:
  3548. + MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
  3549. + nand_release(mtd);
  3550. + platform_set_drvdata(pdev, NULL);
  3551. + kfree(host);
  3552. + nand_disable_clock();
  3553. + return err;
  3554. +}
  3555. +
  3556. +static int
  3557. +mtk_nand_remove(struct platform_device *pdev)
  3558. +{
  3559. + struct mtk_nand_host *host = platform_get_drvdata(pdev);
  3560. + struct mtd_info *mtd = &host->mtd;
  3561. +
  3562. + nand_release(mtd);
  3563. + kfree(host);
  3564. + nand_disable_clock();
  3565. +
  3566. + return 0;
  3567. +}
  3568. +
  3569. +static const struct of_device_id mt7621_nand_match[] = {
  3570. + { .compatible = "mtk,mt7621-nand" },
  3571. + {},
  3572. +};
  3573. +MODULE_DEVICE_TABLE(of, mt7621_nand_match);
  3574. +
  3575. +static struct platform_driver mtk_nand_driver = {
  3576. + .probe = mtk_nand_probe,
  3577. + .remove = mtk_nand_remove,
  3578. + .driver = {
  3579. + .name = "MT7621-NAND",
  3580. + .owner = THIS_MODULE,
  3581. + .of_match_table = mt7621_nand_match,
  3582. + },
  3583. +};
  3584. +
  3585. +static int __init
  3586. +mtk_nand_init(void)
  3587. +{
3588. + printk("MediaTek NAND driver init, version %s\n", VERSION);
  3589. +
  3590. + return platform_driver_register(&mtk_nand_driver);
  3591. +}
  3592. +
  3593. +static void __exit
  3594. +mtk_nand_exit(void)
  3595. +{
  3596. + platform_driver_unregister(&mtk_nand_driver);
  3597. +}
  3598. +
  3599. +module_init(mtk_nand_init);
  3600. +module_exit(mtk_nand_exit);
  3601. +MODULE_LICENSE("GPL");
  3602. --- /dev/null
  3603. +++ b/drivers/mtd/nand/mtk_nand.h
  3604. @@ -0,0 +1,452 @@
  3605. +#ifndef __MTK_NAND_H
  3606. +#define __MTK_NAND_H
  3607. +
  3608. +#define RALINK_NAND_CTRL_BASE 0xBE003000
  3609. +#define RALINK_SYSCTL_BASE 0xBE000000
  3610. +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
  3611. +/*******************************************************************************
  3612. + * NFI Register Definition
  3613. + *******************************************************************************/
  3614. +
  3615. +#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000))
  3616. +#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004))
  3617. +#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008))
  3618. +#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C))
  3619. +#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010))
  3620. +#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014))
  3621. +
  3622. +#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020))
  3623. +
  3624. +#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030))
  3625. +#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034))
  3626. +#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038))
  3627. +
  3628. +#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040))
  3629. +
  3630. +#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050))
  3631. +#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054))
  3632. +#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
  3633. +
  3634. +#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060))
  3635. +#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064))
  3636. +#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068))
  3637. +
  3638. +#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070))
  3639. +
  3640. +#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080))
  3641. +#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084))
  3642. +
  3643. +#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090))
  3644. +#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094))
  3645. +
  3646. +#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0))
  3647. +#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4))
  3648. +
  3649. +#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100))
  3650. +#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104))
  3651. +#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108))
  3652. +#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
  3653. +#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
  3654. +#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
  3655. +#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
  3656. +#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
  3657. +#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
  3658. +#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
  3659. +#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
  3660. +#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
  3661. +#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
  3662. +#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
  3663. +#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
  3664. +#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
  3665. +#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
  3666. +#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
  3667. +#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
  3668. +#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
  3669. +#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
  3670. +#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
  3671. +#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
  3672. +#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
  3673. +#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
  3674. +#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
  3675. +#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
  3676. +#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
  3677. +#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
  3678. +#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
  3679. +#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
  3680. +#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
  3681. +#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
  3682. +#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
  3683. +#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
  3684. +
  3685. +#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
  3686. +#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
  3687. +#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
  3688. +#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
  3689. +#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
  3690. +
  3691. +
  3692. +/*******************************************************************************
  3693. + * NFI Register Field Definition
  3694. + *******************************************************************************/
  3695. +
  3696. +/* NFI_CNFG */
  3697. +#define CNFG_AHB (0x0001)
  3698. +#define CNFG_READ_EN (0x0002)
  3699. +#define CNFG_DMA_BURST_EN (0x0004)
  3700. +#define CNFG_BYTE_RW (0x0040)
  3701. +#define CNFG_HW_ECC_EN (0x0100)
  3702. +#define CNFG_AUTO_FMT_EN (0x0200)
  3703. +#define CNFG_OP_IDLE (0x0000)
  3704. +#define CNFG_OP_READ (0x1000)
  3705. +#define CNFG_OP_SRD (0x2000)
  3706. +#define CNFG_OP_PRGM (0x3000)
  3707. +#define CNFG_OP_ERASE (0x4000)
  3708. +#define CNFG_OP_RESET (0x5000)
  3709. +#define CNFG_OP_CUST (0x6000)
  3710. +#define CNFG_OP_MODE_MASK (0x7000)
  3711. +#define CNFG_OP_MODE_SHIFT (12)
  3712. +
  3713. +/* NFI_PAGEFMT */
  3714. +#define PAGEFMT_512 (0x0000)
  3715. +#define PAGEFMT_2K (0x0001)
  3716. +#define PAGEFMT_4K (0x0002)
  3717. +
  3718. +#define PAGEFMT_PAGE_MASK (0x0003)
  3719. +
  3720. +#define PAGEFMT_DBYTE_EN (0x0008)
  3721. +
  3722. +#define PAGEFMT_SPARE_16 (0x0000)
  3723. +#define PAGEFMT_SPARE_26 (0x0001)
  3724. +#define PAGEFMT_SPARE_27 (0x0002)
  3725. +#define PAGEFMT_SPARE_28 (0x0003)
  3726. +#define PAGEFMT_SPARE_MASK (0x0030)
  3727. +#define PAGEFMT_SPARE_SHIFT (4)
  3728. +
  3729. +#define PAGEFMT_FDM_MASK (0x0F00)
  3730. +#define PAGEFMT_FDM_SHIFT (8)
  3731. +
  3732. +#define PAGEFMT_FDM_ECC_MASK (0xF000)
  3733. +#define PAGEFMT_FDM_ECC_SHIFT (12)
  3734. +
  3735. +/* NFI_CON */
  3736. +#define CON_FIFO_FLUSH (0x0001)
  3737. +#define CON_NFI_RST (0x0002)
  3738. +#define CON_NFI_SRD (0x0010)
  3739. +
  3740. +#define CON_NFI_NOB_MASK (0x0060)
  3741. +#define CON_NFI_NOB_SHIFT (5)
  3742. +
  3743. +#define CON_NFI_BRD (0x0100)
  3744. +#define CON_NFI_BWR (0x0200)
  3745. +
  3746. +#define CON_NFI_SEC_MASK (0xF000)
  3747. +#define CON_NFI_SEC_SHIFT (12)
  3748. +
  3749. +/* NFI_ACCCON */
  3750. +#define ACCCON_SETTING ()
  3751. +
  3752. +/* NFI_INTR_EN */
  3753. +#define INTR_RD_DONE_EN (0x0001)
  3754. +#define INTR_WR_DONE_EN (0x0002)
  3755. +#define INTR_RST_DONE_EN (0x0004)
  3756. +#define INTR_ERASE_DONE_EN (0x0008)
  3757. +#define INTR_BSY_RTN_EN (0x0010)
  3758. +#define INTR_ACC_LOCK_EN (0x0020)
  3759. +#define INTR_AHB_DONE_EN (0x0040)
  3760. +#define INTR_ALL_INTR_DE (0x0000)
  3761. +#define INTR_ALL_INTR_EN (0x007F)
  3762. +
  3763. +/* NFI_INTR */
  3764. +#define INTR_RD_DONE (0x0001)
  3765. +#define INTR_WR_DONE (0x0002)
  3766. +#define INTR_RST_DONE (0x0004)
  3767. +#define INTR_ERASE_DONE (0x0008)
  3768. +#define INTR_BSY_RTN (0x0010)
  3769. +#define INTR_ACC_LOCK (0x0020)
  3770. +#define INTR_AHB_DONE (0x0040)
  3771. +
  3772. +/* NFI_ADDRNOB */
  3773. +#define ADDR_COL_NOB_MASK (0x0003)
  3774. +#define ADDR_COL_NOB_SHIFT (0)
  3775. +#define ADDR_ROW_NOB_MASK (0x0030)
  3776. +#define ADDR_ROW_NOB_SHIFT (4)
  3777. +
  3778. +/* NFI_STA */
  3779. +#define STA_READ_EMPTY (0x00001000)
  3780. +#define STA_ACC_LOCK (0x00000010)
  3781. +#define STA_CMD_STATE (0x00000001)
  3782. +#define STA_ADDR_STATE (0x00000002)
  3783. +#define STA_DATAR_STATE (0x00000004)
  3784. +#define STA_DATAW_STATE (0x00000008)
  3785. +
  3786. +#define STA_NAND_FSM_MASK (0x1F000000)
  3787. +#define STA_NAND_BUSY (0x00000100)
  3788. +#define STA_NAND_BUSY_RETURN (0x00000200)
  3789. +#define STA_NFI_FSM_MASK (0x000F0000)
  3790. +#define STA_NFI_OP_MASK (0x0000000F)
  3791. +
  3792. +/* NFI_FIFOSTA */
  3793. +#define FIFO_RD_EMPTY (0x0040)
  3794. +#define FIFO_RD_FULL (0x0080)
  3795. +#define FIFO_WR_FULL (0x8000)
  3796. +#define FIFO_WR_EMPTY (0x4000)
  3797. +#define FIFO_RD_REMAIN(x) (0x1F&(x))
  3798. +#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8)
  3799. +
  3800. +/* NFI_ADDRCNTR */
  3801. +#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12)
  3802. +#define ADDRCNTR_OFFSET(x) (0x03FF&(x))
  3803. +
  3804. +/* NFI_LOCK */
  3805. +#define NFI_LOCK_ON (0x0001)
  3806. +
  3807. +/* NFI_LOCKANOB */
  3808. +#define PROG_RADD_NOB_MASK (0x7000)
  3809. +#define PROG_RADD_NOB_SHIFT (12)
  3810. +#define PROG_CADD_NOB_MASK (0x0300)
  3811. +#define PROG_CADD_NOB_SHIFT (8)
  3812. +#define ERASE_RADD_NOB_MASK (0x0070)
  3813. +#define ERASE_RADD_NOB_SHIFT (4)
  3814. +#define ERASE_CADD_NOB_MASK (0x0007)
  3815. +#define ERASE_CADD_NOB_SHIFT (0)
  3816. +
  3817. +/*******************************************************************************
  3818. + * ECC Register Definition
  3819. + *******************************************************************************/
  3820. +
  3821. +#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000))
  3822. +#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004))
  3823. +#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008))
  3824. +#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C))
  3825. +#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010))
  3826. +#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014))
  3827. +#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018))
  3828. +#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C))
  3829. +#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020))
  3830. +#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024))
  3831. +#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028))
  3832. +#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
  3833. +
  3834. +#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100))
  3835. +#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104))
  3836. +#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
  3837. +#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C))
  3838. +#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110))
  3839. +#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114))
  3840. +#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118))
  3841. +#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C))
  3842. +#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120))
  3843. +#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124))
  3844. +#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128))
  3845. +#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C))
  3846. +#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130))
  3847. +#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134))
  3848. +#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138))
  3849. +#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C))
  3850. +#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140))
  3851. +#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144))
  3852. +#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148))
  3853. +#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C))
  3854. +
  3855. +/*******************************************************************************
  3856. + * ECC register definition
  3857. + *******************************************************************************/
  3858. +/* ECC_ENCON */
  3859. +#define ENC_EN (0x0001)
  3860. +#define ENC_DE (0x0000)
  3861. +
  3862. +/* ECC_ENCCNFG */
  3863. +#define ECC_CNFG_ECC4 (0x0000)
  3864. +#define ECC_CNFG_ECC6 (0x0001)
  3865. +#define ECC_CNFG_ECC8 (0x0002)
  3866. +#define ECC_CNFG_ECC10 (0x0003)
  3867. +#define ECC_CNFG_ECC12 (0x0004)
  3868. +#define ECC_CNFG_ECC_MASK (0x00000007)
  3869. +
  3870. +#define ENC_CNFG_NFI (0x0010)
  3871. +#define ENC_CNFG_MODE_MASK (0x0010)
  3872. +
  3873. +#define ENC_CNFG_META6 (0x10300000)
  3874. +#define ENC_CNFG_META8 (0x10400000)
  3875. +
  3876. +#define ENC_CNFG_MSG_MASK (0x1FFF0000)
  3877. +#define ENC_CNFG_MSG_SHIFT (0x10)
  3878. +
  3879. +/* ECC_ENCIDLE */
  3880. +#define ENC_IDLE (0x0001)
  3881. +
  3882. +/* ECC_ENCSTA */
  3883. +#define STA_FSM (0x001F)
  3884. +#define STA_COUNT_PS (0xFF10)
  3885. +#define STA_COUNT_MS (0x3FFF0000)
  3886. +
  3887. +/* ECC_ENCIRQEN */
  3888. +#define ENC_IRQEN (0x0001)
  3889. +
  3890. +/* ECC_ENCIRQSTA */
  3891. +#define ENC_IRQSTA (0x0001)
  3892. +
  3893. +/* ECC_DECCON */
  3894. +#define DEC_EN (0x0001)
  3895. +#define DEC_DE (0x0000)
  3896. +
  3897. +/* ECC_ENCCNFG */
  3898. +#define DEC_CNFG_ECC4 (0x0000)
  3899. +//#define DEC_CNFG_ECC6 (0x0001)
  3900. +//#define DEC_CNFG_ECC12 (0x0002)
  3901. +#define DEC_CNFG_NFI (0x0010)
  3902. +//#define DEC_CNFG_META6 (0x10300000)
  3903. +//#define DEC_CNFG_META8 (0x10400000)
  3904. +
  3905. +#define DEC_CNFG_FER (0x01000)
  3906. +#define DEC_CNFG_EL (0x02000)
  3907. +#define DEC_CNFG_CORRECT (0x03000)
  3908. +#define DEC_CNFG_TYPE_MASK (0x03000)
  3909. +
  3910. +#define DEC_CNFG_EMPTY_EN (0x80000000)
  3911. +
  3912. +#define DEC_CNFG_CODE_MASK (0x1FFF0000)
  3913. +#define DEC_CNFG_CODE_SHIFT (0x10)
  3914. +
  3915. +/* ECC_DECIDLE */
  3916. +#define DEC_IDLE (0x0001)
  3917. +
  3918. +/* ECC_DECFER */
  3919. +#define DEC_FER0 (0x0001)
  3920. +#define DEC_FER1 (0x0002)
  3921. +#define DEC_FER2 (0x0004)
  3922. +#define DEC_FER3 (0x0008)
  3923. +#define DEC_FER4 (0x0010)
  3924. +#define DEC_FER5 (0x0020)
  3925. +#define DEC_FER6 (0x0040)
  3926. +#define DEC_FER7 (0x0080)
  3927. +
  3928. +/* ECC_DECENUM */
  3929. +#define ERR_NUM0 (0x0000000F)
  3930. +#define ERR_NUM1 (0x000000F0)
  3931. +#define ERR_NUM2 (0x00000F00)
  3932. +#define ERR_NUM3 (0x0000F000)
  3933. +#define ERR_NUM4 (0x000F0000)
  3934. +#define ERR_NUM5 (0x00F00000)
  3935. +#define ERR_NUM6 (0x0F000000)
  3936. +#define ERR_NUM7 (0xF0000000)
  3937. +
  3938. +/* ECC_DECDONE */
  3939. +#define DEC_DONE0 (0x0001)
  3940. +#define DEC_DONE1 (0x0002)
  3941. +#define DEC_DONE2 (0x0004)
  3942. +#define DEC_DONE3 (0x0008)
  3943. +#define DEC_DONE4 (0x0010)
  3944. +#define DEC_DONE5 (0x0020)
  3945. +#define DEC_DONE6 (0x0040)
  3946. +#define DEC_DONE7 (0x0080)
  3947. +
  3948. +/* ECC_DECIRQEN */
  3949. +#define DEC_IRQEN (0x0001)
  3950. +
  3951. +/* ECC_DECIRQSTA */
  3952. +#define DEC_IRQSTA (0x0001)
  3953. +
  3954. +#define CHIPVER_ECO_1 (0x8a00)
  3955. +#define CHIPVER_ECO_2 (0x8a01)
  3956. +
  3957. +//#define NAND_PFM
  3958. +
  3959. +/*******************************************************************************
  3960. + * Data Structure Definition
  3961. + *******************************************************************************/
  3962. +struct mtk_nand_host
  3963. +{
  3964. + struct nand_chip nand_chip;
  3965. + struct mtd_info mtd;
  3966. + struct mtk_nand_host_hw *hw;
  3967. +};
  3968. +
  3969. +struct NAND_CMD
  3970. +{
  3971. + u32 u4ColAddr;
  3972. + u32 u4RowAddr;
  3973. + u32 u4OOBRowAddr;
  3974. + u8 au1OOB[288];
  3975. + u8* pDataBuf;
  3976. +#ifdef NAND_PFM
  3977. + u32 pureReadOOB;
  3978. + u32 pureReadOOBNum;
  3979. +#endif
  3980. +};
  3981. +
  3982. +/*
  3983. + * ECC layout control structure. Exported to userspace for
  3984. + * diagnosis and to allow creation of raw images
  3985. +struct nand_ecclayout {
  3986. + uint32_t eccbytes;
  3987. + uint32_t eccpos[64];
  3988. + uint32_t oobavail;
  3989. + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
  3990. +};
  3991. +*/
  3992. +#define __DEBUG_NAND 1 /* Debug information on/off */
  3993. +
  3994. +/* Debug message event */
  3995. +#define DBG_EVT_NONE 0x00000000 /* No event */
  3996. +#define DBG_EVT_INIT 0x00000001 /* Initial related event */
  3997. +#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */
  3998. +#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */
  3999. +#define DBG_EVT_READ 0x00000008 /* Read related event */
  4000. +#define DBG_EVT_WRITE 0x00000010 /* Write related event */
  4001. +#define DBG_EVT_ERASE 0x00000020 /* Erase related event */
  4002. +#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */
  4003. +#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */
  4004. +
  4005. +#define DBG_EVT_ALL 0xffffffff
  4006. +
  4007. +#define DBG_EVT_MASK (DBG_EVT_INIT)
  4008. +
  4009. +#if __DEBUG_NAND
  4010. +#define MSG(evt, fmt, args...) \
  4011. +do { \
  4012. + if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
  4013. + printk(fmt, ##args); \
  4014. + } \
  4015. +} while(0)
  4016. +
  4017. +#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
  4018. +#else
  4019. +#define MSG(evt, fmt, args...) do{}while(0)
  4020. +#define MSG_FUNC_ENTRY(f) do{}while(0)
  4021. +#endif
  4022. +
4023. +#define RAMDOM_READ (1<<0)
4024. +#define CACHE_READ (1<<1)
  4025. +
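+/*
+ * Geometry units, as consumed by mtk_nand_probe(): totalsize is in MB
+ * (chipsize = totalsize << 20), blocksize in KB (erasesize = blocksize << 10),
+ * pagesize and sparesize in bytes.  id holds the manufacturer byte in the
+ * high byte and the device byte in the low byte; ext_id is the next three
+ * ID bytes.
+ */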
  4026. +typedef struct
  4027. +{
4028. + u16 id; // manufacturer ID (high byte) | device ID (low byte)
  4029. + u32 ext_id;
  4030. + u8 addr_cycle;
  4031. + u8 iowidth;
  4032. + u16 totalsize;
  4033. + u16 blocksize;
  4034. + u16 pagesize;
  4035. + u16 sparesize;
  4036. + u32 timmingsetting;
  4037. + char devciename[14];
4038. + u32 advancedmode; // feature flags, e.g. RAMDOM_READ / CACHE_READ
  4039. +}flashdev_info,*pflashdev_info;
  4040. +
  4041. +/* NAND driver */
  4042. +#if 0
  4043. +struct mtk_nand_host_hw {
  4044. + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
  4045. + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
  4046. + unsigned int nfi_cs_num; /* NFI_CS_NUM */
  4047. + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
  4048. + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
  4049. + unsigned int nand_ecc_size;
  4050. + unsigned int nand_ecc_bytes;
  4051. + unsigned int nand_ecc_mode;
  4052. +};
  4053. +extern struct mtk_nand_host_hw mt7621_nand_hw;
  4054. +extern u32 CFG_BLOCKSIZE;
  4055. +#endif
  4056. +#endif
  4057. --- a/drivers/mtd/nand/nand_base.c
  4058. +++ b/drivers/mtd/nand/nand_base.c
  4059. @@ -93,7 +93,7 @@ static struct nand_ecclayout nand_oob_12
  4060. .length = 78} }
  4061. };
  4062. -static int nand_get_device(struct mtd_info *mtd, int new_state);
  4063. +int nand_get_device(struct mtd_info *mtd, int new_state);
  4064. static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
  4065. struct mtd_oob_ops *ops);
  4066. @@ -131,7 +131,7 @@ static int check_offs_len(struct mtd_inf
  4067. *
  4068. * Release chip lock and wake up anyone waiting on the device.
  4069. */
  4070. -static void nand_release_device(struct mtd_info *mtd)
  4071. +void nand_release_device(struct mtd_info *mtd)
  4072. {
  4073. struct nand_chip *chip = mtd->priv;
  4074. @@ -803,7 +803,7 @@ static void panic_nand_get_device(struct
  4075. *
  4076. * Get the device and lock it for exclusive access
  4077. */
  4078. -static int
  4079. +int
  4080. nand_get_device(struct mtd_info *mtd, int new_state)
  4081. {
  4082. struct nand_chip *chip = mtd->priv;
  4083. --- a/drivers/mtd/nand/nand_bbt.c
  4084. +++ b/drivers/mtd/nand/nand_bbt.c
  4085. @@ -1372,4 +1372,23 @@ int nand_markbad_bbt(struct mtd_info *mt
  4086. return ret;
  4087. }
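+/*
+ * Helpers used by the MT7621 driver to poke the in-memory BBT directly:
+ * chip->bbt stores 2 bits per block, so the page number is first converted
+ * to the block's 2-bit slot.  The driver writes 0x3 for a bad block and
+ * treats 0x0 as good.
+ */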
  4088. +void nand_bbt_set(struct mtd_info *mtd, int page, int flag)
  4089. +{
  4090. + struct nand_chip *this = mtd->priv;
  4091. + int block;
  4092. +
  4093. + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
  4094. + this->bbt[block >> 3] &= ~(0x03 << (block & 0x6));
  4095. + this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6);
  4096. +}
  4097. +
  4098. +int nand_bbt_get(struct mtd_info *mtd, int page)
  4099. +{
  4100. + struct nand_chip *this = mtd->priv;
  4101. + int block;
  4102. +
  4103. + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
  4104. + return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
  4105. +}
  4106. +
  4107. EXPORT_SYMBOL(nand_scan_bbt);
  4108. --- /dev/null
  4109. +++ b/drivers/mtd/nand/nand_def.h
  4110. @@ -0,0 +1,123 @@
  4111. +#ifndef __NAND_DEF_H__
  4112. +#define __NAND_DEF_H__
  4113. +
  4114. +#define VERSION "v2.1 Fix AHB virt2phys error"
  4115. +#define MODULE_NAME "# MTK NAND #"
  4116. +#define PROCNAME "driver/nand"
  4117. +
  4118. +#undef TESTTIME
  4119. +//#define __UBOOT_NAND__ 1
  4120. +#define __KERNEL_NAND__ 1
  4121. +//#define __PRELOADER_NAND__ 1
  4122. +//#define PMT 1
  4123. +//#define _MTK_NAND_DUMMY_DRIVER
  4124. +//#define CONFIG_BADBLOCK_CHECK 1
  4125. +//#ifdef CONFIG_BADBLOCK_CHECK
  4126. +//#define MTK_NAND_BMT 1
  4127. +//#endif
  4128. +#define ECC_ENABLE 1
  4129. +#define MANUAL_CORRECT 1
  4130. +//#define __INTERNAL_USE_AHB_MODE__ (0)
  4131. +#define SKIP_BAD_BLOCK
  4132. +#define FACT_BBT
  4133. +
  4134. +#ifndef NAND_OTP_SUPPORT
  4135. +#define NAND_OTP_SUPPORT 0
  4136. +#endif
  4137. +
  4138. +/*******************************************************************************
  4139. + * Macro definition
  4140. + *******************************************************************************/
  4141. +//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value)))
  4142. +//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
  4143. +//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
  4144. +//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
  4145. +
  4146. +#if defined (__KERNEL_NAND__)
  4147. +#define NFI_SET_REG32(reg, value) \
  4148. +do { \
  4149. + g_value = (DRV_Reg32(reg) | (value));\
  4150. + DRV_WriteReg32(reg, g_value); \
  4151. +} while(0)
  4152. +
  4153. +#define NFI_SET_REG16(reg, value) \
  4154. +do { \
  4155. + g_value = (DRV_Reg16(reg) | (value));\
  4156. + DRV_WriteReg16(reg, g_value); \
  4157. +} while(0)
  4158. +
  4159. +#define NFI_CLN_REG32(reg, value) \
  4160. +do { \
  4161. + g_value = (DRV_Reg32(reg) & (~(value)));\
  4162. + DRV_WriteReg32(reg, g_value); \
  4163. +} while(0)
  4164. +
  4165. +#define NFI_CLN_REG16(reg, value) \
  4166. +do { \
  4167. + g_value = (DRV_Reg16(reg) & (~(value)));\
  4168. + DRV_WriteReg16(reg, g_value); \
  4169. +} while(0)
  4170. +#endif
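+/*
+ * Note: the register helpers above do a read-modify-write through the
+ * shared global g_value, so they are not safe against concurrent callers;
+ * presumably serialization is provided by the MTD layer's chip locking.
+ */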
  4171. +
  4172. +#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
  4173. +#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
  4174. +
  4175. +
  4176. +#define NAND_SECTOR_SIZE (512)
  4177. +#define OOB_PER_SECTOR (16)
  4178. +#define OOB_AVAI_PER_SECTOR (8)
  4179. +
  4180. +#ifndef PART_SIZE_BMTPOOL
  4181. +#define BMT_POOL_SIZE (80)
  4182. +#else
  4183. +#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
  4184. +#endif
  4185. +
  4186. +#define PMT_POOL_SIZE (2)
  4187. +
  4188. +#define TIMEOUT_1 0x1fff
  4189. +#define TIMEOUT_2 0x8ff
  4190. +#define TIMEOUT_3 0xffff
  4191. +#define TIMEOUT_4 0xffff//5000 //PIO
  4192. +
  4193. +
4194. +/* temporary definitions */
+#if !defined (__KERNEL_NAND__)
+#define KERN_INFO
+#define KERN_WARNING
+#define KERN_ERR
+#define PAGE_SIZE (4096)
+#endif
+#define AddStorageTrace //AddStorageTrace
+#define STORAGE_LOGGER_MSG_NAND 0
+#define NFI_BASE RALINK_NAND_CTRL_BASE
+#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE
+
+#ifdef __INTERNAL_USE_AHB_MODE__
+#define MT65xx_POLARITY_LOW 0
+#define MT65XX_PDN_PERI_NFI 0
+#define MT65xx_EDGE_SENSITIVE 0
+#define MT6575_NFI_IRQ_ID (58)
+#endif
+
+#if defined (__KERNEL_NAND__)
+#define RALINK_REG(x) (*((volatile u32 *)(x)))
+#define __virt_to_phys(x) virt_to_phys((volatile void*)x)
+#else
+#define CONFIG_MTD_NAND_VERIFY_WRITE (1)
+#define printk printf
+#define ra_dbg printf
+#define BUG() //BUG()
+#define BUG_ON(x) //BUG_ON()
+#define NUM_PARTITIONS 1
+#endif
+
+#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333)
+
+//U-Boot only supports one chip select
+#define NFI_CS_NUM (1)
+#define NFI_DEFAULT_CS (0)
+
+#include "mt6575_typedefs.h"
+
+#endif /* __NAND_DEF_H__ */
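
The NFI_SET_*/NFI_CLN_* macros defined in this header are plain read-modify-write wrappers around the MediaTek register accessors: each one reads the register, ORs in (or masks out) the given bits via the driver-global g_value scratch variable, and writes the result back. A minimal usage sketch follows; the register and bit names (NFI_CNFG_REG16, CNFG_AHB) are illustrative assumptions, not taken from this patch:

    /* Sketch only: set, then clear, a single configuration bit.
     * Each helper expands to: read the register, combine it with the mask
     * into g_value, then write g_value back (non-atomic, so callers must
     * serialise register access themselves). Names below are placeholders. */
    NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);   /* switch the NFI to AHB/DMA mode */
    NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);   /* back to MCU/PIO mode           */
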
--- /dev/null
+++ b/drivers/mtd/nand/nand_device_list.h
@@ -0,0 +1,55 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2010. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek Software")
+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
+ * applicable license agreements with MediaTek Inc.
+ */
+
+#ifndef __NAND_DEVICE_LIST_H__
+#define __NAND_DEVICE_LIST_H__
+
+static const flashdev_info gen_FlashTable[]={
+ {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0},
+ {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0},
+ {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0},
+ {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
+ {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
+ {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
+ {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
+ {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
+ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
+ {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0},
+ {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0},
+ {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0},
+};
+
+
+#endif
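
Each row of gen_FlashTable describes one supported chip: the two-byte device ID, the extended ID bytes, address cycles, bus width, total size, block size, page size and spare size, a controller timing value, the part name, and an advanced-mode flag; the all-zero row terminates the list. A sketch of how such a table is typically scanned at probe time, assuming the flashdev_info fields are named after those columns (id, ext_id, ...), which is not confirmed by this patch:

    /* Sketch only: walk the table until the ID read back from the chip matches.
     * The function name and field names (id, ext_id) are assumptions based on
     * the column order above, not the driver's exact code. */
    static const flashdev_info *mt7621_nand_lookup(u16 id, u32 ext_id)
    {
            int i;

            for (i = 0; gen_FlashTable[i].id != 0; i++) {
                    if (gen_FlashTable[i].id == id &&
                        gen_FlashTable[i].ext_id == ext_id)
                            return &gen_FlashTable[i];
            }
            return NULL;    /* unknown device: caller falls back or aborts probe */
    }
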
--- /dev/null
+++ b/drivers/mtd/nand/partition.h
@@ -0,0 +1,115 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2010. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek Software")
+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
+ * applicable license agreements with MediaTek Inc.
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#define RECONFIG_PARTITION_SIZE 1
+
+#define MTD_BOOT_PART_SIZE 0x80000
+#define MTD_CONFIG_PART_SIZE 0x20000
+#define MTD_FACTORY_PART_SIZE 0x20000
+
+extern unsigned int CFG_BLOCKSIZE;
+#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2)
+#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2)
+#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1)
+
+/*=======================================================================*/
+/* NAND PARTITION Mapping */
+/*=======================================================================*/
+//#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition g_pasStatic_Partition[] = {
+ {
+ name: "ALL",
+ size: MTDPART_SIZ_FULL,
+ offset: 0,
+ },
+ /* Put your own partition definitions here */
+ {
+ name: "Bootloader",
+ size: MTD_BOOT_PART_SIZE,
+ offset: 0,
+ }, {
+ name: "Config",
+ size: MTD_CONFIG_PART_SIZE,
+ offset: MTDPART_OFS_APPEND
+ }, {
+ name: "Factory",
+ size: MTD_FACTORY_PART_SIZE,
+ offset: MTDPART_OFS_APPEND
+#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
+ }, {
+ name: "Kernel",
+ size: MTD_KERN_PART_SIZE,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ name: "RootFS",
+ size: MTD_ROOTFS_PART_SIZE,
+ offset: MTDPART_OFS_APPEND,
+#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING
+ }, {
+ name: "Kernel_RootFS",
+ size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE,
+ offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE,
+#endif
+#else //CONFIG_RT2880_ROOTFS_IN_RAM
+ }, {
+ name: "Kernel",
+ size: 0x10000,
+ offset: MTDPART_OFS_APPEND,
+#endif
+#ifdef CONFIG_DUAL_IMAGE
+ }, {
+ name: "Kernel2",
+ size: MTD_KERN2_PART_SIZE,
+ offset: MTD_KERN2_PART_OFFSET,
+#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
+ }, {
+ name: "RootFS2",
+ size: MTD_ROOTFS2_PART_SIZE,
+ offset: MTD_ROOTFS2_PART_OFFSET,
+#endif
+#endif
+ }
+
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition)
+extern int part_num; // = NUM_PARTITIONS;
+//#endif
+#undef RECONFIG_PARTITION_SIZE
+
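
g_pasStatic_Partition is only a static description; the driver still has to hand it to the MTD core once the chip has been probed. A sketch of that step under the in-kernel partitioning API, assuming the usual mtd_device_register() path rather than the driver's exact call sequence:

    /* Sketch only: register the static partition map for the probed chip.
     * part_num is the extern declared above; the mtd_info is the one the
     * NAND core fills in after nand_scan() completes. Function name is a
     * placeholder. */
    static int mt7621_nand_add_partitions(struct mtd_info *mtd)
    {
            part_num = NUM_PARTITIONS;
            return mtd_device_register(mtd, g_pasStatic_Partition, part_num);
    }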