001-fix_make_headers_install.patch

From 5eac4d66049ab7d14a2b7311610c8cb85a2c1bf1 Mon Sep 17 00:00:00 2001
From: Nicolas Thill <nico@openwrt.org>
Date: Fri, 20 Mar 2015 00:31:06 +0100
Subject: [PATCH] UM: fix make headers_install after UAPI header installation
Signed-off-by: Nicolas Thill <nico@openwrt.org>
---
From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Sun, 17 Mar 2013 20:12:10 +0100
Subject: [PATCH] UM: fix make headers_install after UAPI header installation
Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
header installation and checking) breaks UML make headers_install with
the following:

$ ARCH=um make headers_install
CHK include/generated/uapi/linux/version.h
UPD include/generated/uapi/linux/version.h
HOSTCC scripts/basic/fixdep
WRAP arch/um/include/generated/asm/bug.h
[snip]
WRAP arch/um/include/generated/asm/trace_clock.h
SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
HOSTCC scripts/unifdef
Makefile:912: *** Headers not exportable for the um architecture. Stop.
zsh: exit 2 ARCH=um make headers_install

The reason is that the top-level Makefile does the following:

$(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
    $(error Headers not exportable for the $(SRCARCH) architecture))

We end up in the else part of the $(if) statement because UML still uses
the old path, arch/um/include/asm/Kbuild. This patch fixes the issue by
moving the header files to arch/um/include/uapi/asm/, thus making
headers_install (and other make targets checking for uapi) succeed.
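
With the Kbuild file under arch/um/include/uapi/asm/, the $(wildcard)
check above matches and the header export can proceed. A quick way to
verify the fix (a sketch; exact kbuild output varies by tree):

$ test -f arch/um/include/uapi/asm/Kbuild && echo present
present
$ ARCH=um make headers_install

The first command confirms the path the top-level Makefile tests for now
exists; the second should run to completion instead of stopping at
Makefile:912.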
Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
Richard, this has been broken from 3.7 onwards; if you want me to send
you separate patches for 3.7 and 3.8, let me know. Thanks!
--- a/arch/um/include/asm/Kbuild
+++ /dev/null
@@ -1,31 +0,0 @@
-generic-y += barrier.h
-generic-y += bug.h
-generic-y += clkdev.h
-generic-y += cputime.h
-generic-y += current.h
-generic-y += delay.h
-generic-y += device.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += ftrace.h
-generic-y += futex.h
-generic-y += hardirq.h
-generic-y += hash.h
-generic-y += hw_irq.h
-generic-y += io.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += mcs_spinlock.h
-generic-y += mutex.h
-generic-y += param.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += scatterlist.h
-generic-y += sections.h
-generic-y += switch_to.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
--- a/arch/um/include/asm/a.out-core.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef __UM_A_OUT_CORE_H
-#define __UM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
-{
-}
-
-#endif /* __KERNEL__ */
-#endif /* __UM_A_OUT_CORE_H */
--- a/arch/um/include/asm/bugs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_BUGS_H
-#define __UM_BUGS_H
-
-void check_bugs(void);
-
-#endif
--- a/arch/um/include/asm/cache.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __UM_CACHE_H
-#define __UM_CACHE_H
-
-
-#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
-# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-#elif defined(CONFIG_UML_X86) /* 64-bit */
-# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
-#else
-/* XXX: this was taken from x86, now it's completely random. Luckily only
- * affects SMP padding. */
-# define L1_CACHE_SHIFT 5
-#endif
-
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#endif
--- a/arch/um/include/asm/common.lds.S
+++ /dev/null
@@ -1,107 +0,0 @@
-#include <asm-generic/vmlinux.lds.h>
-
- .fini : { *(.fini) } =0x9090
- _etext = .;
- PROVIDE (etext = .);
-
- . = ALIGN(4096);
- _sdata = .;
- PROVIDE (sdata = .);
-
- RODATA
-
- .unprotected : { *(.unprotected) }
- . = ALIGN(4096);
- PROVIDE (_unprotected_end = .);
-
- . = ALIGN(4096);
- .note : { *(.note.*) }
- EXCEPTION_TABLE(0)
-
- BUG_TABLE
-
- .uml.setup.init : {
- __uml_setup_start = .;
- *(.uml.setup.init)
- __uml_setup_end = .;
- }
-
- .uml.help.init : {
- __uml_help_start = .;
- *(.uml.help.init)
- __uml_help_end = .;
- }
-
- .uml.postsetup.init : {
- __uml_postsetup_start = .;
- *(.uml.postsetup.init)
- __uml_postsetup_end = .;
- }
-
- .init.setup : {
- INIT_SETUP(0)
- }
-
- PERCPU_SECTION(32)
-
- .initcall.init : {
- INIT_CALLS
- }
-
- .con_initcall.init : {
- CON_INITCALL
- }
-
- .uml.initcall.init : {
- __uml_initcall_start = .;
- *(.uml.initcall.init)
- __uml_initcall_end = .;
- }
-
- SECURITY_INIT
-
- .exitcall : {
- __exitcall_begin = .;
- *(.exitcall.exit)
- __exitcall_end = .;
- }
-
- .uml.exitcall : {
- __uml_exitcall_begin = .;
- *(.uml.exitcall.exit)
- __uml_exitcall_end = .;
- }
-
- . = ALIGN(4);
- .altinstructions : {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
- .altinstr_replacement : { *(.altinstr_replacement) }
- /* .exit.text is discard at runtime, not link time, to deal with references
- from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
-
- .preinit_array : {
- __preinit_array_start = .;
- *(.preinit_array)
- __preinit_array_end = .;
- }
- .init_array : {
- __init_array_start = .;
- *(.init_array)
- __init_array_end = .;
- }
- .fini_array : {
- __fini_array_start = .;
- *(.fini_array)
- __fini_array_end = .;
- }
-
- . = ALIGN(4096);
- .init.ramfs : {
- INIT_RAM_FS
- }
-
--- a/arch/um/include/asm/dma.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __UM_DMA_H
-#define __UM_DMA_H
-
-#include <asm/io.h>
-
-extern unsigned long uml_physmem;
-
-#define MAX_DMA_ADDRESS (uml_physmem)
-
-#endif
--- a/arch/um/include/asm/fixmap.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __UM_FIXMAP_H
-#define __UM_FIXMAP_H
-
-#include <asm/processor.h>
-#include <asm/kmap_types.h>
-#include <asm/archparam.h>
-#include <asm/page.h>
-#include <linux/threads.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanizm,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-
-#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#include <asm-generic/fixmap.h>
-
-#endif
--- a/arch/um/include/asm/irq.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __UM_IRQ_H
-#define __UM_IRQ_H
-
-#define TIMER_IRQ 0
-#define UMN_IRQ 1
-#define CONSOLE_IRQ 2
-#define CONSOLE_WRITE_IRQ 3
-#define UBD_IRQ 4
-#define UM_ETH_IRQ 5
-#define SSL_IRQ 6
-#define SSL_WRITE_IRQ 7
-#define ACCEPT_IRQ 8
-#define MCONSOLE_IRQ 9
-#define WINCH_IRQ 10
-#define SIGIO_WRITE_IRQ 11
-#define TELNETD_IRQ 12
-#define XTERM_IRQ 13
-#define RANDOM_IRQ 14
-
-#define LAST_IRQ RANDOM_IRQ
-#define NR_IRQS (LAST_IRQ + 1)
-
-#endif
--- a/arch/um/include/asm/irqflags.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __UM_IRQFLAGS_H
-#define __UM_IRQFLAGS_H
-
-extern int get_signals(void);
-extern int set_signals(int enable);
-extern void block_signals(void);
-extern void unblock_signals(void);
-
-static inline unsigned long arch_local_save_flags(void)
-{
- return get_signals();
-}
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
- set_signals(flags);
-}
-
-static inline void arch_local_irq_enable(void)
-{
- unblock_signals();
-}
-
-static inline void arch_local_irq_disable(void)
-{
- block_signals();
-}
-
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- flags = arch_local_save_flags();
- arch_local_irq_disable();
- return flags;
-}
-
-static inline bool arch_irqs_disabled(void)
-{
- return arch_local_save_flags() == 0;
-}
-
-#endif
--- a/arch/um/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_KMAP_TYPES_H
-#define __UM_KMAP_TYPES_H
-
-/* No more #include "asm/arch/kmap_types.h" ! */
-
-#define KM_TYPE_NR 14
-
-#endif
--- a/arch/um/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
--- a/arch/um/include/asm/mmu.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __ARCH_UM_MMU_H
-#define __ARCH_UM_MMU_H
-
-#include <mm_id.h>
-#include <asm/mm_context.h>
-
-typedef struct mm_context {
- struct mm_id id;
- struct uml_arch_mm_context arch;
- struct page *stub_pages[2];
-} mm_context_t;
-
-extern void __switch_mm(struct mm_id * mm_idp);
-
-/* Avoid tangled inclusion with asm/ldt.h */
-extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
-extern void free_ldt(struct mm_context *mm);
-
-#endif
--- a/arch/um/include/asm/mmu_context.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_MMU_CONTEXT_H
-#define __UM_MMU_CONTEXT_H
-
-#include <linux/sched.h>
-#include <asm/mmu.h>
-
-extern void uml_setup_stubs(struct mm_struct *mm);
-extern void arch_exit_mmap(struct mm_struct *mm);
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
-extern void force_flush_all(void);
-
-static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
-{
- /*
- * This is called by fs/exec.c and sys_unshare()
- * when the new ->mm is used for the first time.
- */
- __switch_mm(&new->context.id);
- down_write(&new->mmap_sem);
- uml_setup_stubs(new);
- up_write(&new->mmap_sem);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
- unsigned cpu = smp_processor_id();
-
- if(prev != next){
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
- if(next != &init_mm)
- __switch_mm(&next->context.id);
- }
-}
-
-static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-{
- uml_setup_stubs(mm);
-}
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-}
-
-extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
-
-extern void destroy_context(struct mm_struct *mm);
-
-#endif
--- a/arch/um/include/asm/page.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- * Copyright 2003 PathScale, Inc.
- * Licensed under the GPL
- */
-
-#ifndef __UM_PAGE_H
-#define __UM_PAGE_H
-
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#ifndef __ASSEMBLY__
-
-struct page;
-
-#include <linux/types.h>
-#include <asm/vm-flags.h>
-
-/*
- * These are used to make use of C type-checking..
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
-
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
-
-#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
-#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
-#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
- smp_wmb(); \
- (to).pte_low = (from).pte_low; })
-#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
-#define pte_set_val(pte, phys, prot) \
- ({ (pte).pte_high = (phys) >> 32; \
- (pte).pte_low = (phys) | pgprot_val(prot); })
-
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-
-typedef unsigned long long pfn_t;
-typedef unsigned long long phys_t;
-
-#else
-
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-#endif
-
-#define pte_val(x) ((x).pte)
-
-
-#define pte_get_bits(p, bits) ((p).pte & (bits))
-#define pte_set_bits(p, bits) ((p).pte |= (bits))
-#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
-#define pte_copy(to, from) ((to).pte = (from).pte)
-#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
-#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
-
-typedef unsigned long pfn_t;
-typedef unsigned long phys_t;
-
-#endif
-
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-typedef struct page *pgtable_t;
-
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-extern unsigned long uml_physmem;
-
-#define PAGE_OFFSET (uml_physmem)
-#define KERNELBASE PAGE_OFFSET
-
-#define __va_space (8*1024*1024)
-
-#include <mem.h>
-
-/* Cast to unsigned long before casting to void * to avoid a warning from
- * mmap_kmem about cutting a long long down to a void *. Not sure that
- * casting is the right thing, but 32-bit UML can't have 64-bit virtual
- * addresses
- */
-#define __pa(virt) to_phys((void *) (unsigned long) (virt))
-#define __va(phys) to_virt((unsigned long) (phys))
-
-#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
-#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
-
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_X86_32
-#define __HAVE_ARCH_GATE_AREA 1
-#endif
-
-#endif /* __UM_PAGE_H */
--- a/arch/um/include/asm/pgalloc.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGALLOC_H
-#define __UM_PGALLOC_H
-
-#include <linux/mm.h>
-
-#define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
-
-#define pmd_populate(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + \
- ((unsigned long long)page_to_pfn(pte) << \
- (unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-/*
- * Allocate and free page tables.
- */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
-#define __pte_free_tlb(tlb,pte, address) \
-do { \
- pgtable_page_dtor(pte); \
- tlb_remove_page((tlb),(pte)); \
-} while (0)
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
-
-#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
-#endif
-
-#define check_pgt_cache() do { } while (0)
-
-#endif
-
--- a/arch/um/include/asm/pgtable-2level.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_2LEVEL_H
-#define __UM_PGTABLE_2LEVEL_H
-
-#include <asm-generic/pgtable-nopmd.h>
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-
-#define PGDIR_SHIFT 22
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/*
- * entries per page directory level: the i386 is two-level, so
- * we don't really have any PMD directory physically.
- */
-#define PTRS_PER_PTE 1024
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- pte_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- pgd_val(e))
-
-static inline int pgd_newpage(pgd_t pgd) { return 0; }
-static inline void pgd_mkuptodate(pgd_t pgd) { }
-
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-
-#define pte_pfn(x) phys_to_pfn(pte_val(x))
-#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
-
-/*
- * Bits 0 through 4 are taken
- */
-#define PTE_FILE_MAX_BITS 27
-
-#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
-
-#endif
--- a/arch/um/include/asm/pgtable-3level.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2003 PathScale Inc
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_3LEVEL_H
-#define __UM_PGTABLE_3LEVEL_H
-
-#include <asm-generic/pgtable-nopud.h>
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-
-#ifdef CONFIG_64BIT
-#define PGDIR_SHIFT 30
-#else
-#define PGDIR_SHIFT 31
-#endif
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/* PMD_SHIFT determines the size of the area a second-level page table can
- * map
- */
-
-#define PMD_SHIFT 21
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/*
- * entries per page directory level
- */
-
-#define PTRS_PER_PTE 512
-#ifdef CONFIG_64BIT
-#define PTRS_PER_PMD 512
-#define PTRS_PER_PGD 512
-#else
-#define PTRS_PER_PMD 1024
-#define PTRS_PER_PGD 1024
-#endif
-
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- pgd_val(e))
-
-#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
-#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
-#define pud_populate(mm, pud, pmd) \
- set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
-
-#ifdef CONFIG_64BIT
-#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-#else
-#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
-#endif
-
-static inline int pgd_newpage(pgd_t pgd)
-{
- return(pgd_val(pgd) & _PAGE_NEWPAGE);
-}
-
-static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
-
-#ifdef CONFIG_64BIT
-#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
-#else
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-#endif
-
-struct mm_struct;
-extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
-
-static inline void pud_clear (pud_t *pud)
-{
- set_pud(pud, __pud(_PAGE_NEWPAGE));
-}
-
-#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
- pmd_index(address))
-
-static inline unsigned long pte_pfn(pte_t pte)
-{
- return phys_to_pfn(pte_val(pte));
-}
-
-static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
-{
- pte_t pte;
- phys_t phys = pfn_to_phys(page_nr);
-
- pte_set_val(pte, phys, pgprot);
- return pte;
-}
-
-static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
-{
- return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
-}
-
-/*
- * Bits 0 through 3 are taken in the low part of the pte,
- * put the 32 bits of offset into the high part.
- */
-#define PTE_FILE_MAX_BITS 32
-
-#ifdef CONFIG_64BIT
-
-#define pte_to_pgoff(p) ((p).pte >> 32)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
-
-#else
-
-#define pte_to_pgoff(pte) ((pte).pte_high)
-
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-
-#endif
-
-#endif
-
--- a/arch/um/include/asm/pgtable.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_H
-#define __UM_PGTABLE_H
-
-#include <asm/fixmap.h>
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_NEWPAGE 0x002
-#define _PAGE_NEWPROT 0x004
-#define _PAGE_RW 0x020
-#define _PAGE_USER 0x040
-#define _PAGE_ACCESSED 0x080
-#define _PAGE_DIRTY 0x100
-/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
- pte_present gives true */
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-#include <asm/pgtable-3level.h>
-#else
-#include <asm/pgtable-2level.h>
-#endif
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-/* zero page used for uninitialized stuff */
-extern unsigned long *empty_zero_page;
-
-#define pgtable_cache_init() do ; while (0)
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */

-extern unsigned long end_iomem;
-
-#define VMALLOC_OFFSET (__va_space)
-#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
-#define MODULES_VADDR VMALLOC_START
-#define MODULES_END VMALLOC_END
-#define MODULES_LEN (MODULES_VADDR - MODULES_END)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-
-/*
- * The i386 can't do page protection for execute, and considers that the same
- * are read.
- * Also, write permissions imply read permissions. This is the closest we can
- * get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-
-#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
-
-#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
-
-#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
-#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
-
-#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
-#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
-
-#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
-
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/*
- * =================================
- * Flags checking section.
- * =================================
- */
-
-static inline int pte_none(pte_t pte)
-{
- return pte_is_zero(pte);
-}
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte)
-{
- return((pte_get_bits(pte, _PAGE_USER)) &&
- !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-static inline int pte_exec(pte_t pte){
- return((pte_get_bits(pte, _PAGE_USER)) &&
- !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-static inline int pte_write(pte_t pte)
-{
- return((pte_get_bits(pte, _PAGE_RW)) &&
- !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_FILE);
-}
-
-static inline int pte_dirty(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_DIRTY);
-}
-
-static inline int pte_young(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_ACCESSED);
-}
-
-static inline int pte_newpage(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_NEWPAGE);
-}
-
-static inline int pte_newprot(pte_t pte)
-{
- return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
-}
-
-static inline int pte_special(pte_t pte)
-{
- return 0;
-}
-
-/*
- * =================================
- * Flags setting section.
- * =================================
- */
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_DIRTY);
- return(pte);
-}
-
-static inline pte_t pte_mkold(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_ACCESSED);
- return(pte);
-}
-
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkread(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_USER);
- return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_DIRTY);
- return(pte);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_ACCESSED);
- return(pte);
-}
-
-static inline pte_t pte_mkwrite(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkuptodate(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_NEWPAGE);
- if(pte_present(pte))
- pte_clear_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPAGE);
- return(pte);
-}
-
-static inline pte_t pte_mkspecial(pte_t pte)
-{
- return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
- pte_copy(*pteptr, pteval);
-
- /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- * mapped pages.
- */
-
- *pteptr = pte_mknewpage(*pteptr);
- if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
- return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
-}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
-#define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-#define mk_pte(page, pgprot) \
- ({ pte_t pte; \
- \
- pte_set_val(pte, page_to_phys(page), (pgprot)); \
- if (pte_present(pte)) \
- pte_mknewprot(pte_mknewpage(pte)); \
- pte;})
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
- return pte;
-}
-
-/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
-
-struct mm_struct;
-extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-
-#define update_mmu_cache(vma,address,ptep) do ; while (0)
-
-/* Encode and de-code a swap entry */
-#define __swp_type(x) (((x).val >> 5) & 0x1f)
-#define __swp_offset(x) ((x).val >> 11)
-
-#define __swp_entry(type, offset) \
- ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
-#define __pte_to_swp_entry(pte) \
- ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define kern_addr_valid(addr) (1)
-
-#include <asm-generic/pgtable.h>
-
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, (vaddr), (ptep)); \
- __flush_tlb_one((vaddr)); \
-} while (0)
-
-#endif
--- a/arch/um/include/asm/processor-generic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PROCESSOR_GENERIC_H
-#define __UM_PROCESSOR_GENERIC_H
-
-struct pt_regs;
-
-struct task_struct;
-
-#include <asm/ptrace.h>
-#include <registers.h>
-#include <sysdep/archsetjmp.h>
-
-#include <linux/prefetch.h>
-
-struct mm_struct;
-
-struct thread_struct {
- struct pt_regs regs;
- struct pt_regs *segv_regs;
- int singlestep_syscall;
- void *fault_addr;
- jmp_buf *fault_catcher;
- struct task_struct *prev_sched;
- struct arch_thread arch;
- jmp_buf switch_buf;
- struct {
- int op;
- union {
- struct {
- int pid;
- } fork, exec;
- struct {
- int (*proc)(void *);
- void *arg;
- } thread;
- struct {
- void (*proc)(void *);
- void *arg;
- } cb;
- } u;
- } request;
-};
-
-#define INIT_THREAD \
-{ \
- .regs = EMPTY_REGS, \
- .fault_addr = NULL, \
- .prev_sched = NULL, \
- .arch = INIT_ARCH_THREAD, \
- .request = { 0 } \
-}
-
-static inline void release_thread(struct task_struct *task)
-{
-}
-
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
-static inline void mm_copy_segments(struct mm_struct *from_mm,
- struct mm_struct *new_mm)
-{
-}
-
-#define init_stack (init_thread_union.stack)
-
-/*
- * User space process size: 3GB (default).
- */
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
-
-#undef STACK_TOP
-#undef STACK_TOP_MAX
-
-extern unsigned long stacksizelim;
-
-#define STACK_ROOM (stacksizelim)
-#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define STACK_TOP_MAX STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (0x40000000)
-
-extern void start_thread(struct pt_regs *regs, unsigned long entry,
- unsigned long stack);
-
-struct cpuinfo_um {
- unsigned long loops_per_jiffy;
- int ipi_pipe[2];
-};
-
-extern struct cpuinfo_um boot_cpu_data;
-
-#define my_cpu_data cpu_data[smp_processor_id()]
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_um cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-
-#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-extern unsigned long get_wchan(struct task_struct *p);
-
-#endif
--- a/arch/um/include/asm/ptrace-generic.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PTRACE_GENERIC_H
-#define __UM_PTRACE_GENERIC_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/ptrace-abi.h>
-#include <sysdep/ptrace.h>
-
-struct pt_regs {
- struct uml_pt_regs regs;
-};
-
-#define arch_has_single_step() (1)
-
-#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
-
-#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
-#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
-
-#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
-
-#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
-
-#define instruction_pointer(regs) PT_REGS_IP(regs)
-
-struct task_struct;
-
-extern long subarch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data);
-extern unsigned long getreg(struct task_struct *child, int regno);
-extern int putreg(struct task_struct *child, int regno, unsigned long value);
-
-extern int arch_copy_tls(struct task_struct *new);
-extern void clear_flushed_tls(struct task_struct *task);
-extern void syscall_trace_enter(struct pt_regs *regs);
-extern void syscall_trace_leave(struct pt_regs *regs);
-
-#endif
-
-#endif
--- a/arch/um/include/asm/setup.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef SETUP_H_INCLUDED
-#define SETUP_H_INCLUDED
-
-/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
- * command line, so this choice is ok.
- */
-
-#define COMMAND_LINE_SIZE 4096
-
-#endif /* SETUP_H_INCLUDED */
--- a/arch/um/include/asm/smp.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __UM_SMP_H
-#define __UM_SMP_H
-
-#ifdef CONFIG_SMP
-
-#include <linux/bitops.h>
-#include <asm/current.h>
-#include <linux/cpumask.h>
-
-#define raw_smp_processor_id() (current_thread->cpu)
-
-#define cpu_logical_map(n) (n)
-#define cpu_number_map(n) (n)
-extern int hard_smp_processor_id(void);
-#define NO_PROC_ID -1
-
-extern int ncpus;
-
-
-static inline void smp_cpus_done(unsigned int maxcpus)
-{
-}
-
-extern struct task_struct *idle_threads[NR_CPUS];
-
-#else
-
-#define hard_smp_processor_id() 0
-
-#endif
-
-#endif
--- a/arch/um/include/asm/stacktrace.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _ASM_UML_STACKTRACE_H
-#define _ASM_UML_STACKTRACE_H
-
-#include <linux/uaccess.h>
-#include <linux/ptrace.h>
-
-struct stack_frame {
- struct stack_frame *next_frame;
- unsigned long return_address;
-};
-
-struct stacktrace_ops {
- void (*address)(void *data, unsigned long address, int reliable);
-};
-
-#ifdef CONFIG_FRAME_POINTER
-static inline unsigned long
-get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
-{
- if (!task || task == current)
- return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
- return KSTK_EBP(task);
-}
-#else
-static inline unsigned long
-get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
-{
- return 0;
-}
-#endif
-
-static inline unsigned long
-*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
-{
- if (!task || task == current)
- return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
- return (unsigned long *)KSTK_ESP(task);
-}
-
-void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
-
-#endif /* _ASM_UML_STACKTRACE_H */
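
For orientation: dump_trace() consumes the struct stack_frame layout being removed above by chasing saved frame pointers. A minimal sketch of such a walker (illustrative only, not part of this patch; real code also needs bounds checks against the stack extent):

    static void example_walk(unsigned long bp,
                             const struct stacktrace_ops *ops, void *data)
    {
            struct stack_frame *frame = (struct stack_frame *) bp;

            while (frame != NULL) {
                    /* hand each saved return address to the callback */
                    ops->address(data, frame->return_address, 1);
                    frame = frame->next_frame;
            }
    }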
--- a/arch/um/include/asm/sysrq.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __UM_SYSRQ_H
-#define __UM_SYSRQ_H
-
-struct task_struct;
-extern void show_trace(struct task_struct* task, unsigned long *stack);
-
-#endif
--- a/arch/um/include/asm/thread_info.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_THREAD_INFO_H
-#define __UM_THREAD_INFO_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-#include <asm/page.h>
-#include <asm/uaccess.h>
-
-struct thread_info {
- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
- unsigned long flags; /* low level flags */
- __u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
- mm_segment_t addr_limit; /* thread address space:
- 0-0xBFFFFFFF for user
- 0-0xFFFFFFFF for kernel */
- struct restart_block restart_block;
- struct thread_info *real_thread; /* Points to non-IRQ stack */
-};
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .real_thread = NULL, \
-}
-
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- unsigned long mask = THREAD_SIZE - 1;
- void *p;
-
- asm volatile ("" : "=r" (p) : "0" (&ti));
- ti = (struct thread_info *) (((unsigned long)p) & ~mask);
- return ti;
-}
-
-#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
-
-#endif
-
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_RESTART_BLOCK 4
-#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
-#define TIF_SYSCALL_AUDIT 6
-#define TIF_RESTORE_SIGMASK 7
-#define TIF_NOTIFY_RESUME 8
-
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_MEMDIE (1 << TIF_MEMDIE)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-
-#endif
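
Note the trick in current_thread_info() above: thread stacks are THREAD_SIZE-aligned, so masking the address of any on-stack variable down to that alignment lands on the thread_info at the stack base. The same idea in isolation, as a standalone user-space sketch (the stack-size constant here is made up for illustration):

    #define EX_THREAD_SIZE (2 * 4096UL)   /* assume an order-1 stack */

    static void *ex_stack_base(void *on_stack)
    {
            /* round down to the enclosing EX_THREAD_SIZE-aligned block */
            return (void *)((unsigned long) on_stack & ~(EX_THREAD_SIZE - 1));
    }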
--- a/arch/um/include/asm/timex.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __UM_TIMEX_H
-#define __UM_TIMEX_H
-
-typedef unsigned long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- return 0;
-}
-
-#define CLOCK_TICK_RATE (HZ)
-
-#endif
--- a/arch/um/include/asm/tlb.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef __UM_TLB_H
-#define __UM_TLB_H
-
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int need_flush; /* Really unmapped some ptes? */
- unsigned long start;
- unsigned long end;
- unsigned int fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->need_flush = 0;
-
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
- if (!tlb->need_flush)
- return;
-
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- * while handling the additional races in SMP caused by other CPUs
- * caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address) \
- do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
- } while (0)
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
-
-#endif
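
The mmu_gather helpers above are driven by the core mm in a fixed order. Condensed into one place (a sketch of the calling convention, not code from this patch; the hypothetical example_zap() stands in for the core unmap path):

    static void example_zap(struct mm_struct *mm, pte_t *ptep,
                            struct page *page, unsigned long addr)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, addr, addr + PAGE_SIZE);
            tlb_remove_tlb_entry(&tlb, ptep, addr); /* widens start/end, sets need_flush */
            tlb_remove_page(&tlb, page);            /* frees the page immediately on UML */
            tlb_finish_mmu(&tlb, addr, addr + PAGE_SIZE); /* one ranged flush at the end */
    }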
--- a/arch/um/include/asm/tlbflush.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_TLBFLUSH_H
-#define __UM_TLBFLUSH_H
-
-#include <linux/mm.h>
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_kernel_vm() flushes the kernel vm area
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
-extern void flush_tlb_kernel_vm(void);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void __flush_tlb_one(unsigned long addr);
-
-#endif
--- a/arch/um/include/asm/uaccess.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_UACCESS_H
-#define __UM_UACCESS_H
-
-/* thread_info has a mm_segment_t in it, so put the definition up here */
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#include <linux/thread_info.h>
-#include <linux/errno.h>
-#include <asm/processor.h>
-#include <asm/elf.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
-#define __under_task_size(addr, size) \
- (((unsigned long) (addr) < TASK_SIZE) && \
- (((unsigned long) (addr) + (size)) < TASK_SIZE))
-
-#define __access_ok_vsyscall(type, addr, size) \
- ((type == VERIFY_READ) && \
- ((unsigned long) (addr) >= FIXADDR_USER_START) && \
- ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
- ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
-
-#define __addr_range_nowrap(addr, size) \
- ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
-
-#define access_ok(type, addr, size) \
- (__addr_range_nowrap(addr, size) && \
- (__under_task_size(addr, size) || \
- __access_ok_vsyscall(type, addr, size) || \
- segment_eq(get_fs(), KERNEL_DS)))
-
-extern int copy_from_user(void *to, const void __user *from, int n);
-extern int copy_to_user(void __user *to, const void *from, int n);
-
-/*
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-
-extern int strncpy_from_user(char *dst, const char __user *src, int count);
-
-/*
- * __clear_user: - Zero a block of memory in user space, with less checking.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern int __clear_user(void __user *mem, int len);
-
-/*
- * clear_user: - Zero a block of memory in user space.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern int clear_user(void __user *mem, int len);
-
-/*
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-extern int strnlen_user(const void __user *str, int len);
-
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-#define __get_user(x, ptr) \
-({ \
- const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
- __typeof__(x) __private_val; \
- int __private_ret = -EFAULT; \
- (x) = (__typeof__(*(__private_ptr)))0; \
- if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
- sizeof(*(__private_ptr))) == 0) { \
- (x) = (__typeof__(*(__private_ptr))) __private_val; \
- __private_ret = 0; \
- } \
- __private_ret; \
-})
-
-#define get_user(x, ptr) \
-({ \
- const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
- (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
- __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
-})
-
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __user *__private_ptr = ptr; \
- __typeof__(*(__private_ptr)) __private_val; \
- int __private_ret = -EFAULT; \
- __private_val = (__typeof__(*(__private_ptr))) (x); \
- if (__copy_to_user((__private_ptr), &__private_val, \
- sizeof(*(__private_ptr))) == 0) { \
- __private_ret = 0; \
- } \
- __private_ret; \
-})
-
-#define put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __user *private_ptr = (ptr); \
- (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
- __put_user(x, private_ptr) : -EFAULT); \
-})
-
-#define strlen_user(str) strnlen_user(str, ~0U >> 1)
-
-struct exception_table_entry
-{
- unsigned long insn;
- unsigned long fixup;
-};
-
-#endif
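
Taken together, the accessors above follow the usual pattern: access_ok() is folded into get_user()/put_user(), so callers only check the return value. A representative (hypothetical) consumer:

    static long example_bump(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))        /* 0 on success, -EFAULT otherwise */
                    return -EFAULT;
            return put_user(val + 1, uptr);
    }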
--- /dev/null
+++ b/arch/um/include/uapi/asm/Kbuild
@@ -0,0 +1,30 @@
+generic-y += barrier.h
+generic-y += bug.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += io.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += mcs_spinlock.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += switch_to.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += xor.h
--- /dev/null
+++ b/arch/um/include/uapi/asm/a.out-core.h
@@ -0,0 +1,27 @@
+/* a.out coredump register dumper
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef __UM_A_OUT_CORE_H
+#define __UM_A_OUT_CORE_H
+
+#ifdef __KERNEL__
+
+#include <linux/user.h>
+
+/*
+ * fill in the user structure for an a.out core dump
+ */
+static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
+{
+}
+
+#endif /* __KERNEL__ */
+#endif /* __UM_A_OUT_CORE_H */
--- /dev/null
+++ b/arch/um/include/uapi/asm/bugs.h
@@ -0,0 +1,6 @@
+#ifndef __UM_BUGS_H
+#define __UM_BUGS_H
+
+void check_bugs(void);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/cache.h
@@ -0,0 +1,17 @@
+#ifndef __UM_CACHE_H
+#define __UM_CACHE_H
+
+
+#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+#elif defined(CONFIG_UML_X86) /* 64-bit */
+# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
+#else
+/* XXX: this was taken from x86, now it's completely random. Luckily only
+ * affects SMP padding. */
+# define L1_CACHE_SHIFT 5
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/common.lds.S
@@ -0,0 +1,107 @@
+#include <asm-generic/vmlinux.lds.h>
+
+ .fini : { *(.fini) } =0x9090
+ _etext = .;
+ PROVIDE (etext = .);
+
+ . = ALIGN(4096);
+ _sdata = .;
+ PROVIDE (sdata = .);
+
+ RODATA
+
+ .unprotected : { *(.unprotected) }
+ . = ALIGN(4096);
+ PROVIDE (_unprotected_end = .);
+
+ . = ALIGN(4096);
+ .note : { *(.note.*) }
+ EXCEPTION_TABLE(0)
+
+ BUG_TABLE
+
+ .uml.setup.init : {
+ __uml_setup_start = .;
+ *(.uml.setup.init)
+ __uml_setup_end = .;
+ }
+
+ .uml.help.init : {
+ __uml_help_start = .;
+ *(.uml.help.init)
+ __uml_help_end = .;
+ }
+
+ .uml.postsetup.init : {
+ __uml_postsetup_start = .;
+ *(.uml.postsetup.init)
+ __uml_postsetup_end = .;
+ }
+
+ .init.setup : {
+ INIT_SETUP(0)
+ }
+
+ PERCPU_SECTION(32)
+
+ .initcall.init : {
+ INIT_CALLS
+ }
+
+ .con_initcall.init : {
+ CON_INITCALL
+ }
+
+ .uml.initcall.init : {
+ __uml_initcall_start = .;
+ *(.uml.initcall.init)
+ __uml_initcall_end = .;
+ }
+
+ SECURITY_INIT
+
+ .exitcall : {
+ __exitcall_begin = .;
+ *(.exitcall.exit)
+ __exitcall_end = .;
+ }
+
+ .uml.exitcall : {
+ __uml_exitcall_begin = .;
+ *(.uml.exitcall.exit)
+ __uml_exitcall_end = .;
+ }
+
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+ .altinstr_replacement : { *(.altinstr_replacement) }
+ /* .exit.text is discard at runtime, not link time, to deal with references
+ from .altinstructions and .eh_frame */
+ .exit.text : { *(.exit.text) }
+ .exit.data : { *(.exit.data) }
+
+ .preinit_array : {
+ __preinit_array_start = .;
+ *(.preinit_array)
+ __preinit_array_end = .;
+ }
+ .init_array : {
+ __init_array_start = .;
+ *(.init_array)
+ __init_array_end = .;
+ }
+ .fini_array : {
+ __fini_array_start = .;
+ *(.fini_array)
+ __fini_array_end = .;
+ }
+
+ . = ALIGN(4096);
+ .init.ramfs : {
+ INIT_RAM_FS
+ }
+
--- /dev/null
+++ b/arch/um/include/uapi/asm/dma.h
@@ -0,0 +1,10 @@
+#ifndef __UM_DMA_H
+#define __UM_DMA_H
+
+#include <asm/io.h>
+
+extern unsigned long uml_physmem;
+
+#define MAX_DMA_ADDRESS (uml_physmem)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/fixmap.h
@@ -0,0 +1,60 @@
+#ifndef __UM_FIXMAP_H
+#define __UM_FIXMAP_H
+
+#include <asm/processor.h>
+#include <asm/kmap_types.h>
+#include <asm/archparam.h>
+#include <asm/page.h>
+#include <linux/threads.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * highger than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * on UP currently we will have no trace of the fixmap mechanizm,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+
+#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+#endif
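
For reference, the generic <asm-generic/fixmap.h> included at the end turns these constants into addresses by counting pages down from the top, along the lines of (paraphrased from the generic header, not part of this patch):

    #define example_fix_to_virt(idx)  (FIXADDR_TOP - ((idx) << PAGE_SHIFT))
    #define example_virt_to_fix(addr) ((FIXADDR_TOP - ((addr) & PAGE_MASK)) >> PAGE_SHIFT)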
--- /dev/null
+++ b/arch/um/include/uapi/asm/irq.h
@@ -0,0 +1,23 @@
+#ifndef __UM_IRQ_H
+#define __UM_IRQ_H
+
+#define TIMER_IRQ 0
+#define UMN_IRQ 1
+#define CONSOLE_IRQ 2
+#define CONSOLE_WRITE_IRQ 3
+#define UBD_IRQ 4
+#define UM_ETH_IRQ 5
+#define SSL_IRQ 6
+#define SSL_WRITE_IRQ 7
+#define ACCEPT_IRQ 8
+#define MCONSOLE_IRQ 9
+#define WINCH_IRQ 10
+#define SIGIO_WRITE_IRQ 11
+#define TELNETD_IRQ 12
+#define XTERM_IRQ 13
+#define RANDOM_IRQ 14
+
+#define LAST_IRQ RANDOM_IRQ
+#define NR_IRQS (LAST_IRQ + 1)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/irqflags.h
@@ -0,0 +1,42 @@
+#ifndef __UM_IRQFLAGS_H
+#define __UM_IRQFLAGS_H
+
+extern int get_signals(void);
+extern int set_signals(int enable);
+extern void block_signals(void);
+extern void unblock_signals(void);
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return get_signals();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ set_signals(flags);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unblock_signals();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_local_save_flags() == 0;
+}
+
+#endif
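
On UML "interrupts" are host signals, so the standard irqsave idiom built from the primitives above blocks and unblocks signal delivery. A minimal illustrative use:

    static void example_critical(void)
    {
            unsigned long flags;

            flags = arch_local_irq_save();  /* save signal state, then block */
            /* ... code that must not be interrupted by host signals ... */
            arch_local_irq_restore(flags);  /* unblock only if previously enabled */
    }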
--- /dev/null
+++ b/arch/um/include/uapi/asm/kmap_types.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_KMAP_TYPES_H
+#define __UM_KMAP_TYPES_H
+
+/* No more #include "asm/arch/kmap_types.h" ! */
+
+#define KM_TYPE_NR 14
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
--- /dev/null
+++ b/arch/um/include/uapi/asm/mmu.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __ARCH_UM_MMU_H
+#define __ARCH_UM_MMU_H
+
+#include <mm_id.h>
+#include <asm/mm_context.h>
+
+typedef struct mm_context {
+ struct mm_id id;
+ struct uml_arch_mm_context arch;
+ struct page *stub_pages[2];
+} mm_context_t;
+
+extern void __switch_mm(struct mm_id * mm_idp);
+
+/* Avoid tangled inclusion with asm/ldt.h */
+extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+extern void free_ldt(struct mm_context *mm);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/mmu_context.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_MMU_CONTEXT_H
+#define __UM_MMU_CONTEXT_H
+
+#include <linux/sched.h>
+#include <asm/mmu.h>
+
+extern void uml_setup_stubs(struct mm_struct *mm);
+extern void arch_exit_mmap(struct mm_struct *mm);
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+extern void force_flush_all(void);
+
+static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
+{
+ /*
+ * This is called by fs/exec.c and sys_unshare()
+ * when the new ->mm is used for the first time.
+ */
+ __switch_mm(&new->context.id);
+ down_write(&new->mmap_sem);
+ uml_setup_stubs(new);
+ up_write(&new->mmap_sem);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+
+ if(prev != next){
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ if(next != &init_mm)
+ __switch_mm(&next->context.id);
+ }
+}
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ uml_setup_stubs(mm);
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
+
+extern void destroy_context(struct mm_struct *mm);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/page.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright 2003 PathScale, Inc.
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PAGE_H
+#define __UM_PAGE_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct page;
+
+#include <linux/types.h>
+#include <asm/vm-flags.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
+
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
+
+#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
+#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
+#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
+#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
+ smp_wmb(); \
+ (to).pte_low = (from).pte_low; })
+#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
+#define pte_set_val(pte, phys, prot) \
+ ({ (pte).pte_high = (phys) >> 32; \
+ (pte).pte_low = (phys) | pgprot_val(prot); })
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+typedef unsigned long long pfn_t;
+typedef unsigned long long phys_t;
+
+#else
+
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+#endif
+
+#define pte_val(x) ((x).pte)
+
+
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ((to).pte = (from).pte)
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
+
+typedef unsigned long pfn_t;
+typedef unsigned long phys_t;
+
+#endif
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long uml_physmem;
+
+#define PAGE_OFFSET (uml_physmem)
+#define KERNELBASE PAGE_OFFSET
+
+#define __va_space (8*1024*1024)
+
+#include <mem.h>
+
+/* Cast to unsigned long before casting to void * to avoid a warning from
+ * mmap_kmem about cutting a long long down to a void *. Not sure that
+ * casting is the right thing, but 32-bit UML can't have 64-bit virtual
+ * addresses
+ */
+#define __pa(virt) to_phys((void *) (unsigned long) (virt))
+#define __va(phys) to_virt((unsigned long) (phys))
+
+#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
+#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
+#endif /* __UM_PAGE_H */
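
The conversions above compose into the usual round trips; for example (an illustrative sketch, assuming ptr lies inside the UML physmem mapping):

    static int example_round_trip(void *ptr)
    {
            phys_t p = __pa(ptr);           /* kernel virtual -> physical   */
            pfn_t pfn = phys_to_pfn(p);     /* physical -> page frame number */

            /* back to the page-aligned virtual address */
            return __va(pfn_to_phys(pfn)) ==
                   (void *)((unsigned long) ptr & PAGE_MASK);
    }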
--- /dev/null
+++ b/arch/um/include/uapi/asm/pgalloc.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PGALLOC_H
+#define __UM_PGALLOC_H
+
+#include <linux/mm.h>
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + \
+ ((unsigned long long)page_to_pfn(pte) << \
+ (unsigned long long) PAGE_SHIFT)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long) pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+#define __pte_free_tlb(tlb,pte, address) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
+#endif
+
+#define check_pgt_cache() do { } while (0)
+
+#endif
+
--- /dev/null
+++ b/arch/um/include/uapi/asm/pgtable-2level.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgtable.h
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PGTABLE_2LEVEL_H
+#define __UM_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: the i386 is two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
+#define PTRS_PER_PGD 1024
+#define FIRST_USER_ADDRESS 0
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+ pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+ pgd_val(e))
+
+static inline int pgd_newpage(pgd_t pgd) { return 0; }
+static inline void pgd_mkuptodate(pgd_t pgd) { }
+
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+#define pte_pfn(x) phys_to_pfn(pte_val(x))
+#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
+
+/*
+ * Bits 0 through 4 are taken
+ */
+#define PTE_FILE_MAX_BITS 27
+
+#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
+
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
+
+#endif
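
Since _PAGE_FILE (0x008) lives below bit 5, the shift-by-5 file-pte encoding above round-trips cleanly; spelled out (illustrative only):

    static unsigned long example_pgoff_round_trip(unsigned long off)
    {
            pte_t pte = pgoff_to_pte(off);  /* (off << 5) + _PAGE_FILE */

            return pte_to_pgoff(pte);       /* low flag bits shift back out: == off */
    }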
--- /dev/null
+++ b/arch/um/include/uapi/asm/pgtable-3level.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2003 PathScale Inc
+ * Derived from include/asm-i386/pgtable.h
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PGTABLE_3LEVEL_H
+#define __UM_PGTABLE_3LEVEL_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
+#ifdef CONFIG_64BIT
+#define PGDIR_SHIFT 30
+#else
+#define PGDIR_SHIFT 31
+#endif
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can
+ * map
+ */
+
+#define PMD_SHIFT 21
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * entries per page directory level
+ */
+
+#define PTRS_PER_PTE 512
+#ifdef CONFIG_64BIT
+#define PTRS_PER_PMD 512
+#define PTRS_PER_PGD 512
+#else
+#define PTRS_PER_PMD 1024
+#define PTRS_PER_PGD 1024
+#endif
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pgd_val(e))
+
+#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
+#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+#define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+
+#ifdef CONFIG_64BIT
+#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+#else
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
+static inline int pgd_newpage(pgd_t pgd)
+{
+ return(pgd_val(pgd) & _PAGE_NEWPAGE);
+}
+
+static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
+
+#ifdef CONFIG_64BIT
+#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
+struct mm_struct;
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+
+static inline void pud_clear (pud_t *pud)
+{
+ set_pud(pud, __pud(_PAGE_NEWPAGE));
+}
+
+#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
+ pmd_index(address))
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return phys_to_pfn(pte_val(pte));
+}
+
+static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+ phys_t phys = pfn_to_phys(page_nr);
+
+ pte_set_val(pte, phys, pgprot);
+ return pte;
+}
+
+static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
+{
+ return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+/*
+ * Bits 0 through 3 are taken in the low part of the pte,
+ * put the 32 bits of offset into the high part.
+ */
+#define PTE_FILE_MAX_BITS 32
+
+#ifdef CONFIG_64BIT
+
+#define pte_to_pgoff(p) ((p).pte >> 32)
+
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
+
+#else
+
+#define pte_to_pgoff(pte) ((pte).pte_high)
+
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+
+#endif
+
+#endif
+
  2830. --- /dev/null
  2831. +++ b/arch/um/include/uapi/asm/pgtable.h
  2832. @@ -0,0 +1,375 @@
  2833. +/*
  2834. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  2835. + * Copyright 2003 PathScale, Inc.
  2836. + * Derived from include/asm-i386/pgtable.h
  2837. + * Licensed under the GPL
  2838. + */
  2839. +
  2840. +#ifndef __UM_PGTABLE_H
  2841. +#define __UM_PGTABLE_H
  2842. +
  2843. +#include <asm/fixmap.h>
  2844. +
  2845. +#define _PAGE_PRESENT 0x001
  2846. +#define _PAGE_NEWPAGE 0x002
  2847. +#define _PAGE_NEWPROT 0x004
  2848. +#define _PAGE_RW 0x020
  2849. +#define _PAGE_USER 0x040
  2850. +#define _PAGE_ACCESSED 0x080
  2851. +#define _PAGE_DIRTY 0x100
  2852. +/* If _PAGE_PRESENT is clear, we use these: */
  2853. +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
  2854. +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
  2855. + pte_present gives true */
  2856. +
  2857. +#ifdef CONFIG_3_LEVEL_PGTABLES
  2858. +#include <asm/pgtable-3level.h>
  2859. +#else
  2860. +#include <asm/pgtable-2level.h>
  2861. +#endif
  2862. +
  2863. +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  2864. +
  2865. +/* zero page used for uninitialized stuff */
  2866. +extern unsigned long *empty_zero_page;
  2867. +
  2868. +#define pgtable_cache_init() do ; while (0)
  2869. +
  2870. +/* Just any arbitrary offset to the start of the vmalloc VM area: the
  2871. + * current 8MB value just means that there will be a 8MB "hole" after the
  2872. + * physical memory until the kernel virtual memory starts. That means that
  2873. + * any out-of-bounds memory accesses will hopefully be caught.
  2874. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  2875. + * area for the same reason. ;)
  2876. + */
  2877. +
  2878. +extern unsigned long end_iomem;
  2879. +
  2880. +#define VMALLOC_OFFSET (__va_space)
  2881. +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
  2882. +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
  2883. +#ifdef CONFIG_HIGHMEM
  2884. +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
  2885. +#else
  2886. +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
  2887. +#endif
  2888. +#define MODULES_VADDR VMALLOC_START
  2889. +#define MODULES_END VMALLOC_END
  2890. +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
  2891. +
  2892. +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
  2893. +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
  2894. +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
  2895. +#define __PAGE_KERNEL_EXEC \
  2896. + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  2897. +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
  2898. +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
  2899. +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  2900. +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  2901. +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  2902. +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
  2903. +
  2904. +/*
  2905. + * The i386 can't do page protection for execute, and considers that the same
  2906. + * are read.
  2907. + * Also, write permissions imply read permissions. This is the closest we can
  2908. + * get..
  2909. + */
  2910. +#define __P000 PAGE_NONE
  2911. +#define __P001 PAGE_READONLY
  2912. +#define __P010 PAGE_COPY
  2913. +#define __P011 PAGE_COPY
  2914. +#define __P100 PAGE_READONLY
  2915. +#define __P101 PAGE_READONLY
  2916. +#define __P110 PAGE_COPY
  2917. +#define __P111 PAGE_COPY
  2918. +
  2919. +#define __S000 PAGE_NONE
  2920. +#define __S001 PAGE_READONLY
  2921. +#define __S010 PAGE_SHARED
  2922. +#define __S011 PAGE_SHARED
  2923. +#define __S100 PAGE_READONLY
  2924. +#define __S101 PAGE_READONLY
  2925. +#define __S110 PAGE_SHARED
  2926. +#define __S111 PAGE_SHARED
  2927. +
  2928. +/*
  2929. + * ZERO_PAGE is a global shared page that is always zero: used
  2930. + * for zero-mapped memory areas etc..
  2931. + */
  2932. +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
  2933. +
  2934. +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
  2935. +
  2936. +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
  2937. +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  2938. +
  2939. +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
  2940. +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
  2941. +
  2942. +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
  2943. +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
  2944. +
  2945. +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
  2946. +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
  2947. +
  2948. +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
  2949. +
  2950. +#define pte_page(x) pfn_to_page(pte_pfn(x))
  2951. +
  2952. +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
  2953. +
  2954. +/*
  2955. + * =================================
  2956. + * Flags checking section.
  2957. + * =================================
  2958. + */
  2959. +
  2960. +static inline int pte_none(pte_t pte)
  2961. +{
  2962. + return pte_is_zero(pte);
  2963. +}
  2964. +
  2965. +/*
  2966. + * The following only work if pte_present() is true.
  2967. + * Undefined behaviour if not..
  2968. + */
  2969. +static inline int pte_read(pte_t pte)
  2970. +{
  2971. + return((pte_get_bits(pte, _PAGE_USER)) &&
  2972. + !(pte_get_bits(pte, _PAGE_PROTNONE)));
  2973. +}
  2974. +
2975. +static inline int pte_exec(pte_t pte)
2976. +{
2977. + return pte_get_bits(pte, _PAGE_USER) && !pte_get_bits(pte, _PAGE_PROTNONE);
2978. +}
  2979. +
  2980. +static inline int pte_write(pte_t pte)
  2981. +{
  2982. + return((pte_get_bits(pte, _PAGE_RW)) &&
  2983. + !(pte_get_bits(pte, _PAGE_PROTNONE)));
  2984. +}
  2985. +
  2986. +/*
  2987. + * The following only works if pte_present() is not true.
  2988. + */
  2989. +static inline int pte_file(pte_t pte)
  2990. +{
  2991. + return pte_get_bits(pte, _PAGE_FILE);
  2992. +}
  2993. +
  2994. +static inline int pte_dirty(pte_t pte)
  2995. +{
  2996. + return pte_get_bits(pte, _PAGE_DIRTY);
  2997. +}
  2998. +
  2999. +static inline int pte_young(pte_t pte)
  3000. +{
  3001. + return pte_get_bits(pte, _PAGE_ACCESSED);
  3002. +}
  3003. +
  3004. +static inline int pte_newpage(pte_t pte)
  3005. +{
  3006. + return pte_get_bits(pte, _PAGE_NEWPAGE);
  3007. +}
  3008. +
  3009. +static inline int pte_newprot(pte_t pte)
  3010. +{
  3011. + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
  3012. +}
  3013. +
  3014. +static inline int pte_special(pte_t pte)
  3015. +{
  3016. + return 0;
  3017. +}
  3018. +
  3019. +/*
  3020. + * =================================
  3021. + * Flags setting section.
  3022. + * =================================
  3023. + */
  3024. +
  3025. +static inline pte_t pte_mknewprot(pte_t pte)
  3026. +{
  3027. + pte_set_bits(pte, _PAGE_NEWPROT);
  3028. + return(pte);
  3029. +}
  3030. +
  3031. +static inline pte_t pte_mkclean(pte_t pte)
  3032. +{
  3033. + pte_clear_bits(pte, _PAGE_DIRTY);
  3034. + return(pte);
  3035. +}
  3036. +
  3037. +static inline pte_t pte_mkold(pte_t pte)
  3038. +{
  3039. + pte_clear_bits(pte, _PAGE_ACCESSED);
  3040. + return(pte);
  3041. +}
  3042. +
  3043. +static inline pte_t pte_wrprotect(pte_t pte)
  3044. +{
  3045. + pte_clear_bits(pte, _PAGE_RW);
  3046. + return(pte_mknewprot(pte));
  3047. +}
  3048. +
  3049. +static inline pte_t pte_mkread(pte_t pte)
  3050. +{
  3051. + pte_set_bits(pte, _PAGE_USER);
  3052. + return(pte_mknewprot(pte));
  3053. +}
  3054. +
  3055. +static inline pte_t pte_mkdirty(pte_t pte)
  3056. +{
  3057. + pte_set_bits(pte, _PAGE_DIRTY);
  3058. + return(pte);
  3059. +}
  3060. +
  3061. +static inline pte_t pte_mkyoung(pte_t pte)
  3062. +{
  3063. + pte_set_bits(pte, _PAGE_ACCESSED);
  3064. + return(pte);
  3065. +}
  3066. +
  3067. +static inline pte_t pte_mkwrite(pte_t pte)
  3068. +{
  3069. + pte_set_bits(pte, _PAGE_RW);
  3070. + return(pte_mknewprot(pte));
  3071. +}
  3072. +
  3073. +static inline pte_t pte_mkuptodate(pte_t pte)
  3074. +{
  3075. + pte_clear_bits(pte, _PAGE_NEWPAGE);
  3076. + if(pte_present(pte))
  3077. + pte_clear_bits(pte, _PAGE_NEWPROT);
  3078. + return(pte);
  3079. +}
  3080. +
  3081. +static inline pte_t pte_mknewpage(pte_t pte)
  3082. +{
  3083. + pte_set_bits(pte, _PAGE_NEWPAGE);
  3084. + return(pte);
  3085. +}
  3086. +
  3087. +static inline pte_t pte_mkspecial(pte_t pte)
  3088. +{
  3089. + return(pte);
  3090. +}
  3091. +
  3092. +static inline void set_pte(pte_t *pteptr, pte_t pteval)
  3093. +{
  3094. + pte_copy(*pteptr, pteval);
  3095. +
  3096. + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
  3097. + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
  3098. + * mapped pages.
  3099. + */
  3100. +
  3101. + *pteptr = pte_mknewpage(*pteptr);
  3102. + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
  3103. +}
  3104. +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
  3105. +
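For illustration, here is roughly how a fix_range-style pass consumes the two NEW* bits that set_pte() maintains (a sketch of the protocol described in the comment above, not code from this patch):

    /* Sketch: consumer of _PAGE_NEWPAGE/_PAGE_NEWPROT for one PTE. */
    if (pte_newpage(*ptep)) {
            /* Page was (re)mapped or unmapped: redo the host mapping,
             * then mark the PTE up to date again. */
            *ptep = pte_mkuptodate(*ptep); /* clears NEWPAGE (and NEWPROT) */
    } else if (pte_newprot(*ptep)) {
            /* Mapping unchanged, only permissions differ: a cheaper
             * mprotect-style update of the existing mapping is enough. */
    }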
  3106. +#define __HAVE_ARCH_PTE_SAME
  3107. +static inline int pte_same(pte_t pte_a, pte_t pte_b)
  3108. +{
  3109. + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
  3110. +}
  3111. +
  3112. +/*
  3113. + * Conversion functions: convert a page and protection to a page entry,
  3114. + * and a page entry and page directory to the page they refer to.
  3115. + */
  3116. +
  3117. +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
  3118. +#define __virt_to_page(virt) phys_to_page(__pa(virt))
  3119. +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
  3120. +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
  3121. +
  3122. +#define mk_pte(page, pgprot) \
  3123. + ({ pte_t pte; \
  3124. + \
  3125. + pte_set_val(pte, page_to_phys(page), (pgprot)); \
  3126. + if (pte_present(pte)) \
3127. + pte = pte_mknewprot(pte_mknewpage(pte)); \
  3128. + pte;})
  3129. +
  3130. +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  3131. +{
  3132. + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
  3133. + return pte;
  3134. +}
  3135. +
  3136. +/*
3137. + * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
  3138. + *
  3139. + * this macro returns the index of the entry in the pgd page which would
  3140. + * control the given virtual address
  3141. + */
  3142. +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
  3143. +
  3144. +/*
  3145. + * pgd_offset() returns a (pgd_t *)
3146. + * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
  3147. + */
  3148. +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
  3149. +
  3150. +/*
  3151. + * a shortcut which implies the use of the kernel's pgd, instead
  3152. + * of a process's
  3153. + */
  3154. +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
  3155. +
  3156. +/*
3157. + * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
  3158. + *
  3159. + * this macro returns the index of the entry in the pmd page which would
  3160. + * control the given virtual address
  3161. + */
  3162. +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  3163. +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
  3167. +
  3168. +/*
3169. + * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
  3170. + *
  3171. + * this macro returns the index of the entry in the pte page which would
  3172. + * control the given virtual address
  3173. + */
  3174. +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  3175. +#define pte_offset_kernel(dir, address) \
  3176. + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
  3177. +#define pte_offset_map(dir, address) \
  3178. + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
  3179. +#define pte_unmap(pte) do { } while (0)
  3180. +
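As a worked example of the index macros above (illustration only; the shift values are assumptions for a 2-level, 4K-page configuration):

    /* Assume PAGE_SHIFT = 12, PGDIR_SHIFT = 22, PTRS_PER_PGD/PTE = 1024. */
    unsigned long address = 0xb7e41234UL;
    unsigned long pgd_i = (address >> 22) & 1023;      /* pgd_index -> 735  */
    unsigned long pte_i = (address >> 12) & 1023;      /* pte_index -> 577  */
    unsigned long off   = address & ((1UL << 12) - 1); /* page offset 0x234 */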
  3181. +struct mm_struct;
  3182. +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
  3183. +
  3184. +#define update_mmu_cache(vma,address,ptep) do ; while (0)
  3185. +
  3186. +/* Encode and de-code a swap entry */
  3187. +#define __swp_type(x) (((x).val >> 5) & 0x1f)
  3188. +#define __swp_offset(x) ((x).val >> 11)
  3189. +
  3190. +#define __swp_entry(type, offset) \
  3191. + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
  3192. +#define __pte_to_swp_entry(pte) \
  3193. + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
  3194. +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
  3195. +
  3196. +#define kern_addr_valid(addr) (1)
  3197. +
  3198. +#include <asm-generic/pgtable.h>
  3199. +
  3200. +/* Clear a kernel PTE and flush it from the TLB */
  3201. +#define kpte_clear_flush(ptep, vaddr) \
  3202. +do { \
  3203. + pte_clear(&init_mm, (vaddr), (ptep)); \
  3204. + __flush_tlb_one((vaddr)); \
  3205. +} while (0)
  3206. +
  3207. +#endif
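A quick round-trip through the swap-entry encoding above (illustration only): the type field occupies bits 5-9, the offset starts at bit 11, and the low bits stay free for flags such as _PAGE_NEWPAGE, so a swap PTE is never mistaken for a present one.

    unsigned long val = (3UL << 5) | (0x1234UL << 11); /* __swp_entry(3, 0x1234) */
    unsigned int  type = (val >> 5) & 0x1f;            /* __swp_type   -> 3      */
    unsigned long off  = val >> 11;                    /* __swp_offset -> 0x1234 */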
  3208. --- /dev/null
  3209. +++ b/arch/um/include/uapi/asm/processor-generic.h
  3210. @@ -0,0 +1,115 @@
  3211. +/*
  3212. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3213. + * Licensed under the GPL
  3214. + */
  3215. +
  3216. +#ifndef __UM_PROCESSOR_GENERIC_H
  3217. +#define __UM_PROCESSOR_GENERIC_H
  3218. +
  3219. +struct pt_regs;
  3220. +
  3221. +struct task_struct;
  3222. +
  3223. +#include <asm/ptrace.h>
  3224. +#include <registers.h>
  3225. +#include <sysdep/archsetjmp.h>
  3226. +
  3227. +#include <linux/prefetch.h>
  3228. +
  3229. +struct mm_struct;
  3230. +
  3231. +struct thread_struct {
  3232. + struct pt_regs regs;
  3233. + struct pt_regs *segv_regs;
  3234. + int singlestep_syscall;
  3235. + void *fault_addr;
  3236. + jmp_buf *fault_catcher;
  3237. + struct task_struct *prev_sched;
  3238. + struct arch_thread arch;
  3239. + jmp_buf switch_buf;
  3240. + struct {
  3241. + int op;
  3242. + union {
  3243. + struct {
  3244. + int pid;
  3245. + } fork, exec;
  3246. + struct {
  3247. + int (*proc)(void *);
  3248. + void *arg;
  3249. + } thread;
  3250. + struct {
  3251. + void (*proc)(void *);
  3252. + void *arg;
  3253. + } cb;
  3254. + } u;
  3255. + } request;
  3256. +};
  3257. +
  3258. +#define INIT_THREAD \
  3259. +{ \
  3260. + .regs = EMPTY_REGS, \
  3261. + .fault_addr = NULL, \
  3262. + .prev_sched = NULL, \
  3263. + .arch = INIT_ARCH_THREAD, \
  3264. + .request = { 0 } \
  3265. +}
  3266. +
  3267. +static inline void release_thread(struct task_struct *task)
  3268. +{
  3269. +}
  3270. +
  3271. +extern unsigned long thread_saved_pc(struct task_struct *t);
  3272. +
  3273. +static inline void mm_copy_segments(struct mm_struct *from_mm,
  3274. + struct mm_struct *new_mm)
  3275. +{
  3276. +}
  3277. +
  3278. +#define init_stack (init_thread_union.stack)
  3279. +
  3280. +/*
  3281. + * User space process size: 3GB (default).
  3282. + */
  3283. +extern unsigned long task_size;
  3284. +
  3285. +#define TASK_SIZE (task_size)
  3286. +
  3287. +#undef STACK_TOP
  3288. +#undef STACK_TOP_MAX
  3289. +
  3290. +extern unsigned long stacksizelim;
  3291. +
  3292. +#define STACK_ROOM (stacksizelim)
  3293. +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
  3294. +#define STACK_TOP_MAX STACK_TOP
  3295. +
  3296. +/* This decides where the kernel will search for a free chunk of vm
  3297. + * space during mmap's.
  3298. + */
  3299. +#define TASK_UNMAPPED_BASE (0x40000000)
  3300. +
  3301. +extern void start_thread(struct pt_regs *regs, unsigned long entry,
  3302. + unsigned long stack);
  3303. +
  3304. +struct cpuinfo_um {
  3305. + unsigned long loops_per_jiffy;
  3306. + int ipi_pipe[2];
  3307. +};
  3308. +
  3309. +extern struct cpuinfo_um boot_cpu_data;
  3310. +
  3311. +#define my_cpu_data cpu_data[smp_processor_id()]
  3312. +
  3313. +#ifdef CONFIG_SMP
  3314. +extern struct cpuinfo_um cpu_data[];
  3315. +#define current_cpu_data cpu_data[smp_processor_id()]
  3316. +#else
  3317. +#define cpu_data (&boot_cpu_data)
  3318. +#define current_cpu_data boot_cpu_data
  3319. +#endif
  3320. +
  3321. +
  3322. +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
  3323. +extern unsigned long get_wchan(struct task_struct *p);
  3324. +
  3325. +#endif
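To make the layout constants above concrete (a sketch; the real values come from task_size and stacksizelim at runtime, and 3GB/4K are just the defaults named in the comments):

    /* task_size = 0xC0000000 (3GB), PAGE_SIZE = 4096:             */
    /* TASK_SIZE          = 0xC0000000                             */
    /* STACK_TOP          = TASK_SIZE - 2 * PAGE_SIZE = 0xBFFFE000 */
    /* TASK_UNMAPPED_BASE = 0x40000000 (mmap search starts here)   */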
  3326. --- /dev/null
  3327. +++ b/arch/um/include/uapi/asm/ptrace-generic.h
  3328. @@ -0,0 +1,45 @@
  3329. +/*
  3330. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3331. + * Licensed under the GPL
  3332. + */
  3333. +
  3334. +#ifndef __UM_PTRACE_GENERIC_H
  3335. +#define __UM_PTRACE_GENERIC_H
  3336. +
  3337. +#ifndef __ASSEMBLY__
  3338. +
  3339. +#include <asm/ptrace-abi.h>
  3340. +#include <sysdep/ptrace.h>
  3341. +
  3342. +struct pt_regs {
  3343. + struct uml_pt_regs regs;
  3344. +};
  3345. +
  3346. +#define arch_has_single_step() (1)
  3347. +
  3348. +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
  3349. +
  3350. +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
  3351. +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
  3352. +
  3353. +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
  3354. +
  3355. +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
  3356. +
  3357. +#define instruction_pointer(regs) PT_REGS_IP(regs)
  3358. +
  3359. +struct task_struct;
  3360. +
  3361. +extern long subarch_ptrace(struct task_struct *child, long request,
  3362. + unsigned long addr, unsigned long data);
  3363. +extern unsigned long getreg(struct task_struct *child, int regno);
  3364. +extern int putreg(struct task_struct *child, int regno, unsigned long value);
  3365. +
  3366. +extern int arch_copy_tls(struct task_struct *new);
  3367. +extern void clear_flushed_tls(struct task_struct *task);
  3368. +extern void syscall_trace_enter(struct pt_regs *regs);
  3369. +extern void syscall_trace_leave(struct pt_regs *regs);
  3370. +
  3371. +#endif
  3372. +
  3373. +#endif
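The PT_REGS_* wrappers above simply delegate to the subarch UPT_* accessors, e.g. (sketch, assuming a valid struct pt_regs *regs):

    unsigned long ip = PT_REGS_IP(regs); /* expands to UPT_IP(&regs->regs) */
    unsigned long sp = PT_REGS_SP(regs); /* expands to UPT_SP(&regs->regs) */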
  3374. --- /dev/null
  3375. +++ b/arch/um/include/uapi/asm/setup.h
  3376. @@ -0,0 +1,10 @@
  3377. +#ifndef SETUP_H_INCLUDED
  3378. +#define SETUP_H_INCLUDED
  3379. +
3380. +/* POSIX guarantees, via _POSIX_ARG_MAX, that we can rely on 4096 chars in
3381. + * the command line, so this choice is ok.
3382. + */
  3383. +
  3384. +#define COMMAND_LINE_SIZE 4096
  3385. +
  3386. +#endif /* SETUP_H_INCLUDED */
  3387. --- /dev/null
  3388. +++ b/arch/um/include/uapi/asm/smp.h
  3389. @@ -0,0 +1,32 @@
  3390. +#ifndef __UM_SMP_H
  3391. +#define __UM_SMP_H
  3392. +
  3393. +#ifdef CONFIG_SMP
  3394. +
  3395. +#include <linux/bitops.h>
  3396. +#include <asm/current.h>
  3397. +#include <linux/cpumask.h>
  3398. +
  3399. +#define raw_smp_processor_id() (current_thread->cpu)
  3400. +
  3401. +#define cpu_logical_map(n) (n)
  3402. +#define cpu_number_map(n) (n)
  3403. +extern int hard_smp_processor_id(void);
  3404. +#define NO_PROC_ID -1
  3405. +
  3406. +extern int ncpus;
  3407. +
  3408. +
  3409. +static inline void smp_cpus_done(unsigned int maxcpus)
  3410. +{
  3411. +}
  3412. +
  3413. +extern struct task_struct *idle_threads[NR_CPUS];
  3414. +
  3415. +#else
  3416. +
  3417. +#define hard_smp_processor_id() 0
  3418. +
  3419. +#endif
  3420. +
  3421. +#endif
  3422. --- /dev/null
  3423. +++ b/arch/um/include/uapi/asm/stacktrace.h
  3424. @@ -0,0 +1,42 @@
  3425. +#ifndef _ASM_UML_STACKTRACE_H
  3426. +#define _ASM_UML_STACKTRACE_H
  3427. +
  3428. +#include <linux/uaccess.h>
  3429. +#include <linux/ptrace.h>
  3430. +
  3431. +struct stack_frame {
  3432. + struct stack_frame *next_frame;
  3433. + unsigned long return_address;
  3434. +};
  3435. +
  3436. +struct stacktrace_ops {
  3437. + void (*address)(void *data, unsigned long address, int reliable);
  3438. +};
  3439. +
  3440. +#ifdef CONFIG_FRAME_POINTER
  3441. +static inline unsigned long
  3442. +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
  3443. +{
  3444. + if (!task || task == current)
  3445. + return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
  3446. + return KSTK_EBP(task);
  3447. +}
  3448. +#else
  3449. +static inline unsigned long
  3450. +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
  3451. +{
  3452. + return 0;
  3453. +}
  3454. +#endif
  3455. +
3456. +static inline unsigned long *
3457. +get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
  3458. +{
  3459. + if (!task || task == current)
  3460. + return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
  3461. + return (unsigned long *)KSTK_ESP(task);
  3462. +}
  3463. +
  3464. +void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
  3465. +
  3466. +#endif /* _ASM_UML_STACKTRACE_H */
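dump_trace() calls the single ->address hook once per return address it finds while walking frames. A minimal consumer might look like this (sketch only; the printk formatting is an assumption):

    static void print_addr(void *data, unsigned long address, int reliable)
    {
            printk(" [<%08lx>]%s\n", address, reliable ? "" : " ?");
    }

    static const struct stacktrace_ops print_ops = { .address = print_addr };

    /* Walk the current task's stack: dump_trace(current, &print_ops, NULL); */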
  3467. --- /dev/null
  3468. +++ b/arch/um/include/uapi/asm/sysrq.h
  3469. @@ -0,0 +1,7 @@
  3470. +#ifndef __UM_SYSRQ_H
  3471. +#define __UM_SYSRQ_H
  3472. +
  3473. +struct task_struct;
3474. +extern void show_trace(struct task_struct *task, unsigned long *stack);
  3475. +
  3476. +#endif
  3477. --- /dev/null
  3478. +++ b/arch/um/include/uapi/asm/thread_info.h
  3479. @@ -0,0 +1,78 @@
  3480. +/*
  3481. + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3482. + * Licensed under the GPL
  3483. + */
  3484. +
  3485. +#ifndef __UM_THREAD_INFO_H
  3486. +#define __UM_THREAD_INFO_H
  3487. +
  3488. +#ifndef __ASSEMBLY__
  3489. +
  3490. +#include <asm/types.h>
  3491. +#include <asm/page.h>
  3492. +#include <asm/uaccess.h>
  3493. +
  3494. +struct thread_info {
  3495. + struct task_struct *task; /* main task structure */
  3496. + struct exec_domain *exec_domain; /* execution domain */
  3497. + unsigned long flags; /* low level flags */
  3498. + __u32 cpu; /* current CPU */
  3499. + int preempt_count; /* 0 => preemptable,
  3500. + <0 => BUG */
  3501. + mm_segment_t addr_limit; /* thread address space:
  3502. + 0-0xBFFFFFFF for user
  3503. + 0-0xFFFFFFFF for kernel */
  3504. + struct restart_block restart_block;
  3505. + struct thread_info *real_thread; /* Points to non-IRQ stack */
  3506. +};
  3507. +
  3508. +#define INIT_THREAD_INFO(tsk) \
  3509. +{ \
  3510. + .task = &tsk, \
  3511. + .exec_domain = &default_exec_domain, \
  3512. + .flags = 0, \
  3513. + .cpu = 0, \
  3514. + .preempt_count = INIT_PREEMPT_COUNT, \
  3515. + .addr_limit = KERNEL_DS, \
  3516. + .restart_block = { \
  3517. + .fn = do_no_restart_syscall, \
  3518. + }, \
  3519. + .real_thread = NULL, \
  3520. +}
  3521. +
  3522. +#define init_thread_info (init_thread_union.thread_info)
  3523. +#define init_stack (init_thread_union.stack)
  3524. +
  3525. +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
  3526. +/* how to get the thread information struct from C */
  3527. +static inline struct thread_info *current_thread_info(void)
  3528. +{
  3529. + struct thread_info *ti;
  3530. + unsigned long mask = THREAD_SIZE - 1;
  3531. + void *p;
  3532. +
  3533. + asm volatile ("" : "=r" (p) : "0" (&ti));
  3534. + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
  3535. + return ti;
  3536. +}
  3537. +
  3538. +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
  3539. +
  3540. +#endif
  3541. +
  3542. +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
  3543. +#define TIF_SIGPENDING 1 /* signal pending */
  3544. +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
  3545. +#define TIF_RESTART_BLOCK 4
  3546. +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
  3547. +#define TIF_SYSCALL_AUDIT 6
  3548. +#define TIF_RESTORE_SIGMASK 7
  3549. +#define TIF_NOTIFY_RESUME 8
  3550. +
  3551. +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
  3552. +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
  3553. +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
  3554. +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
  3555. +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
  3556. +
  3557. +#endif
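current_thread_info() works because kernel stacks are THREAD_SIZE-aligned: masking any on-stack address with ~(THREAD_SIZE - 1) lands on the stack base, where struct thread_info lives. Worked example (assuming 4K pages and CONFIG_KERNEL_STACK_ORDER = 1, so THREAD_SIZE = 8K; the address is hypothetical):

    unsigned long sp   = 0x0bffea6cUL;   /* some address inside the stack  */
    unsigned long mask = (8 * 1024) - 1; /* THREAD_SIZE - 1 = 0x1fff       */
    struct thread_info *ti = (struct thread_info *)(sp & ~mask); /* 0x0bffe000 */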
  3558. --- /dev/null
  3559. +++ b/arch/um/include/uapi/asm/timex.h
  3560. @@ -0,0 +1,13 @@
  3561. +#ifndef __UM_TIMEX_H
  3562. +#define __UM_TIMEX_H
  3563. +
  3564. +typedef unsigned long cycles_t;
  3565. +
3566. +static inline cycles_t get_cycles(void)
  3567. +{
  3568. + return 0;
  3569. +}
  3570. +
  3571. +#define CLOCK_TICK_RATE (HZ)
  3572. +
  3573. +#endif
  3574. --- /dev/null
  3575. +++ b/arch/um/include/uapi/asm/tlb.h
  3576. @@ -0,0 +1,134 @@
  3577. +#ifndef __UM_TLB_H
  3578. +#define __UM_TLB_H
  3579. +
  3580. +#include <linux/pagemap.h>
  3581. +#include <linux/swap.h>
  3582. +#include <asm/percpu.h>
  3583. +#include <asm/pgalloc.h>
  3584. +#include <asm/tlbflush.h>
  3585. +
  3586. +#define tlb_start_vma(tlb, vma) do { } while (0)
  3587. +#define tlb_end_vma(tlb, vma) do { } while (0)
  3588. +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
  3589. +
  3590. +/* struct mmu_gather is an opaque type used by the mm code for passing around
  3591. + * any data needed by arch specific code for tlb_remove_page.
  3592. + */
  3593. +struct mmu_gather {
  3594. + struct mm_struct *mm;
  3595. + unsigned int need_flush; /* Really unmapped some ptes? */
  3596. + unsigned long start;
  3597. + unsigned long end;
  3598. + unsigned int fullmm; /* non-zero means full mm flush */
  3599. +};
  3600. +
  3601. +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
  3602. + unsigned long address)
  3603. +{
  3604. + if (tlb->start > address)
  3605. + tlb->start = address;
  3606. + if (tlb->end < address + PAGE_SIZE)
  3607. + tlb->end = address + PAGE_SIZE;
  3608. +}
  3609. +
  3610. +static inline void init_tlb_gather(struct mmu_gather *tlb)
  3611. +{
  3612. + tlb->need_flush = 0;
  3613. +
  3614. + tlb->start = TASK_SIZE;
  3615. + tlb->end = 0;
  3616. +
  3617. + if (tlb->fullmm) {
  3618. + tlb->start = 0;
  3619. + tlb->end = TASK_SIZE;
  3620. + }
  3621. +}
  3622. +
  3623. +static inline void
  3624. +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
  3625. +{
  3626. + tlb->mm = mm;
  3627. + tlb->start = start;
  3628. + tlb->end = end;
  3629. + tlb->fullmm = !(start | (end+1));
  3630. +
  3631. + init_tlb_gather(tlb);
  3632. +}
  3633. +
  3634. +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
  3635. + unsigned long end);
  3636. +
  3637. +static inline void
  3638. +tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
  3639. +{
  3640. + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
  3641. +}
  3642. +
  3643. +static inline void
  3644. +tlb_flush_mmu_free(struct mmu_gather *tlb)
  3645. +{
  3646. + init_tlb_gather(tlb);
  3647. +}
  3648. +
  3649. +static inline void
  3650. +tlb_flush_mmu(struct mmu_gather *tlb)
  3651. +{
  3652. + if (!tlb->need_flush)
  3653. + return;
  3654. +
  3655. + tlb_flush_mmu_tlbonly(tlb);
  3656. + tlb_flush_mmu_free(tlb);
  3657. +}
  3658. +
  3659. +/* tlb_finish_mmu
  3660. + * Called at the end of the shootdown operation to free up any resources
  3661. + * that were required.
  3662. + */
  3663. +static inline void
  3664. +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  3665. +{
  3666. + tlb_flush_mmu(tlb);
  3667. +
  3668. + /* keep the page table cache within bounds */
  3669. + check_pgt_cache();
  3670. +}
  3671. +
  3672. +/* tlb_remove_page
  3673. + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
  3674. + * while handling the additional races in SMP caused by other CPUs
  3675. + * caching valid mappings in their TLBs.
  3676. + */
  3677. +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  3678. +{
  3679. + tlb->need_flush = 1;
  3680. + free_page_and_swap_cache(page);
  3681. + return 1; /* avoid calling tlb_flush_mmu */
  3682. +}
  3683. +
  3684. +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  3685. +{
  3686. + __tlb_remove_page(tlb, page);
  3687. +}
  3688. +
  3689. +/**
  3690. + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  3691. + *
3692. + * Record the fact that PTEs were really unmapped in ->need_flush, so we can
  3693. + * later optimise away the tlb invalidate. This helps when userspace is
  3694. + * unmapping already-unmapped pages, which happens quite a lot.
  3695. + */
  3696. +#define tlb_remove_tlb_entry(tlb, ptep, address) \
  3697. + do { \
  3698. + tlb->need_flush = 1; \
  3699. + __tlb_remove_tlb_entry(tlb, ptep, address); \
  3700. + } while (0)
  3701. +
  3702. +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
  3703. +
  3704. +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
  3705. +
  3706. +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
  3707. +
  3708. +#define tlb_migrate_finish(mm) do {} while (0)
  3709. +
  3710. +#endif
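Putting the pieces of this header together, the generic mm code drives an unmap roughly as follows (a sketch of the calling sequence; mm, ptep, page and address are assumed to be in scope):

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm, start, end);      /* fullmm if start==0, end==~0 */
    tlb_remove_tlb_entry(&tlb, ptep, address); /* widens tlb.start..tlb.end   */
    tlb_remove_page(&tlb, page);               /* frees page, need_flush = 1  */
    tlb_finish_mmu(&tlb, start, end);          /* flush_tlb_mm_range, cleanup */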
  3711. --- /dev/null
  3712. +++ b/arch/um/include/uapi/asm/tlbflush.h
  3713. @@ -0,0 +1,31 @@
  3714. +/*
  3715. + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3716. + * Licensed under the GPL
  3717. + */
  3718. +
  3719. +#ifndef __UM_TLBFLUSH_H
  3720. +#define __UM_TLBFLUSH_H
  3721. +
  3722. +#include <linux/mm.h>
  3723. +
  3724. +/*
  3725. + * TLB flushing:
  3726. + *
  3727. + * - flush_tlb() flushes the current mm struct TLBs
  3728. + * - flush_tlb_all() flushes all processes TLBs
  3729. + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  3730. + * - flush_tlb_page(vma, vmaddr) flushes one page
  3731. + * - flush_tlb_kernel_vm() flushes the kernel vm area
  3732. + * - flush_tlb_range(vma, start, end) flushes a range of pages
  3733. + */
  3734. +
  3735. +extern void flush_tlb_all(void);
  3736. +extern void flush_tlb_mm(struct mm_struct *mm);
  3737. +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  3738. + unsigned long end);
  3739. +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
  3740. +extern void flush_tlb_kernel_vm(void);
  3741. +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  3742. +extern void __flush_tlb_one(unsigned long addr);
  3743. +
  3744. +#endif
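As a rule of thumb for picking among the primitives listed above (sketch; vma, mm and the addresses are assumed to be in scope):

    flush_tlb_page(vma, addr);          /* one user page               */
    flush_tlb_range(vma, start, end);   /* a span within one vma       */
    flush_tlb_mm(mm);                   /* everything mapped by one mm */
    flush_tlb_kernel_range(start, end); /* a span of kernel mappings   */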
  3745. --- /dev/null
  3746. +++ b/arch/um/include/uapi/asm/uaccess.h
  3747. @@ -0,0 +1,178 @@
  3748. +/*
  3749. + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  3750. + * Licensed under the GPL
  3751. + */
  3752. +
  3753. +#ifndef __UM_UACCESS_H
  3754. +#define __UM_UACCESS_H
  3755. +
  3756. +/* thread_info has a mm_segment_t in it, so put the definition up here */
  3757. +typedef struct {
  3758. + unsigned long seg;
  3759. +} mm_segment_t;
  3760. +
  3761. +#include <linux/thread_info.h>
  3762. +#include <linux/errno.h>
  3763. +#include <asm/processor.h>
  3764. +#include <asm/elf.h>
  3765. +
  3766. +#define VERIFY_READ 0
  3767. +#define VERIFY_WRITE 1
  3768. +
  3769. +/*
  3770. + * The fs value determines whether argument validity checking should be
3771. + * performed or not. If get_fs() == USER_DS, checking is performed; with
3772. + * get_fs() == KERNEL_DS, checking is bypassed.
  3773. + *
  3774. + * For historical reasons, these macros are grossly misnamed.
  3775. + */
  3776. +
  3777. +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
  3778. +
  3779. +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
  3780. +#define USER_DS MAKE_MM_SEG(TASK_SIZE)
  3781. +
  3782. +#define get_ds() (KERNEL_DS)
  3783. +#define get_fs() (current_thread_info()->addr_limit)
  3784. +#define set_fs(x) (current_thread_info()->addr_limit = (x))
  3785. +
  3786. +#define segment_eq(a, b) ((a).seg == (b).seg)
  3787. +
  3788. +#define __under_task_size(addr, size) \
  3789. + (((unsigned long) (addr) < TASK_SIZE) && \
  3790. + (((unsigned long) (addr) + (size)) < TASK_SIZE))
  3791. +
  3792. +#define __access_ok_vsyscall(type, addr, size) \
  3793. + ((type == VERIFY_READ) && \
  3794. + ((unsigned long) (addr) >= FIXADDR_USER_START) && \
  3795. + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
  3796. + ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
  3797. +
  3798. +#define __addr_range_nowrap(addr, size) \
  3799. + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
  3800. +
  3801. +#define access_ok(type, addr, size) \
  3802. + (__addr_range_nowrap(addr, size) && \
  3803. + (__under_task_size(addr, size) || \
  3804. + __access_ok_vsyscall(type, addr, size) || \
  3805. + segment_eq(get_fs(), KERNEL_DS)))
  3806. +
  3807. +extern int copy_from_user(void *to, const void __user *from, int n);
  3808. +extern int copy_to_user(void __user *to, const void *from, int n);
  3809. +
  3810. +/*
  3811. + * strncpy_from_user: - Copy a NUL terminated string from userspace.
  3812. + * @dst: Destination address, in kernel space. This buffer must be at
  3813. + * least @count bytes long.
  3814. + * @src: Source address, in user space.
  3815. + * @count: Maximum number of bytes to copy, including the trailing NUL.
  3816. + *
  3817. + * Copies a NUL-terminated string from userspace to kernel space.
  3818. + *
  3819. + * On success, returns the length of the string (not including the trailing
  3820. + * NUL).
  3821. + *
  3822. + * If access to userspace fails, returns -EFAULT (some data may have been
  3823. + * copied).
  3824. + *
  3825. + * If @count is smaller than the length of the string, copies @count bytes
  3826. + * and returns @count.
  3827. + */
  3828. +
  3829. +extern int strncpy_from_user(char *dst, const char __user *src, int count);
  3830. +
  3831. +/*
  3832. + * __clear_user: - Zero a block of memory in user space, with less checking.
3833. + * @mem: Destination address, in user space.
3834. + * @len: Number of bytes to zero.
  3835. + *
  3836. + * Zero a block of memory in user space. Caller must check
  3837. + * the specified block with access_ok() before calling this function.
  3838. + *
  3839. + * Returns number of bytes that could not be cleared.
  3840. + * On success, this will be zero.
  3841. + */
  3842. +extern int __clear_user(void __user *mem, int len);
  3843. +
  3844. +/*
  3845. + * clear_user: - Zero a block of memory in user space.
3846. + * @mem: Destination address, in user space.
3847. + * @len: Number of bytes to zero.
  3848. + *
  3849. + * Zero a block of memory in user space.
  3850. + *
  3851. + * Returns number of bytes that could not be cleared.
  3852. + * On success, this will be zero.
  3853. + */
  3854. +extern int clear_user(void __user *mem, int len);
  3855. +
  3856. +/*
3857. + * strnlen_user: - Get the size of a string in user space.
3858. + * @str: The string to measure.
3859. + * @len: The maximum valid length
3860. + *
3861. + * Get the size of a NUL-terminated string in user space.
3862. + *
3863. + * Returns the size of the string INCLUDING the terminating NUL.
3864. + * On exception, returns 0.
3865. + * If the string is too long, returns a value greater than @len.
  3866. + */
  3867. +extern int strnlen_user(const void __user *str, int len);
  3868. +
  3869. +#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
  3870. +
  3871. +#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
  3872. +
  3873. +#define __copy_to_user_inatomic __copy_to_user
  3874. +#define __copy_from_user_inatomic __copy_from_user
  3875. +
  3876. +#define __get_user(x, ptr) \
  3877. +({ \
  3878. + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
  3879. + __typeof__(x) __private_val; \
  3880. + int __private_ret = -EFAULT; \
  3881. + (x) = (__typeof__(*(__private_ptr)))0; \
  3882. + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
  3883. + sizeof(*(__private_ptr))) == 0) { \
  3884. + (x) = (__typeof__(*(__private_ptr))) __private_val; \
  3885. + __private_ret = 0; \
  3886. + } \
  3887. + __private_ret; \
  3888. +})
  3889. +
  3890. +#define get_user(x, ptr) \
  3891. +({ \
  3892. + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
  3893. + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
3894. + __get_user(x, private_ptr) : ((x) = (__typeof__(*private_ptr))0, -EFAULT)); \
  3895. +})
  3896. +
  3897. +#define __put_user(x, ptr) \
  3898. +({ \
  3899. + __typeof__(*(ptr)) __user *__private_ptr = ptr; \
  3900. + __typeof__(*(__private_ptr)) __private_val; \
  3901. + int __private_ret = -EFAULT; \
  3902. + __private_val = (__typeof__(*(__private_ptr))) (x); \
  3903. + if (__copy_to_user((__private_ptr), &__private_val, \
  3904. + sizeof(*(__private_ptr))) == 0) { \
  3905. + __private_ret = 0; \
  3906. + } \
  3907. + __private_ret; \
  3908. +})
  3909. +
  3910. +#define put_user(x, ptr) \
  3911. +({ \
  3912. + __typeof__(*(ptr)) __user *private_ptr = (ptr); \
  3913. + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
  3914. + __put_user(x, private_ptr) : -EFAULT); \
  3915. +})
  3916. +
  3917. +#define strlen_user(str) strnlen_user(str, ~0U >> 1)
  3918. +
  3919. +struct exception_table_entry
  3920. +{
  3921. + unsigned long insn;
  3922. + unsigned long fixup;
  3923. +};
  3924. +
  3925. +#endif
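Typical use of the accessors above (a hypothetical helper; the checking variants perform access_ok() themselves, so no separate check is needed):

    static int bump_user_flag(int __user *uptr)
    {
            int v;

            if (get_user(v, uptr))        /* checks access_ok(VERIFY_READ)  */
                    return -EFAULT;
            return put_user(v + 1, uptr); /* checks access_ok(VERIFY_WRITE) */
    }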