- From 5eac4d66049ab7d14a2b7311610c8cb85a2c1bf1 Mon Sep 17 00:00:00 2001
- From: Nicolas Thill <nico@openwrt.org>
- Date: Fri, 20 Mar 2015 00:31:06 +0100
- Subject: [PATCH] UM: fix make headers_install after UAPI header installation
- Signed-off-by: Nicolas Thill <nico@openwrt.org>
- ---
- From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
- From: Florian Fainelli <florian@openwrt.org>
- Date: Sun, 17 Mar 2013 20:12:10 +0100
- Subject: [PATCH] UM: fix make headers_install after UAPI header installation
- Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
- header installation and checking) breaks UML's make headers_install with
- the following:
- $ ARCH=um make headers_install
- CHK include/generated/uapi/linux/version.h
- UPD include/generated/uapi/linux/version.h
- HOSTCC scripts/basic/fixdep
- WRAP arch/um/include/generated/asm/bug.h
- [snip]
- WRAP arch/um/include/generated/asm/trace_clock.h
- SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
- SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
- SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
- SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
- HOSTCC scripts/unifdef
- Makefile:912: *** Headers not exportable for the um architecture. Stop.
- zsh: exit 2 ARCH=um make headers_install
- The reason is that the top-level Makefile does the following:
- $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
- $(error Headers not exportable for the $(SRCARCH) architecture))
- We end up in the else part of the $(if) statement because UML still uses
- the old path, arch/um/include/asm/Kbuild. This patch fixes the issue
- by moving the header files into arch/um/include/uapi/asm/, thus
- allowing headers_install (and the other make targets that check for
- uapi) to succeed.
- Signed-off-by: Florian Fainelli <florian@openwrt.org>
- ---
- Richard, this has been broken since 3.7; if you want me to send you
- separate patches for 3.7 and 3.8, let me know. Thanks!
-
- --- a/arch/um/include/asm/Kbuild
- +++ /dev/null
- @@ -1,31 +0,0 @@
- -generic-y += barrier.h
- -generic-y += bug.h
- -generic-y += clkdev.h
- -generic-y += cputime.h
- -generic-y += current.h
- -generic-y += delay.h
- -generic-y += device.h
- -generic-y += emergency-restart.h
- -generic-y += exec.h
- -generic-y += ftrace.h
- -generic-y += futex.h
- -generic-y += hardirq.h
- -generic-y += hash.h
- -generic-y += hw_irq.h
- -generic-y += io.h
- -generic-y += irq_regs.h
- -generic-y += irq_work.h
- -generic-y += kdebug.h
- -generic-y += mcs_spinlock.h
- -generic-y += mutex.h
- -generic-y += param.h
- -generic-y += pci.h
- -generic-y += percpu.h
- -generic-y += preempt.h
- -generic-y += scatterlist.h
- -generic-y += sections.h
- -generic-y += switch_to.h
- -generic-y += topology.h
- -generic-y += trace_clock.h
- -generic-y += word-at-a-time.h
- -generic-y += xor.h
- --- a/arch/um/include/asm/a.out-core.h
- +++ /dev/null
- @@ -1,27 +0,0 @@
- -/* a.out coredump register dumper
- - *
- - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- - * Written by David Howells (dhowells@redhat.com)
- - *
- - * This program is free software; you can redistribute it and/or
- - * modify it under the terms of the GNU General Public Licence
- - * as published by the Free Software Foundation; either version
- - * 2 of the Licence, or (at your option) any later version.
- - */
- -
- -#ifndef __UM_A_OUT_CORE_H
- -#define __UM_A_OUT_CORE_H
- -
- -#ifdef __KERNEL__
- -
- -#include <linux/user.h>
- -
- -/*
- - * fill in the user structure for an a.out core dump
- - */
- -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
- -{
- -}
- -
- -#endif /* __KERNEL__ */
- -#endif /* __UM_A_OUT_CORE_H */
- --- a/arch/um/include/asm/bugs.h
- +++ /dev/null
- @@ -1,6 +0,0 @@
- -#ifndef __UM_BUGS_H
- -#define __UM_BUGS_H
- -
- -void check_bugs(void);
- -
- -#endif
- --- a/arch/um/include/asm/cache.h
- +++ /dev/null
- @@ -1,17 +0,0 @@
- -#ifndef __UM_CACHE_H
- -#define __UM_CACHE_H
- -
- -
- -#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
- -# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
- -#elif defined(CONFIG_UML_X86) /* 64-bit */
- -# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
- -#else
- -/* XXX: this was taken from x86, now it's completely random. Luckily only
- - * affects SMP padding. */
- -# define L1_CACHE_SHIFT 5
- -#endif
- -
- -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
- -
- -#endif
- --- a/arch/um/include/asm/common.lds.S
- +++ /dev/null
- @@ -1,107 +0,0 @@
- -#include <asm-generic/vmlinux.lds.h>
- -
- - .fini : { *(.fini) } =0x9090
- - _etext = .;
- - PROVIDE (etext = .);
- -
- - . = ALIGN(4096);
- - _sdata = .;
- - PROVIDE (sdata = .);
- -
- - RODATA
- -
- - .unprotected : { *(.unprotected) }
- - . = ALIGN(4096);
- - PROVIDE (_unprotected_end = .);
- -
- - . = ALIGN(4096);
- - .note : { *(.note.*) }
- - EXCEPTION_TABLE(0)
- -
- - BUG_TABLE
- -
- - .uml.setup.init : {
- - __uml_setup_start = .;
- - *(.uml.setup.init)
- - __uml_setup_end = .;
- - }
- -
- - .uml.help.init : {
- - __uml_help_start = .;
- - *(.uml.help.init)
- - __uml_help_end = .;
- - }
- -
- - .uml.postsetup.init : {
- - __uml_postsetup_start = .;
- - *(.uml.postsetup.init)
- - __uml_postsetup_end = .;
- - }
- -
- - .init.setup : {
- - INIT_SETUP(0)
- - }
- -
- - PERCPU_SECTION(32)
- -
- - .initcall.init : {
- - INIT_CALLS
- - }
- -
- - .con_initcall.init : {
- - CON_INITCALL
- - }
- -
- - .uml.initcall.init : {
- - __uml_initcall_start = .;
- - *(.uml.initcall.init)
- - __uml_initcall_end = .;
- - }
- -
- - SECURITY_INIT
- -
- - .exitcall : {
- - __exitcall_begin = .;
- - *(.exitcall.exit)
- - __exitcall_end = .;
- - }
- -
- - .uml.exitcall : {
- - __uml_exitcall_begin = .;
- - *(.uml.exitcall.exit)
- - __uml_exitcall_end = .;
- - }
- -
- - . = ALIGN(4);
- - .altinstructions : {
- - __alt_instructions = .;
- - *(.altinstructions)
- - __alt_instructions_end = .;
- - }
- - .altinstr_replacement : { *(.altinstr_replacement) }
- - /* .exit.text is discarded at runtime, not link time, to deal with references
- - from .altinstructions and .eh_frame */
- - .exit.text : { *(.exit.text) }
- - .exit.data : { *(.exit.data) }
- -
- - .preinit_array : {
- - __preinit_array_start = .;
- - *(.preinit_array)
- - __preinit_array_end = .;
- - }
- - .init_array : {
- - __init_array_start = .;
- - *(.init_array)
- - __init_array_end = .;
- - }
- - .fini_array : {
- - __fini_array_start = .;
- - *(.fini_array)
- - __fini_array_end = .;
- - }
- -
- - . = ALIGN(4096);
- - .init.ramfs : {
- - INIT_RAM_FS
- - }
- -
- --- a/arch/um/include/asm/dma.h
- +++ /dev/null
- @@ -1,10 +0,0 @@
- -#ifndef __UM_DMA_H
- -#define __UM_DMA_H
- -
- -#include <asm/io.h>
- -
- -extern unsigned long uml_physmem;
- -
- -#define MAX_DMA_ADDRESS (uml_physmem)
- -
- -#endif
- --- a/arch/um/include/asm/fixmap.h
- +++ /dev/null
- @@ -1,60 +0,0 @@
- -#ifndef __UM_FIXMAP_H
- -#define __UM_FIXMAP_H
- -
- -#include <asm/processor.h>
- -#include <asm/kmap_types.h>
- -#include <asm/archparam.h>
- -#include <asm/page.h>
- -#include <linux/threads.h>
- -
- -/*
- - * Here we define all the compile-time 'special' virtual
- - * addresses. The point is to have a constant address at
- - * compile time, but to set the physical address only
- - * in the boot process. We allocate these special addresses
- - * from the end of virtual memory (0xfffff000) backwards.
- - * Also this lets us do fail-safe vmalloc(): we
- - * can guarantee that these special addresses and
- - * vmalloc()-ed addresses never overlap.
- - *
- - * These 'compile-time allocated' memory buffers are
- - * fixed-size 4k pages (or larger if used with an increment
- - * higher than 1). Use fixmap_set(idx,phys) to associate
- - * physical memory with fixmap indices.
- - *
- - * TLB entries of such buffers will not be flushed across
- - * task switches.
- - */
- -
- -/*
- - * On UP currently we will have no trace of the fixmap mechanism,
- - * no page table allocations, etc. This might change in the
- - * future, say framebuffers for the console driver(s) could be
- - * fix-mapped?
- - */
- -enum fixed_addresses {
- -#ifdef CONFIG_HIGHMEM
- - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
- -#endif
- - __end_of_fixed_addresses
- -};
- -
- -extern void __set_fixmap (enum fixed_addresses idx,
- - unsigned long phys, pgprot_t flags);
- -
- -/*
- - * used by vmalloc.c.
- - *
- - * Leave one empty page between vmalloc'ed areas and
- - * the start of the fixmap, and leave one page empty
- - * at the top of mem..
- - */
- -
- -#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
- -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
- -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
- -
- -#include <asm-generic/fixmap.h>
- -
- -#endif
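-
- The fixmap scheme above is easiest to see as address arithmetic: indices
- allocate pages downward from FIXADDR_TOP. A minimal sketch of the lookup
- that asm-generic/fixmap.h (included above) provides; the helper name here
- is illustrative, the real one is fix_to_virt():
-
-   /* sketch: fixmap index -> compile-time-constant virtual address */
-   static inline unsigned long fix_to_virt_sketch(unsigned int idx)
-   {
-           /* index 0 sits at FIXADDR_TOP, each higher index one page down */
-           return FIXADDR_TOP - (idx << PAGE_SHIFT);
-   }
-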
- --- a/arch/um/include/asm/irq.h
- +++ /dev/null
- @@ -1,23 +0,0 @@
- -#ifndef __UM_IRQ_H
- -#define __UM_IRQ_H
- -
- -#define TIMER_IRQ 0
- -#define UMN_IRQ 1
- -#define CONSOLE_IRQ 2
- -#define CONSOLE_WRITE_IRQ 3
- -#define UBD_IRQ 4
- -#define UM_ETH_IRQ 5
- -#define SSL_IRQ 6
- -#define SSL_WRITE_IRQ 7
- -#define ACCEPT_IRQ 8
- -#define MCONSOLE_IRQ 9
- -#define WINCH_IRQ 10
- -#define SIGIO_WRITE_IRQ 11
- -#define TELNETD_IRQ 12
- -#define XTERM_IRQ 13
- -#define RANDOM_IRQ 14
- -
- -#define LAST_IRQ RANDOM_IRQ
- -#define NR_IRQS (LAST_IRQ + 1)
- -
- -#endif
- --- a/arch/um/include/asm/irqflags.h
- +++ /dev/null
- @@ -1,42 +0,0 @@
- -#ifndef __UM_IRQFLAGS_H
- -#define __UM_IRQFLAGS_H
- -
- -extern int get_signals(void);
- -extern int set_signals(int enable);
- -extern void block_signals(void);
- -extern void unblock_signals(void);
- -
- -static inline unsigned long arch_local_save_flags(void)
- -{
- - return get_signals();
- -}
- -
- -static inline void arch_local_irq_restore(unsigned long flags)
- -{
- - set_signals(flags);
- -}
- -
- -static inline void arch_local_irq_enable(void)
- -{
- - unblock_signals();
- -}
- -
- -static inline void arch_local_irq_disable(void)
- -{
- - block_signals();
- -}
- -
- -static inline unsigned long arch_local_irq_save(void)
- -{
- - unsigned long flags;
- - flags = arch_local_save_flags();
- - arch_local_irq_disable();
- - return flags;
- -}
- -
- -static inline bool arch_irqs_disabled(void)
- -{
- - return arch_local_save_flags() == 0;
- -}
- -
- -#endif
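-
- UML has no hardware interrupt flag; the helpers above map the usual
- local-IRQ contract onto blocking and unblocking host signals. A minimal
- usage sketch built only on the primitives declared above (the function
- name is illustrative):
-
-   /* sketch: the standard irqsave critical-section pattern under UML */
-   static void sketch_critical_section(void)
-   {
-           unsigned long flags;
-
-           flags = arch_local_irq_save();  /* block signals, save old state */
-           /* ... work that must not be interrupted by a host signal ... */
-           arch_local_irq_restore(flags);  /* unblock only if enabled before */
-   }
-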
- --- a/arch/um/include/asm/kmap_types.h
- +++ /dev/null
- @@ -1,13 +0,0 @@
- -/*
- - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_KMAP_TYPES_H
- -#define __UM_KMAP_TYPES_H
- -
- -/* No more #include "asm/arch/kmap_types.h" ! */
- -
- -#define KM_TYPE_NR 14
- -
- -#endif
- --- a/arch/um/include/asm/kvm_para.h
- +++ /dev/null
- @@ -1 +0,0 @@
- -#include <asm-generic/kvm_para.h>
- --- a/arch/um/include/asm/mmu.h
- +++ /dev/null
- @@ -1,24 +0,0 @@
- -/*
- - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __ARCH_UM_MMU_H
- -#define __ARCH_UM_MMU_H
- -
- -#include <mm_id.h>
- -#include <asm/mm_context.h>
- -
- -typedef struct mm_context {
- - struct mm_id id;
- - struct uml_arch_mm_context arch;
- - struct page *stub_pages[2];
- -} mm_context_t;
- -
- -extern void __switch_mm(struct mm_id * mm_idp);
- -
- -/* Avoid tangled inclusion with asm/ldt.h */
- -extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
- -extern void free_ldt(struct mm_context *mm);
- -
- -#endif
- --- a/arch/um/include/asm/mmu_context.h
- +++ /dev/null
- @@ -1,58 +0,0 @@
- -/*
- - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_MMU_CONTEXT_H
- -#define __UM_MMU_CONTEXT_H
- -
- -#include <linux/sched.h>
- -#include <asm/mmu.h>
- -
- -extern void uml_setup_stubs(struct mm_struct *mm);
- -extern void arch_exit_mmap(struct mm_struct *mm);
- -
- -#define deactivate_mm(tsk,mm) do { } while (0)
- -
- -extern void force_flush_all(void);
- -
- -static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
- -{
- - /*
- - * This is called by fs/exec.c and sys_unshare()
- - * when the new ->mm is used for the first time.
- - */
- - __switch_mm(&new->context.id);
- - down_write(&new->mmap_sem);
- - uml_setup_stubs(new);
- - up_write(&new->mmap_sem);
- -}
- -
- -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- - struct task_struct *tsk)
- -{
- - unsigned cpu = smp_processor_id();
- -
- - if(prev != next){
- - cpumask_clear_cpu(cpu, mm_cpumask(prev));
- - cpumask_set_cpu(cpu, mm_cpumask(next));
- - if(next != &init_mm)
- - __switch_mm(&next->context.id);
- - }
- -}
- -
- -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
- -{
- - uml_setup_stubs(mm);
- -}
- -
- -static inline void enter_lazy_tlb(struct mm_struct *mm,
- - struct task_struct *tsk)
- -{
- -}
- -
- -extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
- -
- -extern void destroy_context(struct mm_struct *mm);
- -
- -#endif
- --- a/arch/um/include/asm/page.h
- +++ /dev/null
- @@ -1,127 +0,0 @@
- -/*
- - * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- - * Copyright 2003 PathScale, Inc.
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PAGE_H
- -#define __UM_PAGE_H
- -
- -#include <linux/const.h>
- -
- -/* PAGE_SHIFT determines the page size */
- -#define PAGE_SHIFT 12
- -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
- -#define PAGE_MASK (~(PAGE_SIZE-1))
- -
- -#ifndef __ASSEMBLY__
- -
- -struct page;
- -
- -#include <linux/types.h>
- -#include <asm/vm-flags.h>
- -
- -/*
- - * These are used to make use of C type-checking..
- - */
- -
- -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
- -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
- -
- -#define clear_user_page(page, vaddr, pg) clear_page(page)
- -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
- -
- -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
- -
- -typedef struct { unsigned long pte_low, pte_high; } pte_t;
- -typedef struct { unsigned long pmd; } pmd_t;
- -typedef struct { unsigned long pgd; } pgd_t;
- -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
- -
- -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
- -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
- -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
- -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
- - smp_wmb(); \
- - (to).pte_low = (from).pte_low; })
- -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
- -#define pte_set_val(pte, phys, prot) \
- - ({ (pte).pte_high = (phys) >> 32; \
- - (pte).pte_low = (phys) | pgprot_val(prot); })
- -
- -#define pmd_val(x) ((x).pmd)
- -#define __pmd(x) ((pmd_t) { (x) } )
- -
- -typedef unsigned long long pfn_t;
- -typedef unsigned long long phys_t;
- -
- -#else
- -
- -typedef struct { unsigned long pte; } pte_t;
- -typedef struct { unsigned long pgd; } pgd_t;
- -
- -#ifdef CONFIG_3_LEVEL_PGTABLES
- -typedef struct { unsigned long pmd; } pmd_t;
- -#define pmd_val(x) ((x).pmd)
- -#define __pmd(x) ((pmd_t) { (x) } )
- -#endif
- -
- -#define pte_val(x) ((x).pte)
- -
- -
- -#define pte_get_bits(p, bits) ((p).pte & (bits))
- -#define pte_set_bits(p, bits) ((p).pte |= (bits))
- -#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
- -#define pte_copy(to, from) ((to).pte = (from).pte)
- -#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
- -#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
- -
- -typedef unsigned long pfn_t;
- -typedef unsigned long phys_t;
- -
- -#endif
- -
- -typedef struct { unsigned long pgprot; } pgprot_t;
- -
- -typedef struct page *pgtable_t;
- -
- -#define pgd_val(x) ((x).pgd)
- -#define pgprot_val(x) ((x).pgprot)
- -
- -#define __pte(x) ((pte_t) { (x) } )
- -#define __pgd(x) ((pgd_t) { (x) } )
- -#define __pgprot(x) ((pgprot_t) { (x) } )
- -
- -extern unsigned long uml_physmem;
- -
- -#define PAGE_OFFSET (uml_physmem)
- -#define KERNELBASE PAGE_OFFSET
- -
- -#define __va_space (8*1024*1024)
- -
- -#include <mem.h>
- -
- -/* Cast to unsigned long before casting to void * to avoid a warning from
- - * mmap_kmem about cutting a long long down to a void *. Not sure that
- - * casting is the right thing, but 32-bit UML can't have 64-bit virtual
- - * addresses
- - */
- -#define __pa(virt) to_phys((void *) (unsigned long) (virt))
- -#define __va(phys) to_virt((unsigned long) (phys))
- -
- -#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
- -#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
- -
- -#define pfn_valid(pfn) ((pfn) < max_mapnr)
- -#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
- -
- -#include <asm-generic/memory_model.h>
- -#include <asm-generic/getorder.h>
- -
- -#endif /* __ASSEMBLY__ */
- -
- -#ifdef CONFIG_X86_32
- -#define __HAVE_ARCH_GATE_AREA 1
- -#endif
- -
- -#endif /* __UM_PAGE_H */
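-
- The conversion macros above compose into round trips; a hedged sketch
- stating the invariants (the function name is illustrative, BUG_ON is
- used only to spell them out):
-
-   /* sketch: kernel virtual -> physical -> pfn, and back again */
-   static void sketch_addr_conversions(void *kaddr)
-   {
-           phys_t phys = __pa(kaddr);              /* to_phys() underneath */
-           pfn_t pfn = phys_to_pfn(phys);          /* phys >> PAGE_SHIFT */
-
-           BUG_ON(pfn_to_phys(pfn) != phys);       /* pfn << PAGE_SHIFT */
-           BUG_ON(__va(phys) != kaddr);            /* to_virt() underneath */
-   }
-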
- --- a/arch/um/include/asm/pgalloc.h
- +++ /dev/null
- @@ -1,61 +0,0 @@
- -/*
- - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- - * Copyright 2003 PathScale, Inc.
- - * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PGALLOC_H
- -#define __UM_PGALLOC_H
- -
- -#include <linux/mm.h>
- -
- -#define pmd_populate_kernel(mm, pmd, pte) \
- - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
- -
- -#define pmd_populate(mm, pmd, pte) \
- - set_pmd(pmd, __pmd(_PAGE_TABLE + \
- - ((unsigned long long)page_to_pfn(pte) << \
- - (unsigned long long) PAGE_SHIFT)))
- -#define pmd_pgtable(pmd) pmd_page(pmd)
- -
- -/*
- - * Allocate and free page tables.
- - */
- -extern pgd_t *pgd_alloc(struct mm_struct *);
- -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
- -
- -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
- -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
- -
- -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
- -{
- - free_page((unsigned long) pte);
- -}
- -
- -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
- -{
- - pgtable_page_dtor(pte);
- - __free_page(pte);
- -}
- -
- -#define __pte_free_tlb(tlb,pte, address) \
- -do { \
- - pgtable_page_dtor(pte); \
- - tlb_remove_page((tlb),(pte)); \
- -} while (0)
- -
- -#ifdef CONFIG_3_LEVEL_PGTABLES
- -
- -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
- -{
- - free_page((unsigned long)pmd);
- -}
- -
- -#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
- -#endif
- -
- -#define check_pgt_cache() do { } while (0)
- -
- -#endif
- -
- --- a/arch/um/include/asm/pgtable-2level.h
- +++ /dev/null
- @@ -1,53 +0,0 @@
- -/*
- - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- - * Copyright 2003 PathScale, Inc.
- - * Derived from include/asm-i386/pgtable.h
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PGTABLE_2LEVEL_H
- -#define __UM_PGTABLE_2LEVEL_H
- -
- -#include <asm-generic/pgtable-nopmd.h>
- -
- -/* PGDIR_SHIFT determines what a third-level page table entry can map */
- -
- -#define PGDIR_SHIFT 22
- -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
- -#define PGDIR_MASK (~(PGDIR_SIZE-1))
- -
- -/*
- - * entries per page directory level: the i386 is two-level, so
- - * we don't really have any PMD directory physically.
- - */
- -#define PTRS_PER_PTE 1024
- -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
- -#define PTRS_PER_PGD 1024
- -#define FIRST_USER_ADDRESS 0
- -
- -#define pte_ERROR(e) \
- - printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- - pte_val(e))
- -#define pgd_ERROR(e) \
- - printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- - pgd_val(e))
- -
- -static inline int pgd_newpage(pgd_t pgd) { return 0; }
- -static inline void pgd_mkuptodate(pgd_t pgd) { }
- -
- -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
- -
- -#define pte_pfn(x) phys_to_pfn(pte_val(x))
- -#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
- -#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
- -
- -/*
- - * Bits 0 through 4 are taken
- - */
- -#define PTE_FILE_MAX_BITS 27
- -
- -#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
- -
- -#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
- -
- -#endif
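-
- Since bits 0 through 4 of a non-present PTE carry flags, the nonlinear
- file offset is shifted up by 5, leaving PTE_FILE_MAX_BITS (27) usable
- bits. A hedged round-trip sketch (pte_file() comes from pgtable.h; the
- function name is illustrative):
-
-   /* sketch: encode and decode a nonlinear-file pte on the 2-level layout */
-   static void sketch_file_pte(unsigned long off)  /* off < (1UL << 27) */
-   {
-           pte_t pte = pgoff_to_pte(off);          /* (off << 5) + _PAGE_FILE */
-
-           BUG_ON(!pte_file(pte));                 /* _PAGE_FILE marks it */
-           BUG_ON(pte_to_pgoff(pte) != off);       /* >> 5 recovers the offset */
-   }
-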
- --- a/arch/um/include/asm/pgtable-3level.h
- +++ /dev/null
- @@ -1,136 +0,0 @@
- -/*
- - * Copyright 2003 PathScale Inc
- - * Derived from include/asm-i386/pgtable.h
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PGTABLE_3LEVEL_H
- -#define __UM_PGTABLE_3LEVEL_H
- -
- -#include <asm-generic/pgtable-nopud.h>
- -
- -/* PGDIR_SHIFT determines what a third-level page table entry can map */
- -
- -#ifdef CONFIG_64BIT
- -#define PGDIR_SHIFT 30
- -#else
- -#define PGDIR_SHIFT 31
- -#endif
- -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
- -#define PGDIR_MASK (~(PGDIR_SIZE-1))
- -
- -/* PMD_SHIFT determines the size of the area a second-level page table can
- - * map
- - */
- -
- -#define PMD_SHIFT 21
- -#define PMD_SIZE (1UL << PMD_SHIFT)
- -#define PMD_MASK (~(PMD_SIZE-1))
- -
- -/*
- - * entries per page directory level
- - */
- -
- -#define PTRS_PER_PTE 512
- -#ifdef CONFIG_64BIT
- -#define PTRS_PER_PMD 512
- -#define PTRS_PER_PGD 512
- -#else
- -#define PTRS_PER_PMD 1024
- -#define PTRS_PER_PGD 1024
- -#endif
- -
- -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
- -#define FIRST_USER_ADDRESS 0
- -
- -#define pte_ERROR(e) \
- - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- - pte_val(e))
- -#define pmd_ERROR(e) \
- - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- - pmd_val(e))
- -#define pgd_ERROR(e) \
- - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- - pgd_val(e))
- -
- -#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
- -#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
- -#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
- -#define pud_populate(mm, pud, pmd) \
- - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
- -
- -#ifdef CONFIG_64BIT
- -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
- -#else
- -#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
- -#endif
- -
- -static inline int pgd_newpage(pgd_t pgd)
- -{
- - return(pgd_val(pgd) & _PAGE_NEWPAGE);
- -}
- -
- -static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
- -
- -#ifdef CONFIG_64BIT
- -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
- -#else
- -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
- -#endif
- -
- -struct mm_struct;
- -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
- -
- -static inline void pud_clear (pud_t *pud)
- -{
- - set_pud(pud, __pud(_PAGE_NEWPAGE));
- -}
- -
- -#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
- -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
- -
- -/* Find an entry in the second-level page table.. */
- -#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
- - pmd_index(address))
- -
- -static inline unsigned long pte_pfn(pte_t pte)
- -{
- - return phys_to_pfn(pte_val(pte));
- -}
- -
- -static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
- -{
- - pte_t pte;
- - phys_t phys = pfn_to_phys(page_nr);
- -
- - pte_set_val(pte, phys, pgprot);
- - return pte;
- -}
- -
- -static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
- -{
- - return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
- -}
- -
- -/*
- - * Bits 0 through 3 are taken in the low part of the pte,
- - * put the 32 bits of offset into the high part.
- - */
- -#define PTE_FILE_MAX_BITS 32
- -
- -#ifdef CONFIG_64BIT
- -
- -#define pte_to_pgoff(p) ((p).pte >> 32)
- -
- -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
- -
- -#else
- -
- -#define pte_to_pgoff(pte) ((pte).pte_high)
- -
- -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
- -
- -#endif
- -
- -#endif
- -
- --- a/arch/um/include/asm/pgtable.h
- +++ /dev/null
- @@ -1,375 +0,0 @@
- -/*
- - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Copyright 2003 PathScale, Inc.
- - * Derived from include/asm-i386/pgtable.h
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PGTABLE_H
- -#define __UM_PGTABLE_H
- -
- -#include <asm/fixmap.h>
- -
- -#define _PAGE_PRESENT 0x001
- -#define _PAGE_NEWPAGE 0x002
- -#define _PAGE_NEWPROT 0x004
- -#define _PAGE_RW 0x020
- -#define _PAGE_USER 0x040
- -#define _PAGE_ACCESSED 0x080
- -#define _PAGE_DIRTY 0x100
- -/* If _PAGE_PRESENT is clear, we use these: */
- -#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
- -#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
- - pte_present gives true */
- -
- -#ifdef CONFIG_3_LEVEL_PGTABLES
- -#include <asm/pgtable-3level.h>
- -#else
- -#include <asm/pgtable-2level.h>
- -#endif
- -
- -extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
- -
- -/* zero page used for uninitialized stuff */
- -extern unsigned long *empty_zero_page;
- -
- -#define pgtable_cache_init() do ; while (0)
- -
- -/* Just any arbitrary offset to the start of the vmalloc VM area: the
- - * current 8MB value just means that there will be an 8MB "hole" after the
- - * physical memory until the kernel virtual memory starts. That means that
- - * any out-of-bounds memory accesses will hopefully be caught.
- - * The vmalloc() routines leave a hole of 4kB between each vmalloced
- - * area for the same reason. ;)
- - */
- -
- -extern unsigned long end_iomem;
- -
- -#define VMALLOC_OFFSET (__va_space)
- -#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
- -#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
- -#ifdef CONFIG_HIGHMEM
- -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
- -#else
- -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
- -#endif
- -#define MODULES_VADDR VMALLOC_START
- -#define MODULES_END VMALLOC_END
- -#define MODULES_LEN (MODULES_VADDR - MODULES_END)
- -
- -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
- -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
- -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
- -#define __PAGE_KERNEL_EXEC \
- - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
- -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
- -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
- -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
- -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
- -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
- -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
- -
- -/*
- - * The i386 can't do page protection for execute, and considers execute
- - * the same as read.
- - * Also, write permissions imply read permissions. This is the closest we can
- - * get..
- - */
- -#define __P000 PAGE_NONE
- -#define __P001 PAGE_READONLY
- -#define __P010 PAGE_COPY
- -#define __P011 PAGE_COPY
- -#define __P100 PAGE_READONLY
- -#define __P101 PAGE_READONLY
- -#define __P110 PAGE_COPY
- -#define __P111 PAGE_COPY
- -
- -#define __S000 PAGE_NONE
- -#define __S001 PAGE_READONLY
- -#define __S010 PAGE_SHARED
- -#define __S011 PAGE_SHARED
- -#define __S100 PAGE_READONLY
- -#define __S101 PAGE_READONLY
- -#define __S110 PAGE_SHARED
- -#define __S111 PAGE_SHARED
- -
- -/*
- - * ZERO_PAGE is a global shared page that is always zero: used
- - * for zero-mapped memory areas etc..
- - */
- -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
- -
- -#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
- -
- -#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
- -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
- -
- -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
- -#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
- -
- -#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
- -#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
- -
- -#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
- -#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
- -
- -#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
- -
- -#define pte_page(x) pfn_to_page(pte_pfn(x))
- -
- -#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
- -
- -/*
- - * =================================
- - * Flags checking section.
- - * =================================
- - */
- -
- -static inline int pte_none(pte_t pte)
- -{
- - return pte_is_zero(pte);
- -}
- -
- -/*
- - * The following only work if pte_present() is true.
- - * Undefined behaviour if not..
- - */
- -static inline int pte_read(pte_t pte)
- -{
- - return((pte_get_bits(pte, _PAGE_USER)) &&
- - !(pte_get_bits(pte, _PAGE_PROTNONE)));
- -}
- -
- -static inline int pte_exec(pte_t pte){
- - return((pte_get_bits(pte, _PAGE_USER)) &&
- - !(pte_get_bits(pte, _PAGE_PROTNONE)));
- -}
- -
- -static inline int pte_write(pte_t pte)
- -{
- - return((pte_get_bits(pte, _PAGE_RW)) &&
- - !(pte_get_bits(pte, _PAGE_PROTNONE)));
- -}
- -
- -/*
- - * The following only works if pte_present() is not true.
- - */
- -static inline int pte_file(pte_t pte)
- -{
- - return pte_get_bits(pte, _PAGE_FILE);
- -}
- -
- -static inline int pte_dirty(pte_t pte)
- -{
- - return pte_get_bits(pte, _PAGE_DIRTY);
- -}
- -
- -static inline int pte_young(pte_t pte)
- -{
- - return pte_get_bits(pte, _PAGE_ACCESSED);
- -}
- -
- -static inline int pte_newpage(pte_t pte)
- -{
- - return pte_get_bits(pte, _PAGE_NEWPAGE);
- -}
- -
- -static inline int pte_newprot(pte_t pte)
- -{
- - return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
- -}
- -
- -static inline int pte_special(pte_t pte)
- -{
- - return 0;
- -}
- -
- -/*
- - * =================================
- - * Flags setting section.
- - * =================================
- - */
- -
- -static inline pte_t pte_mknewprot(pte_t pte)
- -{
- - pte_set_bits(pte, _PAGE_NEWPROT);
- - return(pte);
- -}
- -
- -static inline pte_t pte_mkclean(pte_t pte)
- -{
- - pte_clear_bits(pte, _PAGE_DIRTY);
- - return(pte);
- -}
- -
- -static inline pte_t pte_mkold(pte_t pte)
- -{
- - pte_clear_bits(pte, _PAGE_ACCESSED);
- - return(pte);
- -}
- -
- -static inline pte_t pte_wrprotect(pte_t pte)
- -{
- - pte_clear_bits(pte, _PAGE_RW);
- - return(pte_mknewprot(pte));
- -}
- -
- -static inline pte_t pte_mkread(pte_t pte)
- -{
- - pte_set_bits(pte, _PAGE_USER);
- - return(pte_mknewprot(pte));
- -}
- -
- -static inline pte_t pte_mkdirty(pte_t pte)
- -{
- - pte_set_bits(pte, _PAGE_DIRTY);
- - return(pte);
- -}
- -
- -static inline pte_t pte_mkyoung(pte_t pte)
- -{
- - pte_set_bits(pte, _PAGE_ACCESSED);
- - return(pte);
- -}
- -
- -static inline pte_t pte_mkwrite(pte_t pte)
- -{
- - pte_set_bits(pte, _PAGE_RW);
- - return(pte_mknewprot(pte));
- -}
- -
- -static inline pte_t pte_mkuptodate(pte_t pte)
- -{
- - pte_clear_bits(pte, _PAGE_NEWPAGE);
- - if(pte_present(pte))
- - pte_clear_bits(pte, _PAGE_NEWPROT);
- - return(pte);
- -}
- -
- -static inline pte_t pte_mknewpage(pte_t pte)
- -{
- - pte_set_bits(pte, _PAGE_NEWPAGE);
- - return(pte);
- -}
- -
- -static inline pte_t pte_mkspecial(pte_t pte)
- -{
- - return(pte);
- -}
- -
- -static inline void set_pte(pte_t *pteptr, pte_t pteval)
- -{
- - pte_copy(*pteptr, pteval);
- -
- - /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- - * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- - * mapped pages.
- - */
- -
- - *pteptr = pte_mknewpage(*pteptr);
- - if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
- -}
- -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
- -
- -#define __HAVE_ARCH_PTE_SAME
- -static inline int pte_same(pte_t pte_a, pte_t pte_b)
- -{
- - return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
- -}
- -
- -/*
- - * Conversion functions: convert a page and protection to a page entry,
- - * and a page entry and page directory to the page they refer to.
- - */
- -
- -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
- -#define __virt_to_page(virt) phys_to_page(__pa(virt))
- -#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
- -#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
- -
- -#define mk_pte(page, pgprot) \
- - ({ pte_t pte; \
- - \
- - pte_set_val(pte, page_to_phys(page), (pgprot)); \
- - if (pte_present(pte)) \
- - pte_mknewprot(pte_mknewpage(pte)); \
- - pte;})
- -
- -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- -{
- - pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
- - return pte;
- -}
- -
- -/*
- - * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
- - *
- - * this macro returns the index of the entry in the pgd page which would
- - * control the given virtual address
- - */
- -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
- -
- -/*
- - * pgd_offset() returns a (pgd_t *)
- - * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
- - */
- -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
- -
- -/*
- - * a shortcut which implies the use of the kernel's pgd, instead
- - * of a process's
- - */
- -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
- -
- -/*
- - * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
- - *
- - * this macro returns the index of the entry in the pmd page which would
- - * control the given virtual address
- - */
- -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
- -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
- -
- -/*
- - * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
- - *
- - * this macro returns the index of the entry in the pte page which would
- - * control the given virtual address
- - */
- -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
- -#define pte_offset_kernel(dir, address) \
- - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
- -#define pte_offset_map(dir, address) \
- - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
- -#define pte_unmap(pte) do { } while (0)
- -
- -struct mm_struct;
- -extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
- -
- -#define update_mmu_cache(vma,address,ptep) do ; while (0)
- -
- -/* Encode and de-code a swap entry */
- -#define __swp_type(x) (((x).val >> 5) & 0x1f)
- -#define __swp_offset(x) ((x).val >> 11)
- -
- -#define __swp_entry(type, offset) \
- - ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
- -#define __pte_to_swp_entry(pte) \
- - ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
- -#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
- -
- -#define kern_addr_valid(addr) (1)
- -
- -#include <asm-generic/pgtable.h>
- -
- -/* Clear a kernel PTE and flush it from the TLB */
- -#define kpte_clear_flush(ptep, vaddr) \
- -do { \
- - pte_clear(&init_mm, (vaddr), (ptep)); \
- - __flush_tlb_one((vaddr)); \
- -} while (0)
- -
- -#endif
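-
- The swap-entry macros above pack a 5-bit type at bit 5 and the swap
- offset from bit 11 upward, leaving the low flag bits (including
- _PAGE_NEWPAGE and _PAGE_FILE) untouched. A hedged round-trip sketch
- (swp_entry_t is the generic kernel type; the function name is
- illustrative):
-
-   /* sketch: encode a (type, offset) pair and read it back */
-   static void sketch_swp_encoding(unsigned int type, unsigned long offset)
-   {
-           swp_entry_t entry = __swp_entry(type, offset);
-
-           BUG_ON(__swp_type(entry) != (type & 0x1f));  /* 5-bit type field */
-           BUG_ON(__swp_offset(entry) != offset);       /* offset from bit 11 */
-   }
-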
- --- a/arch/um/include/asm/processor-generic.h
- +++ /dev/null
- @@ -1,115 +0,0 @@
- -/*
- - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PROCESSOR_GENERIC_H
- -#define __UM_PROCESSOR_GENERIC_H
- -
- -struct pt_regs;
- -
- -struct task_struct;
- -
- -#include <asm/ptrace.h>
- -#include <registers.h>
- -#include <sysdep/archsetjmp.h>
- -
- -#include <linux/prefetch.h>
- -
- -struct mm_struct;
- -
- -struct thread_struct {
- - struct pt_regs regs;
- - struct pt_regs *segv_regs;
- - int singlestep_syscall;
- - void *fault_addr;
- - jmp_buf *fault_catcher;
- - struct task_struct *prev_sched;
- - struct arch_thread arch;
- - jmp_buf switch_buf;
- - struct {
- - int op;
- - union {
- - struct {
- - int pid;
- - } fork, exec;
- - struct {
- - int (*proc)(void *);
- - void *arg;
- - } thread;
- - struct {
- - void (*proc)(void *);
- - void *arg;
- - } cb;
- - } u;
- - } request;
- -};
- -
- -#define INIT_THREAD \
- -{ \
- - .regs = EMPTY_REGS, \
- - .fault_addr = NULL, \
- - .prev_sched = NULL, \
- - .arch = INIT_ARCH_THREAD, \
- - .request = { 0 } \
- -}
- -
- -static inline void release_thread(struct task_struct *task)
- -{
- -}
- -
- -extern unsigned long thread_saved_pc(struct task_struct *t);
- -
- -static inline void mm_copy_segments(struct mm_struct *from_mm,
- - struct mm_struct *new_mm)
- -{
- -}
- -
- -#define init_stack (init_thread_union.stack)
- -
- -/*
- - * User space process size: 3GB (default).
- - */
- -extern unsigned long task_size;
- -
- -#define TASK_SIZE (task_size)
- -
- -#undef STACK_TOP
- -#undef STACK_TOP_MAX
- -
- -extern unsigned long stacksizelim;
- -
- -#define STACK_ROOM (stacksizelim)
- -#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
- -#define STACK_TOP_MAX STACK_TOP
- -
- -/* This decides where the kernel will search for a free chunk of vm
- - * space during mmap's.
- - */
- -#define TASK_UNMAPPED_BASE (0x40000000)
- -
- -extern void start_thread(struct pt_regs *regs, unsigned long entry,
- - unsigned long stack);
- -
- -struct cpuinfo_um {
- - unsigned long loops_per_jiffy;
- - int ipi_pipe[2];
- -};
- -
- -extern struct cpuinfo_um boot_cpu_data;
- -
- -#define my_cpu_data cpu_data[smp_processor_id()]
- -
- -#ifdef CONFIG_SMP
- -extern struct cpuinfo_um cpu_data[];
- -#define current_cpu_data cpu_data[smp_processor_id()]
- -#else
- -#define cpu_data (&boot_cpu_data)
- -#define current_cpu_data boot_cpu_data
- -#endif
- -
- -
- -#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
- -extern unsigned long get_wchan(struct task_struct *p);
- -
- -#endif
- --- a/arch/um/include/asm/ptrace-generic.h
- +++ /dev/null
- @@ -1,45 +0,0 @@
- -/*
- - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_PTRACE_GENERIC_H
- -#define __UM_PTRACE_GENERIC_H
- -
- -#ifndef __ASSEMBLY__
- -
- -#include <asm/ptrace-abi.h>
- -#include <sysdep/ptrace.h>
- -
- -struct pt_regs {
- - struct uml_pt_regs regs;
- -};
- -
- -#define arch_has_single_step() (1)
- -
- -#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
- -
- -#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
- -#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
- -
- -#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
- -
- -#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
- -
- -#define instruction_pointer(regs) PT_REGS_IP(regs)
- -
- -struct task_struct;
- -
- -extern long subarch_ptrace(struct task_struct *child, long request,
- - unsigned long addr, unsigned long data);
- -extern unsigned long getreg(struct task_struct *child, int regno);
- -extern int putreg(struct task_struct *child, int regno, unsigned long value);
- -
- -extern int arch_copy_tls(struct task_struct *new);
- -extern void clear_flushed_tls(struct task_struct *task);
- -extern void syscall_trace_enter(struct pt_regs *regs);
- -extern void syscall_trace_leave(struct pt_regs *regs);
- -
- -#endif
- -
- -#endif
- --- a/arch/um/include/asm/setup.h
- +++ /dev/null
- @@ -1,10 +0,0 @@
- -#ifndef SETUP_H_INCLUDED
- -#define SETUP_H_INCLUDED
- -
- -/* POSIX mandates, via _POSIX_ARG_MAX, that we can rely on 4096 chars in the
- - * command line, so this choice is ok.
- - */
- -
- -#define COMMAND_LINE_SIZE 4096
- -
- -#endif /* SETUP_H_INCLUDED */
- --- a/arch/um/include/asm/smp.h
- +++ /dev/null
- @@ -1,32 +0,0 @@
- -#ifndef __UM_SMP_H
- -#define __UM_SMP_H
- -
- -#ifdef CONFIG_SMP
- -
- -#include <linux/bitops.h>
- -#include <asm/current.h>
- -#include <linux/cpumask.h>
- -
- -#define raw_smp_processor_id() (current_thread->cpu)
- -
- -#define cpu_logical_map(n) (n)
- -#define cpu_number_map(n) (n)
- -extern int hard_smp_processor_id(void);
- -#define NO_PROC_ID -1
- -
- -extern int ncpus;
- -
- -
- -static inline void smp_cpus_done(unsigned int maxcpus)
- -{
- -}
- -
- -extern struct task_struct *idle_threads[NR_CPUS];
- -
- -#else
- -
- -#define hard_smp_processor_id() 0
- -
- -#endif
- -
- -#endif
- --- a/arch/um/include/asm/stacktrace.h
- +++ /dev/null
- @@ -1,42 +0,0 @@
- -#ifndef _ASM_UML_STACKTRACE_H
- -#define _ASM_UML_STACKTRACE_H
- -
- -#include <linux/uaccess.h>
- -#include <linux/ptrace.h>
- -
- -struct stack_frame {
- - struct stack_frame *next_frame;
- - unsigned long return_address;
- -};
- -
- -struct stacktrace_ops {
- - void (*address)(void *data, unsigned long address, int reliable);
- -};
- -
- -#ifdef CONFIG_FRAME_POINTER
- -static inline unsigned long
- -get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
- -{
- - if (!task || task == current)
- - return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
- - return KSTK_EBP(task);
- -}
- -#else
- -static inline unsigned long
- -get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
- -{
- - return 0;
- -}
- -#endif
- -
- -static inline unsigned long
- -*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
- -{
- - if (!task || task == current)
- - return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
- - return (unsigned long *)KSTK_ESP(task);
- -}
- -
- -void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
- -
- -#endif /* _ASM_UML_STACKTRACE_H */
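-
- dump_trace() walks the chain of struct stack_frame records reachable from
- the frame pointer, invoking ops->address for each return address. A hedged
- sketch of that walk (the function name is illustrative, and a real walker
- must also validate each frame pointer before dereferencing it):
-
-   /* sketch: follow frame pointers, reporting every return address */
-   static void sketch_walk(struct stack_frame *frame,
-                           const struct stacktrace_ops *ops, void *data)
-   {
-           while (frame) {
-                   ops->address(data, frame->return_address, 1 /* reliable */);
-                   frame = frame->next_frame;
-           }
-   }
-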
- --- a/arch/um/include/asm/sysrq.h
- +++ /dev/null
- @@ -1,7 +0,0 @@
- -#ifndef __UM_SYSRQ_H
- -#define __UM_SYSRQ_H
- -
- -struct task_struct;
- -extern void show_trace(struct task_struct* task, unsigned long *stack);
- -
- -#endif
- --- a/arch/um/include/asm/thread_info.h
- +++ /dev/null
- @@ -1,78 +0,0 @@
- -/*
- - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_THREAD_INFO_H
- -#define __UM_THREAD_INFO_H
- -
- -#ifndef __ASSEMBLY__
- -
- -#include <asm/types.h>
- -#include <asm/page.h>
- -#include <asm/uaccess.h>
- -
- -struct thread_info {
- - struct task_struct *task; /* main task structure */
- - struct exec_domain *exec_domain; /* execution domain */
- - unsigned long flags; /* low level flags */
- - __u32 cpu; /* current CPU */
- - int preempt_count; /* 0 => preemptable,
- - <0 => BUG */
- - mm_segment_t addr_limit; /* thread address space:
- - 0-0xBFFFFFFF for user
- - 0-0xFFFFFFFF for kernel */
- - struct restart_block restart_block;
- - struct thread_info *real_thread; /* Points to non-IRQ stack */
- -};
- -
- -#define INIT_THREAD_INFO(tsk) \
- -{ \
- - .task = &tsk, \
- - .exec_domain = &default_exec_domain, \
- - .flags = 0, \
- - .cpu = 0, \
- - .preempt_count = INIT_PREEMPT_COUNT, \
- - .addr_limit = KERNEL_DS, \
- - .restart_block = { \
- - .fn = do_no_restart_syscall, \
- - }, \
- - .real_thread = NULL, \
- -}
- -
- -#define init_thread_info (init_thread_union.thread_info)
- -#define init_stack (init_thread_union.stack)
- -
- -#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
- -/* how to get the thread information struct from C */
- -static inline struct thread_info *current_thread_info(void)
- -{
- - struct thread_info *ti;
- - unsigned long mask = THREAD_SIZE - 1;
- - void *p;
- -
- - asm volatile ("" : "=r" (p) : "0" (&ti));
- - ti = (struct thread_info *) (((unsigned long)p) & ~mask);
- - return ti;
- -}
- -
- -#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
- -
- -#endif
- -
- -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
- -#define TIF_SIGPENDING 1 /* signal pending */
- -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
- -#define TIF_RESTART_BLOCK 4
- -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
- -#define TIF_SYSCALL_AUDIT 6
- -#define TIF_RESTORE_SIGMASK 7
- -#define TIF_NOTIFY_RESUME 8
- -
- -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- -#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- -#define _TIF_MEMDIE (1 << TIF_MEMDIE)
- -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- -
- -#endif
- --- a/arch/um/include/asm/timex.h
- +++ /dev/null
- @@ -1,13 +0,0 @@
- -#ifndef __UM_TIMEX_H
- -#define __UM_TIMEX_H
- -
- -typedef unsigned long cycles_t;
- -
- -static inline cycles_t get_cycles (void)
- -{
- - return 0;
- -}
- -
- -#define CLOCK_TICK_RATE (HZ)
- -
- -#endif
- --- a/arch/um/include/asm/tlb.h
- +++ /dev/null
- @@ -1,134 +0,0 @@
- -#ifndef __UM_TLB_H
- -#define __UM_TLB_H
- -
- -#include <linux/pagemap.h>
- -#include <linux/swap.h>
- -#include <asm/percpu.h>
- -#include <asm/pgalloc.h>
- -#include <asm/tlbflush.h>
- -
- -#define tlb_start_vma(tlb, vma) do { } while (0)
- -#define tlb_end_vma(tlb, vma) do { } while (0)
- -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
- -
- -/* struct mmu_gather is an opaque type used by the mm code for passing around
- - * any data needed by arch specific code for tlb_remove_page.
- - */
- -struct mmu_gather {
- - struct mm_struct *mm;
- - unsigned int need_flush; /* Really unmapped some ptes? */
- - unsigned long start;
- - unsigned long end;
- - unsigned int fullmm; /* non-zero means full mm flush */
- -};
- -
- -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- - unsigned long address)
- -{
- - if (tlb->start > address)
- - tlb->start = address;
- - if (tlb->end < address + PAGE_SIZE)
- - tlb->end = address + PAGE_SIZE;
- -}
- -
- -static inline void init_tlb_gather(struct mmu_gather *tlb)
- -{
- - tlb->need_flush = 0;
- -
- - tlb->start = TASK_SIZE;
- - tlb->end = 0;
- -
- - if (tlb->fullmm) {
- - tlb->start = 0;
- - tlb->end = TASK_SIZE;
- - }
- -}
- -
- -static inline void
- -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- -{
- - tlb->mm = mm;
- - tlb->start = start;
- - tlb->end = end;
- - tlb->fullmm = !(start | (end+1));
- -
- - init_tlb_gather(tlb);
- -}
- -
- -extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- - unsigned long end);
- -
- -static inline void
- -tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
- -{
- - flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
- -}
- -
- -static inline void
- -tlb_flush_mmu_free(struct mmu_gather *tlb)
- -{
- - init_tlb_gather(tlb);
- -}
- -
- -static inline void
- -tlb_flush_mmu(struct mmu_gather *tlb)
- -{
- - if (!tlb->need_flush)
- - return;
- -
- - tlb_flush_mmu_tlbonly(tlb);
- - tlb_flush_mmu_free(tlb);
- -}
- -
- -/* tlb_finish_mmu
- - * Called at the end of the shootdown operation to free up any resources
- - * that were required.
- - */
- -static inline void
- -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
- -{
- - tlb_flush_mmu(tlb);
- -
- - /* keep the page table cache within bounds */
- - check_pgt_cache();
- -}
- -
- -/* tlb_remove_page
- - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- - * while handling the additional races in SMP caused by other CPUs
- - * caching valid mappings in their TLBs.
- - */
- -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
- -{
- - tlb->need_flush = 1;
- - free_page_and_swap_cache(page);
- - return 1; /* avoid calling tlb_flush_mmu */
- -}
- -
- -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
- -{
- - __tlb_remove_page(tlb, page);
- -}
- -
- -/**
- - * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- - *
- - * Record the fact that pte's were really umapped in ->need_flush, so we can
- - * later optimise away the tlb invalidate. This helps when userspace is
- - * unmapping already-unmapped pages, which happens quite a lot.
- - */
- -#define tlb_remove_tlb_entry(tlb, ptep, address) \
- - do { \
- - tlb->need_flush = 1; \
- - __tlb_remove_tlb_entry(tlb, ptep, address); \
- - } while (0)
- -
- -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
- -
- -#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
- -
- -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
- -
- -#define tlb_migrate_finish(mm) do {} while (0)
- -
- -#endif
- --- a/arch/um/include/asm/tlbflush.h
- +++ /dev/null
- @@ -1,31 +0,0 @@
- -/*
- - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_TLBFLUSH_H
- -#define __UM_TLBFLUSH_H
- -
- -#include <linux/mm.h>
- -
- -/*
- - * TLB flushing:
- - *
- - * - flush_tlb() flushes the current mm struct TLBs
- - * - flush_tlb_all() flushes all processes TLBs
- - * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- - * - flush_tlb_page(vma, vmaddr) flushes one page
- - * - flush_tlb_kernel_vm() flushes the kernel vm area
- - * - flush_tlb_range(vma, start, end) flushes a range of pages
- - */
- -
- -extern void flush_tlb_all(void);
- -extern void flush_tlb_mm(struct mm_struct *mm);
- -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- - unsigned long end);
- -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
- -extern void flush_tlb_kernel_vm(void);
- -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
- -extern void __flush_tlb_one(unsigned long addr);
- -
- -#endif
- --- a/arch/um/include/asm/uaccess.h
- +++ /dev/null
- @@ -1,178 +0,0 @@
- -/*
- - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- - * Licensed under the GPL
- - */
- -
- -#ifndef __UM_UACCESS_H
- -#define __UM_UACCESS_H
- -
- -/* thread_info has a mm_segment_t in it, so put the definition up here */
- -typedef struct {
- - unsigned long seg;
- -} mm_segment_t;
- -
- -#include <linux/thread_info.h>
- -#include <linux/errno.h>
- -#include <asm/processor.h>
- -#include <asm/elf.h>
- -
- -#define VERIFY_READ 0
- -#define VERIFY_WRITE 1
- -
- -/*
- - * The fs value determines whether argument validity checking should be
- - * performed or not. If get_fs() == USER_DS, checking is performed, with
- - * get_fs() == KERNEL_DS, checking is bypassed.
- - *
- - * For historical reasons, these macros are grossly misnamed.
- - */
- -
- -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
- -
- -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
- -#define USER_DS MAKE_MM_SEG(TASK_SIZE)
- -
- -#define get_ds() (KERNEL_DS)
- -#define get_fs() (current_thread_info()->addr_limit)
- -#define set_fs(x) (current_thread_info()->addr_limit = (x))
- -
- -#define segment_eq(a, b) ((a).seg == (b).seg)
- -
- -#define __under_task_size(addr, size) \
- - (((unsigned long) (addr) < TASK_SIZE) && \
- - (((unsigned long) (addr) + (size)) < TASK_SIZE))
- -
- -#define __access_ok_vsyscall(type, addr, size) \
- - ((type == VERIFY_READ) && \
- - ((unsigned long) (addr) >= FIXADDR_USER_START) && \
- - ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
- - ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
- -
- -#define __addr_range_nowrap(addr, size) \
- - ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
- -
- -#define access_ok(type, addr, size) \
- - (__addr_range_nowrap(addr, size) && \
- - (__under_task_size(addr, size) || \
- - __access_ok_vsyscall(type, addr, size) || \
- - segment_eq(get_fs(), KERNEL_DS)))
- -
- -extern int copy_from_user(void *to, const void __user *from, int n);
- -extern int copy_to_user(void __user *to, const void *from, int n);
- -
- -/*
- - * strncpy_from_user: - Copy a NUL terminated string from userspace.
- - * @dst: Destination address, in kernel space. This buffer must be at
- - * least @count bytes long.
- - * @src: Source address, in user space.
- - * @count: Maximum number of bytes to copy, including the trailing NUL.
- - *
- - * Copies a NUL-terminated string from userspace to kernel space.
- - *
- - * On success, returns the length of the string (not including the trailing
- - * NUL).
- - *
- - * If access to userspace fails, returns -EFAULT (some data may have been
- - * copied).
- - *
- - * If @count is smaller than the length of the string, copies @count bytes
- - * and returns @count.
- - */
- -
- -extern int strncpy_from_user(char *dst, const char __user *src, int count);
- -
- -/*
- - * __clear_user: - Zero a block of memory in user space, with less checking.
- - * @to: Destination address, in user space.
- - * @n: Number of bytes to zero.
- - *
- - * Zero a block of memory in user space. Caller must check
- - * the specified block with access_ok() before calling this function.
- - *
- - * Returns number of bytes that could not be cleared.
- - * On success, this will be zero.
- - */
- -extern int __clear_user(void __user *mem, int len);
- -
- -/*
- - * clear_user: - Zero a block of memory in user space.
- - * @to: Destination address, in user space.
- - * @n: Number of bytes to zero.
- - *
- - * Zero a block of memory in user space.
- - *
- - * Returns number of bytes that could not be cleared.
- - * On success, this will be zero.
- - */
- -extern int clear_user(void __user *mem, int len);
- -
- -/*
- - * strlen_user: - Get the size of a string in user space.
- - * @str: The string to measure.
- - * @n: The maximum valid length
- - *
- - * Get the size of a NUL-terminated string in user space.
- - *
- - * Returns the size of the string INCLUDING the terminating NUL.
- - * On exception, returns 0.
- - * If the string is too long, returns a value greater than @n.
- - */
- -extern int strnlen_user(const void __user *str, int len);
- -
- -#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
- -
- -#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
- -
- -#define __copy_to_user_inatomic __copy_to_user
- -#define __copy_from_user_inatomic __copy_from_user
- -
- -#define __get_user(x, ptr) \
- -({ \
- - const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
- - __typeof__(x) __private_val; \
- - int __private_ret = -EFAULT; \
- - (x) = (__typeof__(*(__private_ptr)))0; \
- - if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
- - sizeof(*(__private_ptr))) == 0) { \
- - (x) = (__typeof__(*(__private_ptr))) __private_val; \
- - __private_ret = 0; \
- - } \
- - __private_ret; \
- -})
- -
- -#define get_user(x, ptr) \
- -({ \
- - const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
- - (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
- - __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
- -})
- -
- -#define __put_user(x, ptr) \
- -({ \
- - __typeof__(*(ptr)) __user *__private_ptr = ptr; \
- - __typeof__(*(__private_ptr)) __private_val; \
- - int __private_ret = -EFAULT; \
- - __private_val = (__typeof__(*(__private_ptr))) (x); \
- - if (__copy_to_user((__private_ptr), &__private_val, \
- - sizeof(*(__private_ptr))) == 0) { \
- - __private_ret = 0; \
- - } \
- - __private_ret; \
- -})
- -
- -#define put_user(x, ptr) \
- -({ \
- - __typeof__(*(ptr)) __user *private_ptr = (ptr); \
- - (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
- - __put_user(x, private_ptr) : -EFAULT); \
- -})
- -
- -#define strlen_user(str) strnlen_user(str, ~0U >> 1)
- -
- -struct exception_table_entry
- -{
- - unsigned long insn;
- - unsigned long fixup;
- -};
- -
- -#endif
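
For readers skimming the uaccess interface removed above: a minimal, hypothetical usage sketch (not part of this patch; the function and parameter names are illustrative) of how get_user() and put_user() behave under these definitions:

/* Illustrative only: read an int from user space, bump it, write it back. */
static long example_bump_user(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* does its own access_ok(VERIFY_READ, ...) */
		return -EFAULT;
	return put_user(val + 1, uptr);	/* likewise checks VERIFY_WRITE itself */
}
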
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/Kbuild
- @@ -0,0 +1,30 @@
- +generic-y += barrier.h
- +generic-y += bug.h
- +generic-y += clkdev.h
- +generic-y += cputime.h
- +generic-y += current.h
- +generic-y += delay.h
- +generic-y += device.h
- +generic-y += emergency-restart.h
- +generic-y += exec.h
- +generic-y += ftrace.h
- +generic-y += futex.h
- +generic-y += hardirq.h
- +generic-y += hash.h
- +generic-y += hw_irq.h
- +generic-y += io.h
- +generic-y += irq_regs.h
- +generic-y += irq_work.h
- +generic-y += kdebug.h
- +generic-y += mcs_spinlock.h
- +generic-y += mutex.h
- +generic-y += param.h
- +generic-y += pci.h
- +generic-y += percpu.h
- +generic-y += preempt.h
- +generic-y += scatterlist.h
- +generic-y += sections.h
- +generic-y += switch_to.h
- +generic-y += topology.h
- +generic-y += trace_clock.h
- +generic-y += xor.h
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/a.out-core.h
- @@ -0,0 +1,27 @@
- +/* a.out coredump register dumper
- + *
- + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- + * Written by David Howells (dhowells@redhat.com)
- + *
- + * This program is free software; you can redistribute it and/or
- + * modify it under the terms of the GNU General Public Licence
- + * as published by the Free Software Foundation; either version
- + * 2 of the Licence, or (at your option) any later version.
- + */
- +
- +#ifndef __UM_A_OUT_CORE_H
- +#define __UM_A_OUT_CORE_H
- +
- +#ifdef __KERNEL__
- +
- +#include <linux/user.h>
- +
- +/*
- + * fill in the user structure for an a.out core dump
- + */
- +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
- +{
- +}
- +
- +#endif /* __KERNEL__ */
- +#endif /* __UM_A_OUT_CORE_H */
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/bugs.h
- @@ -0,0 +1,6 @@
- +#ifndef __UM_BUGS_H
- +#define __UM_BUGS_H
- +
- +void check_bugs(void);
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/cache.h
- @@ -0,0 +1,17 @@
- +#ifndef __UM_CACHE_H
- +#define __UM_CACHE_H
- +
- +
- +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
- +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
- +#elif defined(CONFIG_UML_X86) /* 64-bit */
- +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
- +#else
- +/* XXX: this was taken from x86, now it's completely random. Luckily only
- + * affects SMP padding. */
- +# define L1_CACHE_SHIFT 5
- +#endif
- +
- +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/common.lds.S
- @@ -0,0 +1,107 @@
- +#include <asm-generic/vmlinux.lds.h>
- +
- + .fini : { *(.fini) } =0x9090
- + _etext = .;
- + PROVIDE (etext = .);
- +
- + . = ALIGN(4096);
- + _sdata = .;
- + PROVIDE (sdata = .);
- +
- + RODATA
- +
- + .unprotected : { *(.unprotected) }
- + . = ALIGN(4096);
- + PROVIDE (_unprotected_end = .);
- +
- + . = ALIGN(4096);
- + .note : { *(.note.*) }
- + EXCEPTION_TABLE(0)
- +
- + BUG_TABLE
- +
- + .uml.setup.init : {
- + __uml_setup_start = .;
- + *(.uml.setup.init)
- + __uml_setup_end = .;
- + }
- +
- + .uml.help.init : {
- + __uml_help_start = .;
- + *(.uml.help.init)
- + __uml_help_end = .;
- + }
- +
- + .uml.postsetup.init : {
- + __uml_postsetup_start = .;
- + *(.uml.postsetup.init)
- + __uml_postsetup_end = .;
- + }
- +
- + .init.setup : {
- + INIT_SETUP(0)
- + }
- +
- + PERCPU_SECTION(32)
- +
- + .initcall.init : {
- + INIT_CALLS
- + }
- +
- + .con_initcall.init : {
- + CON_INITCALL
- + }
- +
- + .uml.initcall.init : {
- + __uml_initcall_start = .;
- + *(.uml.initcall.init)
- + __uml_initcall_end = .;
- + }
- +
- + SECURITY_INIT
- +
- + .exitcall : {
- + __exitcall_begin = .;
- + *(.exitcall.exit)
- + __exitcall_end = .;
- + }
- +
- + .uml.exitcall : {
- + __uml_exitcall_begin = .;
- + *(.uml.exitcall.exit)
- + __uml_exitcall_end = .;
- + }
- +
- + . = ALIGN(4);
- + .altinstructions : {
- + __alt_instructions = .;
- + *(.altinstructions)
- + __alt_instructions_end = .;
- + }
- + .altinstr_replacement : { *(.altinstr_replacement) }
- + /* .exit.text is discarded at runtime, not link time, to deal with
- + references from .altinstructions and .eh_frame */
- + .exit.text : { *(.exit.text) }
- + .exit.data : { *(.exit.data) }
- +
- + .preinit_array : {
- + __preinit_array_start = .;
- + *(.preinit_array)
- + __preinit_array_end = .;
- + }
- + .init_array : {
- + __init_array_start = .;
- + *(.init_array)
- + __init_array_end = .;
- + }
- + .fini_array : {
- + __fini_array_start = .;
- + *(.fini_array)
- + __fini_array_end = .;
- + }
- +
- + . = ALIGN(4096);
- + .init.ramfs : {
- + INIT_RAM_FS
- + }
- +
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/dma.h
- @@ -0,0 +1,10 @@
- +#ifndef __UM_DMA_H
- +#define __UM_DMA_H
- +
- +#include <asm/io.h>
- +
- +extern unsigned long uml_physmem;
- +
- +#define MAX_DMA_ADDRESS (uml_physmem)
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/fixmap.h
- @@ -0,0 +1,60 @@
- +#ifndef __UM_FIXMAP_H
- +#define __UM_FIXMAP_H
- +
- +#include <asm/processor.h>
- +#include <asm/kmap_types.h>
- +#include <asm/archparam.h>
- +#include <asm/page.h>
- +#include <linux/threads.h>
- +
- +/*
- + * Here we define all the compile-time 'special' virtual
- + * addresses. The point is to have a constant address at
- + * compile time, but to set the physical address only
- + * in the boot process. We allocate these special addresses
- + * from the end of virtual memory (0xfffff000) backwards.
- + * This also lets us do fail-safe vmalloc(): we
- + * can guarantee that these special addresses and
- + * vmalloc()-ed addresses never overlap.
- + *
- + * these 'compile-time allocated' memory buffers are
- + * fixed-size 4k pages (or larger if used with an increment
- + * higher than 1). Use fixmap_set(idx,phys) to associate
- + * physical memory with fixmap indices.
- + *
- + * TLB entries of such buffers will not be flushed across
- + * task switches.
- + */
- +
- +/*
- + * on UP currently we will have no trace of the fixmap mechanism,
- + * no page table allocations, etc. This might change in the
- + * future, say framebuffers for the console driver(s) could be
- + * fix-mapped?
- + */
- +enum fixed_addresses {
- +#ifdef CONFIG_HIGHMEM
- + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
- +#endif
- + __end_of_fixed_addresses
- +};
- +
- +extern void __set_fixmap (enum fixed_addresses idx,
- + unsigned long phys, pgprot_t flags);
- +
- +/*
- + * used by vmalloc.c.
- + *
- + * Leave one empty page between vmalloc'ed areas and
- + * the start of the fixmap, and leave one page empty
- + * at the top of mem..
- + */
- +
- +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
- +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
- +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
- +
- +#include <asm-generic/fixmap.h>
- +
- +#endif
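
The slot-to-address mapping itself comes from <asm-generic/fixmap.h>, included at the end of this header; assuming its usual __fix_to_virt() definition, the arithmetic reduces to:

/* Sketch only: fixmap slots grow downward from FIXADDR_TOP, one page apiece. */
static inline unsigned long example_fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - (idx << PAGE_SHIFT);	/* i.e. __fix_to_virt(idx) */
}
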
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/irq.h
- @@ -0,0 +1,23 @@
- +#ifndef __UM_IRQ_H
- +#define __UM_IRQ_H
- +
- +#define TIMER_IRQ 0
- +#define UMN_IRQ 1
- +#define CONSOLE_IRQ 2
- +#define CONSOLE_WRITE_IRQ 3
- +#define UBD_IRQ 4
- +#define UM_ETH_IRQ 5
- +#define SSL_IRQ 6
- +#define SSL_WRITE_IRQ 7
- +#define ACCEPT_IRQ 8
- +#define MCONSOLE_IRQ 9
- +#define WINCH_IRQ 10
- +#define SIGIO_WRITE_IRQ 11
- +#define TELNETD_IRQ 12
- +#define XTERM_IRQ 13
- +#define RANDOM_IRQ 14
- +
- +#define LAST_IRQ RANDOM_IRQ
- +#define NR_IRQS (LAST_IRQ + 1)
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/irqflags.h
- @@ -0,0 +1,42 @@
- +#ifndef __UM_IRQFLAGS_H
- +#define __UM_IRQFLAGS_H
- +
- +extern int get_signals(void);
- +extern int set_signals(int enable);
- +extern void block_signals(void);
- +extern void unblock_signals(void);
- +
- +static inline unsigned long arch_local_save_flags(void)
- +{
- + return get_signals();
- +}
- +
- +static inline void arch_local_irq_restore(unsigned long flags)
- +{
- + set_signals(flags);
- +}
- +
- +static inline void arch_local_irq_enable(void)
- +{
- + unblock_signals();
- +}
- +
- +static inline void arch_local_irq_disable(void)
- +{
- + block_signals();
- +}
- +
- +static inline unsigned long arch_local_irq_save(void)
- +{
- + unsigned long flags;
- + flags = arch_local_save_flags();
- + arch_local_irq_disable();
- + return flags;
- +}
- +
- +static inline bool arch_irqs_disabled(void)
- +{
- + return arch_local_save_flags() == 0;
- +}
- +
- +#endif
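
Since UML runs as a host process, "disabling interrupts" here means blocking the host signals that deliver them; a minimal sketch of the standard save/restore pattern on top of these helpers (illustrative, not part of the patch):

/* No "interrupt" (host signal) can preempt the section between save and
 * restore; flags carries the previous get_signals() state.
 */
static void example_critical(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* save state, then block_signals() */
	/* ... touch IRQ-shared state ... */
	arch_local_irq_restore(flags);	/* set_signals(flags) */
}
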
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/kmap_types.h
- @@ -0,0 +1,13 @@
- +/*
- + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_KMAP_TYPES_H
- +#define __UM_KMAP_TYPES_H
- +
- +/* No more #include "asm/arch/kmap_types.h" ! */
- +
- +#define KM_TYPE_NR 14
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/kvm_para.h
- @@ -0,0 +1 @@
- +#include <asm-generic/kvm_para.h>
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/mmu.h
- @@ -0,0 +1,24 @@
- +/*
- + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __ARCH_UM_MMU_H
- +#define __ARCH_UM_MMU_H
- +
- +#include <mm_id.h>
- +#include <asm/mm_context.h>
- +
- +typedef struct mm_context {
- + struct mm_id id;
- + struct uml_arch_mm_context arch;
- + struct page *stub_pages[2];
- +} mm_context_t;
- +
- +extern void __switch_mm(struct mm_id * mm_idp);
- +
- +/* Avoid tangled inclusion with asm/ldt.h */
- +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
- +extern void free_ldt(struct mm_context *mm);
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/mmu_context.h
- @@ -0,0 +1,58 @@
- +/*
- + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_MMU_CONTEXT_H
- +#define __UM_MMU_CONTEXT_H
- +
- +#include <linux/sched.h>
- +#include <asm/mmu.h>
- +
- +extern void uml_setup_stubs(struct mm_struct *mm);
- +extern void arch_exit_mmap(struct mm_struct *mm);
- +
- +#define deactivate_mm(tsk,mm) do { } while (0)
- +
- +extern void force_flush_all(void);
- +
- +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
- +{
- + /*
- + * This is called by fs/exec.c and sys_unshare()
- + * when the new ->mm is used for the first time.
- + */
- + __switch_mm(&new->context.id);
- + down_write(&new->mmap_sem);
- + uml_setup_stubs(new);
- + up_write(&new->mmap_sem);
- +}
- +
- +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- + struct task_struct *tsk)
- +{
- + unsigned cpu = smp_processor_id();
- +
- + if(prev != next){
- + cpumask_clear_cpu(cpu, mm_cpumask(prev));
- + cpumask_set_cpu(cpu, mm_cpumask(next));
- + if(next != &init_mm)
- + __switch_mm(&next->context.id);
- + }
- +}
- +
- +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
- +{
- + uml_setup_stubs(mm);
- +}
- +
- +static inline void enter_lazy_tlb(struct mm_struct *mm,
- + struct task_struct *tsk)
- +{
- +}
- +
- +extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
- +
- +extern void destroy_context(struct mm_struct *mm);
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/page.h
- @@ -0,0 +1,127 @@
- +/*
- + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- + * Copyright 2003 PathScale, Inc.
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PAGE_H
- +#define __UM_PAGE_H
- +
- +#include <linux/const.h>
- +
- +/* PAGE_SHIFT determines the page size */
- +#define PAGE_SHIFT 12
- +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
- +#define PAGE_MASK (~(PAGE_SIZE-1))
- +
- +#ifndef __ASSEMBLY__
- +
- +struct page;
- +
- +#include <linux/types.h>
- +#include <asm/vm-flags.h>
- +
- +/*
- + * These are used to make use of C type-checking..
- + */
- +
- +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
- +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
- +
- +#define clear_user_page(page, vaddr, pg) clear_page(page)
- +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
- +
- +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
- +
- +typedef struct { unsigned long pte_low, pte_high; } pte_t;
- +typedef struct { unsigned long pmd; } pmd_t;
- +typedef struct { unsigned long pgd; } pgd_t;
- +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
- +
- +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
- +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
- +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
- +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
- + smp_wmb(); \
- + (to).pte_low = (from).pte_low; })
- +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
- +#define pte_set_val(pte, phys, prot) \
- + ({ (pte).pte_high = (phys) >> 32; \
- + (pte).pte_low = (phys) | pgprot_val(prot); })
- +
- +#define pmd_val(x) ((x).pmd)
- +#define __pmd(x) ((pmd_t) { (x) } )
- +
- +typedef unsigned long long pfn_t;
- +typedef unsigned long long phys_t;
- +
- +#else
- +
- +typedef struct { unsigned long pte; } pte_t;
- +typedef struct { unsigned long pgd; } pgd_t;
- +
- +#ifdef CONFIG_3_LEVEL_PGTABLES
- +typedef struct { unsigned long pmd; } pmd_t;
- +#define pmd_val(x) ((x).pmd)
- +#define __pmd(x) ((pmd_t) { (x) } )
- +#endif
- +
- +#define pte_val(x) ((x).pte)
- +
- +
- +#define pte_get_bits(p, bits) ((p).pte & (bits))
- +#define pte_set_bits(p, bits) ((p).pte |= (bits))
- +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
- +#define pte_copy(to, from) ((to).pte = (from).pte)
- +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
- +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
- +
- +typedef unsigned long pfn_t;
- +typedef unsigned long phys_t;
- +
- +#endif
- +
- +typedef struct { unsigned long pgprot; } pgprot_t;
- +
- +typedef struct page *pgtable_t;
- +
- +#define pgd_val(x) ((x).pgd)
- +#define pgprot_val(x) ((x).pgprot)
- +
- +#define __pte(x) ((pte_t) { (x) } )
- +#define __pgd(x) ((pgd_t) { (x) } )
- +#define __pgprot(x) ((pgprot_t) { (x) } )
- +
- +extern unsigned long uml_physmem;
- +
- +#define PAGE_OFFSET (uml_physmem)
- +#define KERNELBASE PAGE_OFFSET
- +
- +#define __va_space (8*1024*1024)
- +
- +#include <mem.h>
- +
- +/* Cast to unsigned long before casting to void * to avoid a warning from
- + * mmap_kmem about cutting a long long down to a void *. Not sure that
- + * casting is the right thing, but 32-bit UML can't have 64-bit virtual
- + * addresses
- + */
- +#define __pa(virt) to_phys((void *) (unsigned long) (virt))
- +#define __va(phys) to_virt((unsigned long) (phys))
- +
- +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
- +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
- +
- +#define pfn_valid(pfn) ((pfn) < max_mapnr)
- +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
- +
- +#include <asm-generic/memory_model.h>
- +#include <asm-generic/getorder.h>
- +
- +#endif /* __ASSEMBLY__ */
- +
- +#ifdef CONFIG_X86_32
- +#define __HAVE_ARCH_GATE_AREA 1
- +#endif
- +
- +#endif /* __UM_PAGE_H */
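
Because PAGE_OFFSET is the run-time variable uml_physmem, the __pa()/__va() pair above is plain offset arithmetic rather than a compile-time constant. A sketch of the pfn round trip the macros imply, assuming to_phys()/to_virt() from <mem.h> invert each other:

/* Sketch: virtual -> physical -> pfn -> physical -> virtual. */
static inline void *example_page_base(void *vaddr)
{
	pfn_t pfn = phys_to_pfn(__pa(vaddr));	/* __pa(vaddr) >> PAGE_SHIFT */

	return __va(pfn_to_phys(pfn));		/* page-aligned base of vaddr */
}
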
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/pgalloc.h
- @@ -0,0 +1,61 @@
- +/*
- + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- + * Copyright 2003 PathScale, Inc.
- + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PGALLOC_H
- +#define __UM_PGALLOC_H
- +
- +#include <linux/mm.h>
- +
- +#define pmd_populate_kernel(mm, pmd, pte) \
- + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
- +
- +#define pmd_populate(mm, pmd, pte) \
- + set_pmd(pmd, __pmd(_PAGE_TABLE + \
- + ((unsigned long long)page_to_pfn(pte) << \
- + (unsigned long long) PAGE_SHIFT)))
- +#define pmd_pgtable(pmd) pmd_page(pmd)
- +
- +/*
- + * Allocate and free page tables.
- + */
- +extern pgd_t *pgd_alloc(struct mm_struct *);
- +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
- +
- +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
- +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
- +
- +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
- +{
- + free_page((unsigned long) pte);
- +}
- +
- +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
- +{
- + pgtable_page_dtor(pte);
- + __free_page(pte);
- +}
- +
- +#define __pte_free_tlb(tlb,pte, address) \
- +do { \
- + pgtable_page_dtor(pte); \
- + tlb_remove_page((tlb),(pte)); \
- +} while (0)
- +
- +#ifdef CONFIG_3_LEVEL_PGTABLES
- +
- +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
- +{
- + free_page((unsigned long)pmd);
- +}
- +
- +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
- +#endif
- +
- +#define check_pgt_cache() do { } while (0)
- +
- +#endif
- +
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/pgtable-2level.h
- @@ -0,0 +1,53 @@
- +/*
- + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- + * Copyright 2003 PathScale, Inc.
- + * Derived from include/asm-i386/pgtable.h
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PGTABLE_2LEVEL_H
- +#define __UM_PGTABLE_2LEVEL_H
- +
- +#include <asm-generic/pgtable-nopmd.h>
- +
- +/* PGDIR_SHIFT determines what a third-level page table entry can map */
- +
- +#define PGDIR_SHIFT 22
- +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
- +#define PGDIR_MASK (~(PGDIR_SIZE-1))
- +
- +/*
- + * entries per page directory level: the i386 is two-level, so
- + * we don't really have any PMD directory physically.
- + */
- +#define PTRS_PER_PTE 1024
- +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
- +#define PTRS_PER_PGD 1024
- +#define FIRST_USER_ADDRESS 0
- +
- +#define pte_ERROR(e) \
- + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- + pte_val(e))
- +#define pgd_ERROR(e) \
- + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- + pgd_val(e))
- +
- +static inline int pgd_newpage(pgd_t pgd) { return 0; }
- +static inline void pgd_mkuptodate(pgd_t pgd) { }
- +
- +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
- +
- +#define pte_pfn(x) phys_to_pfn(pte_val(x))
- +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
- +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
- +
- +/*
- + * Bits 0 through 4 are taken
- + */
- +#define PTE_FILE_MAX_BITS 27
- +
- +#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
- +
- +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/pgtable-3level.h
- @@ -0,0 +1,136 @@
- +/*
- + * Copyright 2003 PathScale Inc
- + * Derived from include/asm-i386/pgtable.h
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PGTABLE_3LEVEL_H
- +#define __UM_PGTABLE_3LEVEL_H
- +
- +#include <asm-generic/pgtable-nopud.h>
- +
- +/* PGDIR_SHIFT determines what a third-level page table entry can map */
- +
- +#ifdef CONFIG_64BIT
- +#define PGDIR_SHIFT 30
- +#else
- +#define PGDIR_SHIFT 31
- +#endif
- +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
- +#define PGDIR_MASK (~(PGDIR_SIZE-1))
- +
- +/* PMD_SHIFT determines the size of the area a second-level page table can
- + * map
- + */
- +
- +#define PMD_SHIFT 21
- +#define PMD_SIZE (1UL << PMD_SHIFT)
- +#define PMD_MASK (~(PMD_SIZE-1))
- +
- +/*
- + * entries per page directory level
- + */
- +
- +#define PTRS_PER_PTE 512
- +#ifdef CONFIG_64BIT
- +#define PTRS_PER_PMD 512
- +#define PTRS_PER_PGD 512
- +#else
- +#define PTRS_PER_PMD 1024
- +#define PTRS_PER_PGD 1024
- +#endif
- +
- +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
- +#define FIRST_USER_ADDRESS 0
- +
- +#define pte_ERROR(e) \
- + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- + pte_val(e))
- +#define pmd_ERROR(e) \
- + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- + pmd_val(e))
- +#define pgd_ERROR(e) \
- + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- + pgd_val(e))
- +
- +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
- +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
- +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
- +#define pud_populate(mm, pud, pmd) \
- + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
- +
- +#ifdef CONFIG_64BIT
- +#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
- +#else
- +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
- +#endif
- +
- +static inline int pgd_newpage(pgd_t pgd)
- +{
- + return(pgd_val(pgd) & _PAGE_NEWPAGE);
- +}
- +
- +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
- +
- +#ifdef CONFIG_64BIT
- +#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
- +#else
- +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
- +#endif
- +
- +struct mm_struct;
- +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
- +
- +static inline void pud_clear (pud_t *pud)
- +{
- + set_pud(pud, __pud(_PAGE_NEWPAGE));
- +}
- +
- +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
- +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
- +
- +/* Find an entry in the second-level page table.. */
- +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
- + pmd_index(address))
- +
- +static inline unsigned long pte_pfn(pte_t pte)
- +{
- + return phys_to_pfn(pte_val(pte));
- +}
- +
- +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
- +{
- + pte_t pte;
- + phys_t phys = pfn_to_phys(page_nr);
- +
- + pte_set_val(pte, phys, pgprot);
- + return pte;
- +}
- +
- +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
- +{
- + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
- +}
- +
- +/*
- + * Bits 0 through 3 are taken in the low part of the pte,
- + * so we put the 32 bits of offset into the high part.
- + */
- +#define PTE_FILE_MAX_BITS 32
- +
- +#ifdef CONFIG_64BIT
- +
- +#define pte_to_pgoff(p) ((p).pte >> 32)
- +
- +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
- +
- +#else
- +
- +#define pte_to_pgoff(pte) ((pte).pte_high)
- +
- +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
- +
- +#endif
- +
- +#endif
- +
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/pgtable.h
- @@ -0,0 +1,375 @@
- +/*
- + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Copyright 2003 PathScale, Inc.
- + * Derived from include/asm-i386/pgtable.h
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PGTABLE_H
- +#define __UM_PGTABLE_H
- +
- +#include <asm/fixmap.h>
- +
- +#define _PAGE_PRESENT 0x001
- +#define _PAGE_NEWPAGE 0x002
- +#define _PAGE_NEWPROT 0x004
- +#define _PAGE_RW 0x020
- +#define _PAGE_USER 0x040
- +#define _PAGE_ACCESSED 0x080
- +#define _PAGE_DIRTY 0x100
- +/* If _PAGE_PRESENT is clear, we use these: */
- +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
- +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
- + pte_present gives true */
- +
- +#ifdef CONFIG_3_LEVEL_PGTABLES
- +#include <asm/pgtable-3level.h>
- +#else
- +#include <asm/pgtable-2level.h>
- +#endif
- +
- +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
- +
- +/* zero page used for uninitialized stuff */
- +extern unsigned long *empty_zero_page;
- +
- +#define pgtable_cache_init() do ; while (0)
- +
- +/* Just any arbitrary offset to the start of the vmalloc VM area: the
- + * current 8MB value just means that there will be an 8MB "hole" after the
- + * physical memory until the kernel virtual memory starts. That means that
- + * any out-of-bounds memory accesses will hopefully be caught.
- + * The vmalloc() routines leave a hole of 4kB between each vmalloc'ed
- + * area for the same reason. ;)
- + */
- +
- +extern unsigned long end_iomem;
- +
- +#define VMALLOC_OFFSET (__va_space)
- +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
- +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
- +#ifdef CONFIG_HIGHMEM
- +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
- +#else
- +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
- +#endif
- +#define MODULES_VADDR VMALLOC_START
- +#define MODULES_END VMALLOC_END
- +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
- +
- +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
- +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
- +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
- +#define __PAGE_KERNEL_EXEC \
- + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
- +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
- +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
- +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
- +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
- +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
- +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
- +
- +/*
- + * The i386 can't do page protection for execute; it considers that
- + * the same as read.
- + * Also, write permissions imply read permissions. This is the closest we can
- + * get.
- + */
- +#define __P000 PAGE_NONE
- +#define __P001 PAGE_READONLY
- +#define __P010 PAGE_COPY
- +#define __P011 PAGE_COPY
- +#define __P100 PAGE_READONLY
- +#define __P101 PAGE_READONLY
- +#define __P110 PAGE_COPY
- +#define __P111 PAGE_COPY
- +
- +#define __S000 PAGE_NONE
- +#define __S001 PAGE_READONLY
- +#define __S010 PAGE_SHARED
- +#define __S011 PAGE_SHARED
- +#define __S100 PAGE_READONLY
- +#define __S101 PAGE_READONLY
- +#define __S110 PAGE_SHARED
- +#define __S111 PAGE_SHARED
- +
- +/*
- + * ZERO_PAGE is a global shared page that is always zero: used
- + * for zero-mapped memory areas etc..
- + */
- +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
- +
- +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
- +
- +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
- +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
- +
- +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
- +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
- +
- +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
- +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
- +
- +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
- +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
- +
- +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
- +
- +#define pte_page(x) pfn_to_page(pte_pfn(x))
- +
- +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
- +
- +/*
- + * =================================
- + * Flags checking section.
- + * =================================
- + */
- +
- +static inline int pte_none(pte_t pte)
- +{
- + return pte_is_zero(pte);
- +}
- +
- +/*
- + * The following only work if pte_present() is true.
- + * Undefined behaviour if not..
- + */
- +static inline int pte_read(pte_t pte)
- +{
- + return((pte_get_bits(pte, _PAGE_USER)) &&
- + !(pte_get_bits(pte, _PAGE_PROTNONE)));
- +}
- +
- +static inline int pte_exec(pte_t pte){
- + return((pte_get_bits(pte, _PAGE_USER)) &&
- + !(pte_get_bits(pte, _PAGE_PROTNONE)));
- +}
- +
- +static inline int pte_write(pte_t pte)
- +{
- + return((pte_get_bits(pte, _PAGE_RW)) &&
- + !(pte_get_bits(pte, _PAGE_PROTNONE)));
- +}
- +
- +/*
- + * The following only works if pte_present() is not true.
- + */
- +static inline int pte_file(pte_t pte)
- +{
- + return pte_get_bits(pte, _PAGE_FILE);
- +}
- +
- +static inline int pte_dirty(pte_t pte)
- +{
- + return pte_get_bits(pte, _PAGE_DIRTY);
- +}
- +
- +static inline int pte_young(pte_t pte)
- +{
- + return pte_get_bits(pte, _PAGE_ACCESSED);
- +}
- +
- +static inline int pte_newpage(pte_t pte)
- +{
- + return pte_get_bits(pte, _PAGE_NEWPAGE);
- +}
- +
- +static inline int pte_newprot(pte_t pte)
- +{
- + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
- +}
- +
- +static inline int pte_special(pte_t pte)
- +{
- + return 0;
- +}
- +
- +/*
- + * =================================
- + * Flags setting section.
- + * =================================
- + */
- +
- +static inline pte_t pte_mknewprot(pte_t pte)
- +{
- + pte_set_bits(pte, _PAGE_NEWPROT);
- + return(pte);
- +}
- +
- +static inline pte_t pte_mkclean(pte_t pte)
- +{
- + pte_clear_bits(pte, _PAGE_DIRTY);
- + return(pte);
- +}
- +
- +static inline pte_t pte_mkold(pte_t pte)
- +{
- + pte_clear_bits(pte, _PAGE_ACCESSED);
- + return(pte);
- +}
- +
- +static inline pte_t pte_wrprotect(pte_t pte)
- +{
- + pte_clear_bits(pte, _PAGE_RW);
- + return(pte_mknewprot(pte));
- +}
- +
- +static inline pte_t pte_mkread(pte_t pte)
- +{
- + pte_set_bits(pte, _PAGE_USER);
- + return(pte_mknewprot(pte));
- +}
- +
- +static inline pte_t pte_mkdirty(pte_t pte)
- +{
- + pte_set_bits(pte, _PAGE_DIRTY);
- + return(pte);
- +}
- +
- +static inline pte_t pte_mkyoung(pte_t pte)
- +{
- + pte_set_bits(pte, _PAGE_ACCESSED);
- + return(pte);
- +}
- +
- +static inline pte_t pte_mkwrite(pte_t pte)
- +{
- + pte_set_bits(pte, _PAGE_RW);
- + return(pte_mknewprot(pte));
- +}
- +
- +static inline pte_t pte_mkuptodate(pte_t pte)
- +{
- + pte_clear_bits(pte, _PAGE_NEWPAGE);
- + if(pte_present(pte))
- + pte_clear_bits(pte, _PAGE_NEWPROT);
- + return(pte);
- +}
- +
- +static inline pte_t pte_mknewpage(pte_t pte)
- +{
- + pte_set_bits(pte, _PAGE_NEWPAGE);
- + return(pte);
- +}
- +
- +static inline pte_t pte_mkspecial(pte_t pte)
- +{
- + return(pte);
- +}
- +
- +static inline void set_pte(pte_t *pteptr, pte_t pteval)
- +{
- + pte_copy(*pteptr, pteval);
- +
- + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- + * mapped pages.
- + */
- +
- + *pteptr = pte_mknewpage(*pteptr);
- + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
- +}
- +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
- +
- +#define __HAVE_ARCH_PTE_SAME
- +static inline int pte_same(pte_t pte_a, pte_t pte_b)
- +{
- + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
- +}
- +
- +/*
- + * Conversion functions: convert a page and protection to a page entry,
- + * and a page entry and page directory to the page they refer to.
- + */
- +
- +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
- +#define __virt_to_page(virt) phys_to_page(__pa(virt))
- +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
- +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
- +
- +#define mk_pte(page, pgprot) \
- + ({ pte_t pte; \
- + \
- + pte_set_val(pte, page_to_phys(page), (pgprot)); \
- + if (pte_present(pte)) \
- + pte_mknewprot(pte_mknewpage(pte)); \
- + pte;})
- +
- +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- +{
- + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
- + return pte;
- +}
- +
- +/*
- + * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
- + *
- + * this macro returns the index of the entry in the pgd page which would
- + * control the given virtual address
- + */
- +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
- +
- +/*
- + * pgd_offset() returns a (pgd_t *)
- + * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- + */
- +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
- +
- +/*
- + * a shortcut which implies the use of the kernel's pgd, instead
- + * of a process's
- + */
- +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
- +
- +/*
- + * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
- + *
- + * this macro returns the index of the entry in the pmd page which would
- + * control the given virtual address
- + */
- +#define pmd_page_vaddr(pmd) \
- + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
- +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
- +
- +/*
- + * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
- + *
- + * this macro returns the index of the entry in the pte page which would
- + * control the given virtual address
- + */
- +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
- +#define pte_offset_kernel(dir, address) \
- + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
- +#define pte_offset_map(dir, address) \
- + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
- +#define pte_unmap(pte) do { } while (0)
- +
- +struct mm_struct;
- +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
- +
- +#define update_mmu_cache(vma,address,ptep) do ; while (0)
- +
- +/* Encode and de-code a swap entry */
- +#define __swp_type(x) (((x).val >> 5) & 0x1f)
- +#define __swp_offset(x) ((x).val >> 11)
- +
- +#define __swp_entry(type, offset) \
- + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
- +#define __pte_to_swp_entry(pte) \
- + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
- +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
- +
- +#define kern_addr_valid(addr) (1)
- +
- +#include <asm-generic/pgtable.h>
- +
- +/* Clear a kernel PTE and flush it from the TLB */
- +#define kpte_clear_flush(ptep, vaddr) \
- +do { \
- + pte_clear(&init_mm, (vaddr), (ptep)); \
- + __flush_tlb_one((vaddr)); \
- +} while (0)
- +
- +#endif
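
The swap encoding above deliberately leaves bits 0 through 4 clear, so a swap pte can never be mistaken for a present, file, or new page; a sketch of the round trip the macros imply (swp_entry_t comes from the core mm headers):

/* Illustrative: type occupies pte bits 5-9, offset bits 11 and up. */
static inline int example_swp_roundtrip(unsigned int type, unsigned long off)
{
	swp_entry_t entry = __swp_entry(type & 0x1f, off);

	return __swp_type(entry) == (type & 0x1f) &&
	       __swp_offset(entry) == off;	/* holds while off fits in a pte */
}
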
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/processor-generic.h
- @@ -0,0 +1,115 @@
- +/*
- + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PROCESSOR_GENERIC_H
- +#define __UM_PROCESSOR_GENERIC_H
- +
- +struct pt_regs;
- +
- +struct task_struct;
- +
- +#include <asm/ptrace.h>
- +#include <registers.h>
- +#include <sysdep/archsetjmp.h>
- +
- +#include <linux/prefetch.h>
- +
- +struct mm_struct;
- +
- +struct thread_struct {
- + struct pt_regs regs;
- + struct pt_regs *segv_regs;
- + int singlestep_syscall;
- + void *fault_addr;
- + jmp_buf *fault_catcher;
- + struct task_struct *prev_sched;
- + struct arch_thread arch;
- + jmp_buf switch_buf;
- + struct {
- + int op;
- + union {
- + struct {
- + int pid;
- + } fork, exec;
- + struct {
- + int (*proc)(void *);
- + void *arg;
- + } thread;
- + struct {
- + void (*proc)(void *);
- + void *arg;
- + } cb;
- + } u;
- + } request;
- +};
- +
- +#define INIT_THREAD \
- +{ \
- + .regs = EMPTY_REGS, \
- + .fault_addr = NULL, \
- + .prev_sched = NULL, \
- + .arch = INIT_ARCH_THREAD, \
- + .request = { 0 } \
- +}
- +
- +static inline void release_thread(struct task_struct *task)
- +{
- +}
- +
- +extern unsigned long thread_saved_pc(struct task_struct *t);
- +
- +static inline void mm_copy_segments(struct mm_struct *from_mm,
- + struct mm_struct *new_mm)
- +{
- +}
- +
- +#define init_stack (init_thread_union.stack)
- +
- +/*
- + * User space process size: 3GB (default).
- + */
- +extern unsigned long task_size;
- +
- +#define TASK_SIZE (task_size)
- +
- +#undef STACK_TOP
- +#undef STACK_TOP_MAX
- +
- +extern unsigned long stacksizelim;
- +
- +#define STACK_ROOM (stacksizelim)
- +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
- +#define STACK_TOP_MAX STACK_TOP
- +
- +/* This decides where the kernel will search for a free chunk of vm
- + * space during mmap's.
- + */
- +#define TASK_UNMAPPED_BASE (0x40000000)
- +
- +extern void start_thread(struct pt_regs *regs, unsigned long entry,
- + unsigned long stack);
- +
- +struct cpuinfo_um {
- + unsigned long loops_per_jiffy;
- + int ipi_pipe[2];
- +};
- +
- +extern struct cpuinfo_um boot_cpu_data;
- +
- +#define my_cpu_data cpu_data[smp_processor_id()]
- +
- +#ifdef CONFIG_SMP
- +extern struct cpuinfo_um cpu_data[];
- +#define current_cpu_data cpu_data[smp_processor_id()]
- +#else
- +#define cpu_data (&boot_cpu_data)
- +#define current_cpu_data boot_cpu_data
- +#endif
- +
- +
- +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
- +extern unsigned long get_wchan(struct task_struct *p);
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/ptrace-generic.h
- @@ -0,0 +1,45 @@
- +/*
- + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_PTRACE_GENERIC_H
- +#define __UM_PTRACE_GENERIC_H
- +
- +#ifndef __ASSEMBLY__
- +
- +#include <asm/ptrace-abi.h>
- +#include <sysdep/ptrace.h>
- +
- +struct pt_regs {
- + struct uml_pt_regs regs;
- +};
- +
- +#define arch_has_single_step() (1)
- +
- +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
- +
- +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
- +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
- +
- +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
- +
- +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
- +
- +#define instruction_pointer(regs) PT_REGS_IP(regs)
- +
- +struct task_struct;
- +
- +extern long subarch_ptrace(struct task_struct *child, long request,
- + unsigned long addr, unsigned long data);
- +extern unsigned long getreg(struct task_struct *child, int regno);
- +extern int putreg(struct task_struct *child, int regno, unsigned long value);
- +
- +extern int arch_copy_tls(struct task_struct *new);
- +extern void clear_flushed_tls(struct task_struct *task);
- +extern void syscall_trace_enter(struct pt_regs *regs);
- +extern void syscall_trace_leave(struct pt_regs *regs);
- +
- +#endif
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/setup.h
- @@ -0,0 +1,10 @@
- +#ifndef SETUP_H_INCLUDED
- +#define SETUP_H_INCLUDED
- +
- +/* POSIX mandates, via _POSIX_ARG_MAX, that we can rely on 4096 chars in the
- + * command line, so this choice is ok.
- + */
- +
- +#define COMMAND_LINE_SIZE 4096
- +
- +#endif /* SETUP_H_INCLUDED */
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/smp.h
- @@ -0,0 +1,32 @@
- +#ifndef __UM_SMP_H
- +#define __UM_SMP_H
- +
- +#ifdef CONFIG_SMP
- +
- +#include <linux/bitops.h>
- +#include <asm/current.h>
- +#include <linux/cpumask.h>
- +
- +#define raw_smp_processor_id() (current_thread->cpu)
- +
- +#define cpu_logical_map(n) (n)
- +#define cpu_number_map(n) (n)
- +extern int hard_smp_processor_id(void);
- +#define NO_PROC_ID -1
- +
- +extern int ncpus;
- +
- +
- +static inline void smp_cpus_done(unsigned int maxcpus)
- +{
- +}
- +
- +extern struct task_struct *idle_threads[NR_CPUS];
- +
- +#else
- +
- +#define hard_smp_processor_id() 0
- +
- +#endif
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/stacktrace.h
- @@ -0,0 +1,42 @@
- +#ifndef _ASM_UML_STACKTRACE_H
- +#define _ASM_UML_STACKTRACE_H
- +
- +#include <linux/uaccess.h>
- +#include <linux/ptrace.h>
- +
- +struct stack_frame {
- + struct stack_frame *next_frame;
- + unsigned long return_address;
- +};
- +
- +struct stacktrace_ops {
- + void (*address)(void *data, unsigned long address, int reliable);
- +};
- +
- +#ifdef CONFIG_FRAME_POINTER
- +static inline unsigned long
- +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
- +{
- + if (!task || task == current)
- + return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
- + return KSTK_EBP(task);
- +}
- +#else
- +static inline unsigned long
- +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
- +{
- + return 0;
- +}
- +#endif
- +
- +static inline unsigned long
- +*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
- +{
- + if (!task || task == current)
- + return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
- + return (unsigned long *)KSTK_ESP(task);
- +}
- +
- +void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
- +
- +#endif /* _ASM_UML_STACKTRACE_H */
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/sysrq.h
- @@ -0,0 +1,7 @@
- +#ifndef __UM_SYSRQ_H
- +#define __UM_SYSRQ_H
- +
- +struct task_struct;
- +extern void show_trace(struct task_struct* task, unsigned long *stack);
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/thread_info.h
- @@ -0,0 +1,78 @@
- +/*
- + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_THREAD_INFO_H
- +#define __UM_THREAD_INFO_H
- +
- +#ifndef __ASSEMBLY__
- +
- +#include <asm/types.h>
- +#include <asm/page.h>
- +#include <asm/uaccess.h>
- +
- +struct thread_info {
- + struct task_struct *task; /* main task structure */
- + struct exec_domain *exec_domain; /* execution domain */
- + unsigned long flags; /* low level flags */
- + __u32 cpu; /* current CPU */
- + int preempt_count; /* 0 => preemptable,
- + <0 => BUG */
- + mm_segment_t addr_limit; /* thread address space:
- + 0-0xBFFFFFFF for user
- + 0-0xFFFFFFFF for kernel */
- + struct restart_block restart_block;
- + struct thread_info *real_thread; /* Points to non-IRQ stack */
- +};
- +
- +#define INIT_THREAD_INFO(tsk) \
- +{ \
- + .task = &tsk, \
- + .exec_domain = &default_exec_domain, \
- + .flags = 0, \
- + .cpu = 0, \
- + .preempt_count = INIT_PREEMPT_COUNT, \
- + .addr_limit = KERNEL_DS, \
- + .restart_block = { \
- + .fn = do_no_restart_syscall, \
- + }, \
- + .real_thread = NULL, \
- +}
- +
- +#define init_thread_info (init_thread_union.thread_info)
- +#define init_stack (init_thread_union.stack)
- +
- +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
- +/* how to get the thread information struct from C */
- +static inline struct thread_info *current_thread_info(void)
- +{
- + struct thread_info *ti;
- + unsigned long mask = THREAD_SIZE - 1;
- + void *p;
- +
- + asm volatile ("" : "=r" (p) : "0" (&ti));
- + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
- + return ti;
- +}
- +
- +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
- +
- +#endif
- +
- +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
- +#define TIF_SIGPENDING 1 /* signal pending */
- +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
- +#define TIF_RESTART_BLOCK 4
- +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
- +#define TIF_SYSCALL_AUDIT 6
- +#define TIF_RESTORE_SIGMASK 7
- +#define TIF_NOTIFY_RESUME 8
- +
- +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
- +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- +
- +#endif
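
current_thread_info() above works because kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned: masking any address inside the stack yields the stack base, where the thread_info sits. The empty asm merely launders the on-stack address through a register. An equivalent open-coded sketch:

/* Sketch: round an on-stack address down to the stack base. */
static inline struct thread_info *example_cti(void)
{
	unsigned long sp = (unsigned long)&sp;	/* any address on this stack */

	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
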
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/timex.h
- @@ -0,0 +1,13 @@
- +#ifndef __UM_TIMEX_H
- +#define __UM_TIMEX_H
- +
- +typedef unsigned long cycles_t;
- +
- +static inline cycles_t get_cycles (void)
- +{
- + return 0;
- +}
- +
- +#define CLOCK_TICK_RATE (HZ)
- +
- +#endif
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/tlb.h
- @@ -0,0 +1,134 @@
- +#ifndef __UM_TLB_H
- +#define __UM_TLB_H
- +
- +#include <linux/pagemap.h>
- +#include <linux/swap.h>
- +#include <asm/percpu.h>
- +#include <asm/pgalloc.h>
- +#include <asm/tlbflush.h>
- +
- +#define tlb_start_vma(tlb, vma) do { } while (0)
- +#define tlb_end_vma(tlb, vma) do { } while (0)
- +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
- +
- +/* struct mmu_gather is an opaque type used by the mm code for passing around
- + * any data needed by arch specific code for tlb_remove_page.
- + */
- +struct mmu_gather {
- + struct mm_struct *mm;
- + unsigned int need_flush; /* Really unmapped some ptes? */
- + unsigned long start;
- + unsigned long end;
- + unsigned int fullmm; /* non-zero means full mm flush */
- +};
- +
- +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- + unsigned long address)
- +{
- + if (tlb->start > address)
- + tlb->start = address;
- + if (tlb->end < address + PAGE_SIZE)
- + tlb->end = address + PAGE_SIZE;
- +}
- +
- +static inline void init_tlb_gather(struct mmu_gather *tlb)
- +{
- + tlb->need_flush = 0;
- +
- + tlb->start = TASK_SIZE;
- + tlb->end = 0;
- +
- + if (tlb->fullmm) {
- + tlb->start = 0;
- + tlb->end = TASK_SIZE;
- + }
- +}
- +
- +static inline void
- +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- +{
- + tlb->mm = mm;
- + tlb->start = start;
- + tlb->end = end;
- + tlb->fullmm = !(start | (end+1));
- +
- + init_tlb_gather(tlb);
- +}
- +
- +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- + unsigned long end);
- +
- +static inline void
- +tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
- +{
- + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
- +}
- +
- +static inline void
- +tlb_flush_mmu_free(struct mmu_gather *tlb)
- +{
- + init_tlb_gather(tlb);
- +}
- +
- +static inline void
- +tlb_flush_mmu(struct mmu_gather *tlb)
- +{
- + if (!tlb->need_flush)
- + return;
- +
- + tlb_flush_mmu_tlbonly(tlb);
- + tlb_flush_mmu_free(tlb);
- +}
- +
- +/* tlb_finish_mmu
- + * Called at the end of the shootdown operation to free up any resources
- + * that were required.
- + */
- +static inline void
- +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
- +{
- + tlb_flush_mmu(tlb);
- +
- + /* keep the page table cache within bounds */
- + check_pgt_cache();
- +}
- +
- +/* tlb_remove_page
- + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- + * while handling the additional races in SMP caused by other CPUs
- + * caching valid mappings in their TLBs.
- + */
- +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
- +{
- + tlb->need_flush = 1;
- + free_page_and_swap_cache(page);
- + return 1; /* avoid calling tlb_flush_mmu */
- +}
- +
- +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
- +{
- + __tlb_remove_page(tlb, page);
- +}
- +
- +/**
- + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- + *
- + * Record the fact that ptes were really unmapped in ->need_flush, so we can
- + * later optimise away the tlb invalidate. This helps when userspace is
- + * unmapping already-unmapped pages, which happens quite a lot.
- + */
- +#define tlb_remove_tlb_entry(tlb, ptep, address) \
- + do { \
- + tlb->need_flush = 1; \
- + __tlb_remove_tlb_entry(tlb, ptep, address); \
- + } while (0)
- +
- +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
- +
- +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
- +
- +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
- +
- +#define tlb_migrate_finish(mm) do {} while (0)
- +
- +#endif
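The mmu_gather machinery above is driven by the generic mm code as a gather/remove/finish sequence: one tlb_gather_mmu(), any number of tlb_remove_tlb_entry()/tlb_remove_page() calls that set need_flush and widen the [start, end) window, then a single flush from tlb_finish_mmu(). A hedged sketch of that calling pattern; example_lookup_pte() is a hypothetical stand-in for the real page-table walk:

static void unmap_range_example(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather gather, *tlb = &gather;
	unsigned long addr;

	tlb_gather_mmu(tlb, mm, start, end);	/* fullmm only for the 0..~0 range */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *ptep = example_lookup_pte(mm, addr);	/* hypothetical helper */

		/* records need_flush and grows tlb->start..tlb->end */
		tlb_remove_tlb_entry(tlb, ptep, addr);
	}
	tlb_finish_mmu(tlb, start, end);	/* one flush_tlb_mm_range() covers it all */
}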
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/tlbflush.h
- @@ -0,0 +1,31 @@
- +/*
- + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_TLBFLUSH_H
- +#define __UM_TLBFLUSH_H
- +
- +#include <linux/mm.h>
- +
- +/*
- + * TLB flushing:
- + *
- + * - flush_tlb() flushes the current mm struct TLBs
- + * - flush_tlb_all() flushes all processes TLBs
- + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- + * - flush_tlb_page(vma, vmaddr) flushes one page
- + * - flush_tlb_kernel_vm() flushes the kernel vm area
- + * - flush_tlb_range(vma, start, end) flushes a range of pages
- + */
- +
- +extern void flush_tlb_all(void);
- +extern void flush_tlb_mm(struct mm_struct *mm);
- +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- + unsigned long end);
- +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
- +extern void flush_tlb_kernel_vm(void);
- +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
- +extern void __flush_tlb_one(unsigned long addr);
- +
- +#endif
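Each of these flushes pairs with a preceding page-table update; the rule is to update first, then invalidate the stale translation. An illustrative single-page example, assuming the generic set_pte_at()/pte_mkdirty()/pte_mkwrite() helpers (the function itself is invented for the sketch):

static void example_make_writable(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	pte_t pte = pte_mkwrite(pte_mkdirty(*ptep));

	set_pte_at(vma->vm_mm, addr, ptep, pte);
	flush_tlb_page(vma, addr);	/* drop the now-stale translation */
}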
- --- /dev/null
- +++ b/arch/um/include/uapi/asm/uaccess.h
- @@ -0,0 +1,178 @@
- +/*
- + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- + * Licensed under the GPL
- + */
- +
- +#ifndef __UM_UACCESS_H
- +#define __UM_UACCESS_H
- +
- +/* thread_info has a mm_segment_t in it, so put the definition up here */
- +typedef struct {
- + unsigned long seg;
- +} mm_segment_t;
- +
- +#include <linux/thread_info.h>
- +#include <linux/errno.h>
- +#include <asm/processor.h>
- +#include <asm/elf.h>
- +
- +#define VERIFY_READ 0
- +#define VERIFY_WRITE 1
- +
- +/*
- + * The fs value determines whether argument validity checking should be
- + * performed or not. If get_fs() == USER_DS, checking is performed; with
- + * get_fs() == KERNEL_DS, checking is bypassed.
- + *
- + * For historical reasons, these macros are grossly misnamed.
- + */
- +
- +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
- +
- +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
- +#define USER_DS MAKE_MM_SEG(TASK_SIZE)
- +
- +#define get_ds() (KERNEL_DS)
- +#define get_fs() (current_thread_info()->addr_limit)
- +#define set_fs(x) (current_thread_info()->addr_limit = (x))
- +
- +#define segment_eq(a, b) ((a).seg == (b).seg)
- +
- +#define __under_task_size(addr, size) \
- + (((unsigned long) (addr) < TASK_SIZE) && \
- + (((unsigned long) (addr) + (size)) < TASK_SIZE))
- +
- +#define __access_ok_vsyscall(type, addr, size) \
- + ((type == VERIFY_READ) && \
- + ((unsigned long) (addr) >= FIXADDR_USER_START) && \
- + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
- + ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
- +
- +#define __addr_range_nowrap(addr, size) \
- + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
- +
- +#define access_ok(type, addr, size) \
- + (__addr_range_nowrap(addr, size) && \
- + (__under_task_size(addr, size) || \
- + __access_ok_vsyscall(type, addr, size) || \
- + segment_eq(get_fs(), KERNEL_DS)))
- +
- +extern int copy_from_user(void *to, const void __user *from, int n);
- +extern int copy_to_user(void __user *to, const void *from, int n);
- +
- +/*
- + * strncpy_from_user: - Copy a NUL terminated string from userspace.
- + * @dst: Destination address, in kernel space. This buffer must be at
- + * least @count bytes long.
- + * @src: Source address, in user space.
- + * @count: Maximum number of bytes to copy, including the trailing NUL.
- + *
- + * Copies a NUL-terminated string from userspace to kernel space.
- + *
- + * On success, returns the length of the string (not including the trailing
- + * NUL).
- + *
- + * If access to userspace fails, returns -EFAULT (some data may have been
- + * copied).
- + *
- + * If @count is smaller than the length of the string, copies @count bytes
- + * and returns @count.
- + */
- +
- +extern int strncpy_from_user(char *dst, const char __user *src, int count);
- +
- +/*
- + * __clear_user: - Zero a block of memory in user space, with less checking.
- + * @mem: Destination address, in user space.
- + * @len: Number of bytes to zero.
- + *
- + * Zero a block of memory in user space. Caller must check
- + * the specified block with access_ok() before calling this function.
- + *
- + * Returns number of bytes that could not be cleared.
- + * On success, this will be zero.
- + */
- +extern int __clear_user(void __user *mem, int len);
- +
- +/*
- + * clear_user: - Zero a block of memory in user space.
- + * @mem: Destination address, in user space.
- + * @len: Number of bytes to zero.
- + *
- + * Zero a block of memory in user space.
- + *
- + * Returns number of bytes that could not be cleared.
- + * On success, this will be zero.
- + */
- +extern int clear_user(void __user *mem, int len);
- +
- +/*
- + * strnlen_user: - Get the size of a string in user space.
- + * @str: The string to measure.
- + * @len: The maximum valid length
- + *
- + * Get the size of a NUL-terminated string in user space.
- + *
- + * Returns the size of the string INCLUDING the terminating NUL.
- + * On exception, returns 0.
- + * If the string is too long, returns a value greater than @len.
- + */
- +extern int strnlen_user(const void __user *str, int len);
- +
- +#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
- +
- +#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
- +
- +#define __copy_to_user_inatomic __copy_to_user
- +#define __copy_from_user_inatomic __copy_from_user
- +
- +#define __get_user(x, ptr) \
- +({ \
- + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
- + __typeof__(x) __private_val; \
- + int __private_ret = -EFAULT; \
- + (x) = (__typeof__(*(__private_ptr)))0; \
- + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
- + sizeof(*(__private_ptr))) == 0) { \
- + (x) = (__typeof__(*(__private_ptr))) __private_val; \
- + __private_ret = 0; \
- + } \
- + __private_ret; \
- +})
- +
- +#define get_user(x, ptr) \
- +({ \
- + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
- + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
- + __get_user(x, private_ptr) : ((x) = (__typeof__(*(ptr)))0, -EFAULT)); \
- +})
- +
- +#define __put_user(x, ptr) \
- +({ \
- + __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
- + __typeof__(*(__private_ptr)) __private_val; \
- + int __private_ret = -EFAULT; \
- + __private_val = (__typeof__(*(__private_ptr))) (x); \
- + if (__copy_to_user((__private_ptr), &__private_val, \
- + sizeof(*(__private_ptr))) == 0) { \
- + __private_ret = 0; \
- + } \
- + __private_ret; \
- +})
- +
- +#define put_user(x, ptr) \
- +({ \
- + __typeof__(*(ptr)) __user *private_ptr = (ptr); \
- + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
- + __put_user(x, private_ptr) : -EFAULT); \
- +})
- +
- +#define strlen_user(str) strnlen_user(str, ~0U >> 1)
- +
- +struct exception_table_entry
- +{
- + unsigned long insn;
- + unsigned long fixup;
- +};
- +
- +#endif
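get_user()/put_user() above fold the access_ok() range check and the copy into a single expression that yields 0 on success and -EFAULT otherwise, so callers need no separate validity check. A sketch of the intended calling pattern in the style of an ioctl handler (example_ioctl() and its doubling of the value are invented for illustration):

static long example_ioctl(unsigned long arg)
{
	int __user *uptr = (int __user *)arg;
	int val;

	if (get_user(val, uptr))	/* access_ok() + copy_from_user() */
		return -EFAULT;

	val *= 2;			/* any kernel-side work */

	if (put_user(val, uptr))	/* access_ok() + copy_to_user() */
		return -EFAULT;
	return 0;
}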