diff -Nru a/Documentation/vax/README b/Documentation/vax/README --- a/Documentation/vax/README 1970-01-01 01:00:00 +++ b/Documentation/vax/README 2005-04-08 00:56:25 @@ -0,0 +1,206 @@ + +Last updated Oct 30, 2003 + +GETTING STARTED + +To play with this port you need the following: + +1. The cross-compiler and binutils +2. The kernel sources +3. A MOP server (mopd) +4. A VAX with an ethernet card or SCSI interface + +Help on obtaining and building user-land binaries can be found in +Documentation/vax/userland.txt. + +Unfortunately, there are a few large downloads involved to get +up and running... + +All the sources are in CVS on SourceForge. For the initial +check-out of the sources, you can save some typing by defining +CVSROOT as follows: + + export CVSROOT=:pserver:anonymous@cvs.sf.net:/cvsroot/linux-vax + +You won't need this definition after the initial checkout. When you +run a CVS command inside a checked-out tree, CVS gets the info from +the CVS/Root and CVS/Repository files. + +Then do 'cvs login' and hit return at the login prompt so that CVS can +cache the login details in your ~/.cvspass file. + +1. The cross-compiler and binutils + + Pull the toolchain from CVS: + + mkdir ~/linux-vax/src + cd ~/linux-vax/src + cvs checkout toolchain + + Then build it: + + cd toolchain + ./build-vax.sh + + All the object files and binaries will go into toolchain/b-vax-dec-linux. + + These should complete without errors. If you get errors, something + is seriously wrong and you probably won't get a correctly-installed + toolchain. All object files and binaries will be created in + vax-cross/b-vax-dec-linux without touching the source trees. + + Then install them: + + $ su -c './build-vax.sh install' + + This will create programs in /usr/local/bin prefixed with vax-dec-linux- + (for example /usr/local/bin/vax-dec-linux-gcc) and directories + /usr/local/vax-dec-linux and /usr/local/lib/gcc-lib/vax-dec-linux. + + This will not touch your current GCC installation. 
+ + Note: If you want to install the toolchain outside /usr/local (for + example, in your home directory), edit toolchain/build-vax.sh and + set prefix to something like ~/linux-vax/tools-bin. You need to + completely rebuild the toolchain if you change the prefix. + +2. The kernel sources + + Grab the sources from CVS: + + cd ~/linux-vax/src + cvs checkout kernel-2.5 + + We recommend that you use a separate output directory when compiling + the kernel. (It should work with a same-directory compile, but separate + output directories are just too useful to ignore them!) + + cd kernel-2.5 + mkdir ~/linux-vax/kbuild + + Start with one of the default configs (i.e. run one of these commands): + + make ARCH=vax O=~/linux-vax/kbuild ka650_defconfig + make ARCH=vax O=~/linux-vax/kbuild ka46_defconfig + make ARCH=vax O=~/linux-vax/kbuild ka43_defconfig + + You could, of course, have multiple output directories, one for each + config. + + Tweak the config if you want: + + make ARCH=vax O=~/linux-vax/kbuild config + + BTW, as a shorthand, you can define a KBUILD_OUTPUT environment variable + rather than type O=... all the time. + + Now build the kernel: + + make ARCH=vax O=~/linux-vax/kbuild + + (As an alternative to specifying ARCH=vax on the make command line, you + can set ARCH=vax in your environment instead.) + + This will create a MOP-bootable image called vmlinux.SYS + + If you have your VAX and Linux machine on the same SCSI chain and + you've got a scratch disk handy, you can do + + $ make ARCH=vax diskboot && dd if=vmlinux.dsk of=/dev/sdX + + and then tell your VAX to boot from this disk. This is faster than + netbooting. + + NOTE THAT THIS WILL DESTROY ANY FILESYSTEM ALREADY ON THE DISK. + + YOU HAVE BEEN WARNED. + +3. A MOP server (mopd) + + Pull mopd from CVS + + cd ~/linux-vax/src + cvs checkout usr/mopd-linux + + Compile and install. Create the directory /tftpboot/mop. mopd looks + here, and here only, when searching for boot images. 
+ + Create a link from /tftpboot/mop/<ethernet-addr>.SYS to the vmlinux.SYS file + in your development tree. <ethernet-addr> is the ethernet address of your + VAX in _lowercase_ with no separators. For example, mine is + 08002b0db20f.SYS. + + It can be useful to run mopd with the -d switch to see what it + receives from the network. + +4. A VAX with an ethernet card or SCSI interface. + + As we don't really have any hardware support in yet, hardware + requirements are pretty minimal: + + CPU + Serial console + 8 MB ram + Ethernet card + + So far we've had success reports from people with the following + machines: + + VAXstation 2000 + VAXstation 3100/m30 + VAXstation 3100/m76 + VAXstation 3500 + VAXstation II/GPX + + First you'll want to get your VAX to stop at the >>> console prompt + at power up. There is usually a switch on the CPU board, front + panel or rear panel (depending on the model) to select this. Look for + a circle with a dot inside. + + Hook your VAX up to a standalone terminal, such as a VT-series terminal + or a serial port on your PC. The VAX will probably have an MMJ + serial connector. I can't find a URL with the pin-out info for this + guy. + + If you have an OS installed (e.g. VMS, Ultrix, NetBSD), it would be + a good idea to take your disks offline, if your VAX has a handy way + to do this. For example, the VS3500 has front panel switches to + take the internal disks offline. + + At the >>> prompt, try B , B XQA0 or B ESA0 and see if one + of them tries to netboot (watch the output of mopd -d). + + If it looks like mopd sent over a boot image, let us know what + happens. Depending on your hardware, you might get a kernel + version banner and some diagnostic output. However, if we don't + support your serial console hardware, you'll probably just get an + error message such as 'HLT INST' and return to the >>> prompt. + If this happens, do the following: + + >>> E PC + >>> E PSL + >>> E SP + >>> E/V @ + >>> E + >>> E + >>> E + >>> E + >>> E + >>> E + + And send us the output. 
This will hopefully give us clues as + to how to get your serial console supported. + + If your VAX has a SCSI interface and you have an external SCSI + connector on your Linux box, you can connect both of them to + the same SCSI bus. (Make sure the host adapters in each machine + have different SCSI IDs. VAXen usually ship with the host adapter + set to ID 6, PCs are usually ID 7.) + + Then you can copy a kernel image onto a disk on the bus and boot + from there. + + NOTE THAT THIS WILL DESTROY ANY FILESYSTEM ALREADY ON THE DISK. + + YOU HAVE BEEN WARNED. + diff -Nru a/Documentation/vax/assembler.txt b/Documentation/vax/assembler.txt --- a/Documentation/vax/assembler.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/assembler.txt 2003-06-19 01:02:48 @@ -0,0 +1,47 @@ + +The GNU assembler in our cross compilation tool set is a little +different from DEC's MACRO32 assembler. This file summarises the +differences. + +1. Register names are prefixed with % + + In VAX MACRO you might write: + + movl #0, r0 + + In gas, these are written: + + movl $0, %r0 + +2. #, ^ and @ become $, ` and * + + In VAX MACRO you might write: + + movl #0, r0 + movl #0, @8(r5) + movl #0, L^8(r5) + + In gas, these are written: + + movl $0, %r0 + movl $0, *8(%r5) + movl $0, L`8(%r5) + +3. ^X becomes 0x + + Hex constants are prefixed with 0x, rather than ^x + Similarly, a leading zero not followed by an x implies octal. 
+ Therefore the following instructions are equivalent: + + VAX MACRO: + + movl #64, r0 + movl #^x40, r0 + movl #^o100, r0 + + gas: + + movl $64, %r0 + movl $0x40, %r0 + movl $0100, %r0 + diff -Nru a/Documentation/vax/cpu.txt b/Documentation/vax/cpu.txt --- a/Documentation/vax/cpu.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/cpu.txt 2002-05-20 02:33:29 @@ -0,0 +1,151 @@ + +$Id: cpu.txt,v 1.3 2002/05/20 00:33:29 kenn Exp $ + +INTRODUCTION +============ + +This file attempts to collate all the CPUs that we know about, how they +are identified and any quirks or bugs that we need to watch for. + +VAX CPUs are identified with model numbers beginning with KA followed +by 2 or 3 digits. Multiple DEC systems may use the same CPUs, with +different surrounding hardware (and slightly different firmware in some +cases), but the basic operation should be much the same. + +These CPUs fall into families that seem to have various codenames +(such as RIGEL and MARIAH). Where possible, we will try to use the KAxx +designations, rather than the codenames. + +A VAX CPU is identified during boot by first examining internal processor +register 0x3E (PR$_SID). The high byte of this register seems to denote +the processor family. The meaning of the low 3 bytes depends on the family. + +SUPPORTED CPUS +============== + + KA42 + KA43 + KA46 + KA410 + KA630 + KA650 + +UNSUPPORTED CPUS +================ + + KA41 + KA52 + KA55 + KA60 + KA620 + KA640 + KA655 + KA660 + KA730 + KA750 + KA780 + KA785 + KA790 + + +******************************************************************************* +******************************************************************************* + +KA650 +===== + +Description: + + Q-22 bus single-board CPU. M-number is M7620. Based on the CVAX + implementation of VAX. Sometimes called a MicroVAX III. + + The only I/O on the CPU itself is the console serial port. + +Shipped in: + + VAXstation 3500 + +Identification: + + PR$_SID: + + The high byte is 0x0A. 
This indicates a CVAX-based CPU. The low + byte holds the microcode revision. + + SIDEX at 20040004: + + The high byte is 0x01. This seems to indicate a Qbus CPU. + + Bits 16 to 23 hold the firmware revision. + + Bits 8 to 15 contain 0x01. This means KA650. + + The meaning of bits 0 to 7 is unknown. + +Notes: + + The KA650's firmware is held in a pair of 27512 EPROMs. Some units + shipped with firmware versions that didn't even have a HELP + command. + + Looking at the KA650 firmware, it looks like the same firmware is + used in the KA640 and KA655 as well. There are a lot of CASEx + instructions that dispatch on bits 8 to 15 of SIDEX. + + + +******************************************************************************* +******************************************************************************* + +KA43 +==== + +Description: + + Integrated CPU and mainboard based on the RIGEL implementation of VAX. + + The board also contains two NCR5380 SCSI controllers, an AMD LANCE + ethernet controller and a DZ11-compatible serial controller. + Maximum memory is 32MB. + + Online copy of the VAXstation 3100 Model 76 Owner's Guide (EK-VX31M-UG) + available at http://www.whiteice.com/~williamwebb/intro/DOC-i.html. + +Shipped in: + + VAXstation 3100 Model 76 + +Identification: + + PR$_SID: + + The high byte is 0x0B. This seems to indicate a RIGEL-based CPU. + The meaning of the low 3 bytes is unknown. + + SIDEX at 20040004: + + The meaning of the SIDEX is unknown. + +Notes: + + Sharing memory with the LANCE chip requires a bit of hackery. Physical + memory is accessible from 0x00000000 to 0x01ffffff (as normal), but is + also accessible via the "DIAGMEM" region of I/O space from 0x28000000 + to 0x29ffffff. To prevent strange behaviour (such as memory read + parity error machine checks), you _must_ read and write the memory + shared with the LANCE via the DIAGMEM region. 
+ + One way to do this is to kmalloc() a region for the LANCE structures + and buffers and modify the PTEs for this region to OR in bits + 0x00140000 in the PFN field (to make them point to the DIAGMEM region). + Actually, using get_free_pages() might be a better idea, since there + might be other data structures sharing pages with this region, because + kmalloc() doesn't page-align. + + Another way is to calculate the physical addresses behind the kmalloc()ed + region and ioremap() them. This has the disadvantage of using twice as + many PTEs. + + It looks like this might be needed for DMA to the SCSI controllers as + well. + diff -Nru a/Documentation/vax/interrupts.txt b/Documentation/vax/interrupts.txt --- a/Documentation/vax/interrupts.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/interrupts.txt 2004-09-06 15:22:22 @@ -0,0 +1,73 @@ + +20000709 KPH + +Here's how I intend to deal with interrupt and exception dispatching. + +o During boot time, trap_init fills the whole SCB with stray handlers. + Since the CPU might save some longwords of data on the stack after + an exception, we can't just continue from one of these exceptions + in the general case. (However, interrupts from devices that come + through the second and subsequent pages of the SCB should be + continuable.) + + The stray handlers might help out with autoprobing interrupts if we + decide to implement probe_irq_on() and probe_irq_off(). + + Dammit, I hate using the term IRQ when talking about VAXen. It just + seems so PC-centric... + +o When an interrupt (or exception) occurs and the CPU dispatches to + the handler address in the SCB, the only clue we have as to the + interrupt or exception number is the handler address. There is no + other way to tell which interrupt happened. + + This implies that every interrupt or exception handler must have a + unique address. 
+ +o When a driver (or other code) calls request_irq(), we allocate a data + structure (let's call it irqvector) that contains a struct irqaction + and a little bit of in-line code. This code just pushes PC on the + stack and jumps to the generic handler. (It does this by executing + a JSB instruction.) + + This generic handler sees a stack that looks like: + + SP: handler_PC (inside the irqvector) + (maybe) exception info + saved PC + saved PSL + + The generic handler builds the required pt_regs struct by duplicating + the saved PC/PSL and saving all the other registers. This makes the + stack look like: + + SP: saved R0 + saved R1 + ... + saved R11 + saved FP + saved AP + saved SP + saved PC + saved PSL + saved R0 + handler PC (inside the irqvector) + (maybe) exception info + saved PC + saved PSL + + (The second saved R0 is because we need a working register in the + handler code.) + + The generic handler then obtains the handler PC from back up the stack, + then passes this PC, the addr of the pt_regs and exception info to a + dispatcher function. This function is responsible for calculating the + start address of the irqvector structure and calling irqaction.handler(). + + When control returns to the generic handler, it restores the registers, + clears the stack down as far as the original saved PC and PSL and does + an REI. + +Anyone playing around with this stuff really needs to read the Exceptions +and Interrupts chapter in the VAX Architecture Reference Manual. + diff -Nru a/Documentation/vax/ka43-interrupts.txt b/Documentation/vax/ka43-interrupts.txt --- a/Documentation/vax/ka43-interrupts.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/ka43-interrupts.txt 2002-05-20 02:33:29 @@ -0,0 +1,179 @@ + +$Id: ka43-interrupts.txt,v 1.3 2002/05/20 00:33:29 kenn Exp $ + +This info was obtained by trawling through a running VMS 7.2 on a +VAXstation 3100/m76 (KA43 CPU) with the System Dump Analyzer +(ANALYZE/SYSTEM). 
+ +First off, this is the SCB (system control block): + + SDA> examine exe$gl_scb + EXE$GL_SCB: 81258000 "..%." + SDA> examine 81258000:81258000+3fc + 80BD6E09 80002491 8000A801 80002119 .!.......$...n�. 81258000 + 800025F8 80B723A4 80002518 80E5CFC0 ..�..%...#o.�%.. 81258010 + 80B722C0 80B723AC 80BB35B8 80B7223C <"o..5�.�#o.."o. 81258020 + 80BB3479 80002118 800021D0 80002308 .#...!...!..y4�. 81258030 + 80002300 800022F8 80B724D8 80B725E0 �%o.b$o.�"...#.. 81258040 + 8000A819 8000A811 8000A809 80002118 .!.............. 81258050 + 80002118 80002520 80002118 8000A821 !....!.. %...!.. 81258060 + 80002118 80002118 80002118 80002118 .!...!...!...!.. 81258070 + 80BE0C00 80BD04D0 80002118 80002118 .!...!....�..... 81258080 + 80C4E921 80C4E621 80002118 80BD3E91 .>�..!..!��.!��. 81258090 + 80C4E639 80C4E631 80C4E629 80C4E641 A��.)��.1��.9��. 812580A0 + 80002118 800027B1 80002118 80002471 q$...!..�'...!.. 812580B0 + 80E60A9C 80E60A00 80002118 80C4E739 9��..!....�...�. 812580C0 + 80002118 80002118 80002118 80002118 .!...!...!...!.. 812580D0 + 80002118 80002118 80002118 80002118 .!...!...!...!.. 812580E0 + 80C50A5D 80C50A25 80002118 80002118 .!...!..%.�.].�. 812580F0 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258100 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258110 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258120 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258130 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258140 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258150 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258160 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258170 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258180 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 81258190 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 812581A0 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 812581B0 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 
812581C0 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 812581D0 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 812581E0 + 80002119 80002119 80002119 80002119 .!...!...!...!.. 812581F0 + 8000A829 8000A829 8000A829 80E642D9 .B�.)...)...)... 81258200 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258210 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258220 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258230 + 8000A829 80DBB389 80DBB351 8000A829 )...Q.......)... 81258240 + 8000A829 8000A829 8000A829 80D875D1 �ub.)...)...)... 81258250 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258260 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258270 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258280 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258290 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812582A0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812582B0 + 8000A829 8000A829 80DC33C9 80DC3391 .3�.�3�.)...)... 812582C0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812582D0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812582E0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812582F0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258300 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258310 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258320 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258330 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258340 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258350 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258360 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258370 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258380 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 81258390 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812583A0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 
812583B0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812583C0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812583D0 + 8000A829 8000A829 8000A829 8000A829 )...)...)...)... 812583E0 + 80D9CF11 80DB9BD1 8000A829 8000A829 )...)...�....... 812583F0 + +(The hex data reads right-to-left and the ASCII reads left-to-right, +i.e. VMS DUMP format.) + +The part we're interested in here is the second page (containing the +device vectors). Most of these vectors are set to 8000a829 (which +corresponds to address 8000a828, and the CPU should switch to the +interrupt stack): + + SDA> examine/instr 8000a828 + UBA$UNEXINT: JMP @#MCHK+00700 + UBA$UNEXINT+00006: HALT + +So, these are unexpected interrupts and will lead to code near the +machine check handling code. + +The other (used) vectors are: + + vector addr vector number handler addr + 81258200 80 80e642d8 + 81258244 91 80dbb350 + 81258248 92 80dbb388 + 81258250 94 80d875d0 + 812582c0 b0 80dc3390 + 812582c4 b1 80dc33c8 + 812583f8 fe 80db9bd0 + 812583fc ff 80d9cf10 + +These interrupt handler addresses are containing within the CRB +(Channel Request Block) for the relevant device or controller. + +Let's chase these down. First vector 0x80: + + SDA> examine/instr 80e642d8:80e642d8+10 + MCHK+006F8: INCL @#IO$GL_UBA_INT0 + MCHK+006FE: BRB MCHK+00700 + MCHK+00700: REI + +This just increments an interrupt counter and dismisses the interrupt. +(So the unexpected interrupt handler above effectively just dismisses +the interrupt.) + +Next vector 0x91: + + SDA> examine/instr/noskip 80DBB350;2 + 80DBB350: PUSHR #3F + 80DBB352: JSB @#GABDRIVER+00942 + +So, this interrupt is probably handled by GABDRIVER. Let's verify this by +looking at GABDRIVER's data structures: + + SDA> show device ga + ... + --- Primary Channel Request Block (CRB) 80DBB300 --- + + Reference count 1 Wait queue empty + IDB address 80DB5640 Unit init. 80E0D589 Int. service 80E0DCC2 + ADP address 80D87300 Ctrl. init. 80E0D4ED + ... 
+ SDA> format 80DBB300 + 80DBB300 CRB$L_FQFL 00000000 + ... + 80DBB350 CRB$L_INTD 9F163FBB + 80DBB354 80E0DCC2 GABDRIVER+00942 + ... + 80DBB388 CRB$L_INTD2 9F163FBB + SDA> + +So the interrupt handler is 0x50 bytes into the CRB. Interestingly, there's +another interrupt handler 0x88 bytes into the CRB as well. This corresponds +to vector 0x92 in the table above. + + SDA> examine/instr 80DBB388;2 + 80DBB388: PUSHR #3F + 80DBB38A: JSB @#GABDRIVER+0145C + +So this device uses two interrupt vectors. + +This means that the CRBs for the other vectors are: + + vector addr num handler addr CRB address driver + 81258200 80 80e642d8 (no driver) + 81258244 91 80dbb350 80dbb300 GABDRIVER + 81258248 92 80dbb388 80dbb300 GABDRIVER + 81258250 94 80d875d0 80d87580 ESDRIVER + 812582c0 b0 80dc3390 80dc3340 YEDRIVER + 812582c4 b1 80dc33c8 80dc3340 YEDRIVER + 812583f8 fe 80db9bd0 80db9b80 PKNDRIVER (PKA) + 812583fc ff 80d9cf10 80d9cec0 PKNDRIVER (PKB) + + +GABDRIVER is the framebuffer driver, ESDRIVER is ethernet, YEDRIVER +is a terminal port driver (I think) and PKNDRIVER is a SCSI port +driver. So, to summarize, the KA43 device interrupts are: + + Framebuffer 0x91, 0x92 + LANCE ethernet 0x94 + DZ11 serial 0xb0, 0xb1 + NCR5380 SCSI: + internal 0xfe + external 0xff + + + diff -Nru a/Documentation/vax/keep_an_eye_on.txt b/Documentation/vax/keep_an_eye_on.txt --- a/Documentation/vax/keep_an_eye_on.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/keep_an_eye_on.txt 2005-10-18 08:01:47 @@ -0,0 +1,29 @@ +This file contains various items that are about to change in any of the +important upstream kernel trees. Whenever you see patches flying around +on LKML that do have impact for the VAX tree (ie. new Kconfig entries, +function deprecation, ...) please add a note here so that it can be +revised later on. + +- Mar 4, 2005: "Re: [PATCH] new driver for ITM Touch touchscreen" + Use input_set_abs_params() to set the coordinate range for the digitizer. 
+ +- Mar 7, 2005: "[0/many] Acrypto - asynchronous crypto layer for linux kernel 2.6" + If the asynchronous crypto stuff is accepted, don't forget to add + 'source "acrypto/Kconfig"' into ./arch/vax/Kconfig, somewhere between + crypto/Kconfig and lib/Kconfig. + +- Mar 7, 2005: "[bk, patches] Input update" + Vojtech Pavlik prepared a patchset that shifts over all Input API drivers + to something like the struct driver thing. lkkbd.c and vsxxxaa.c may need + checking. + +- Apr 5, 2005: set_current_state()/schedule_timeout() -> msleep() + Not related to a specific mailing list post, but all code that possibly + waits using schedule_timeout() should probably changed like this: + - set_current_state(TASK_UNINTERRUPTIBLE); + - schedule_timeout(30*HZ/100); + + msleep(300); + +- Oct 18, 2005: Debundle SGEC and vaxlance. + They share a struct net_dev last_dev (defined in lance). This seems to be + a relict from stoneage and needs to be removed. diff -Nru a/Documentation/vax/memory.txt b/Documentation/vax/memory.txt --- a/Documentation/vax/memory.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/memory.txt 2002-05-20 02:33:29 @@ -0,0 +1,469 @@ +$Id: memory.txt,v 1.3 2002/05/20 00:33:29 kenn Exp $ +ATP 20010910 + +Note: + +This is more a discussion document about the memory map on the vax +architecture. For discussion of VAX memory management, and the compromises +made by this port, and how that affects most people see the file +task-memory.txt in this directory. + +0) Terminology + + PAGE_OFFSET is set to be 0x80000000. So Physical Memory address 0 is + mapped to Virtual address PAGE_OFFSET. This is the start of the + VAX S0 segment, and limits the physical memory to 1024Mb. But, hey, + find me a VAX with more than 1024Mb RAM. + + PAGE_SIZE. A page is 4096 bytes long. + PAGELET_SIZE. A pagelet is 512 bytes long. + + See include/asm-vax/mm/pagelet*.h + + The Hardware page size on a VAX is 512 bytes. Hardware pages + are called pagelets. 
+ + The pagelet layer, implemented in asm-vax/mm/ and arch/vax/mm + (pgalloc.c mostly), groups pages into logical pages of 4096 bytes. + + The rule here is; Any data structure likely to be seen by + arch-independent code uses pages. Any arch-specific code may use + pagelets, but its highly discouraged. There is one exception to + this, which is the S0 part of the process pgd (page directory). + The Linux arch independent code never goes near the S0 page table, + as its unaware that it exists (thankfully). We keep the S0 base + and length pair in pagelets. The P0 and P1 sections base and + length registers are kept in pages, for consistency, and converted + on the fly, when the registers in the PCB (process control block) + are updated. The S0 section is only ever touched at boot, and becomes + frozen by the time processes start. Its only ever touched by + vax arch code. + + A page table entry (pte, type pte_t ) maps a page. Each pte + is in fact a structure (struct pagecluster_t) that describes the + underlying pagelet ptes for that page (hwpte, hwpte_t). + + Why do we have a pagelet layer? Well, its a long story, but it + makes life a lot easier elsewhere. + +1) Memory map. + + The memory map has stabilised a little. Here is what it looks like + sept 2001. I feel that RPB shoud live in a well known place too. + + Virtual Length Description + 80000000 1 page bootmap (mem_map) + 80001000 1Mb-1page Free + 80100000 kern_size Kernel code data and bss sections + SPT_BASE SPT_SIZE Pagelet (512 bytes) aligned start of + system page table. + Length depends on physical memory, plus + other variables - see below. + iomap_base IOMAP_SIZE i/o remapping area. A set of ptes in the + system page table we can use for remapping + device io ports. e.g. microvax prom + registers, ethernet card CSR regs. The + start of this is page aligned (4096 bytes) + vmallocmap_base + VMALLOC_SIZE vmalloc() area. + TASKPTE_START see below TASKPTE area. 
Stores P0 and P1 page tables + for user processes. Sized at compile time. + See below. + + TASKPTE_END max_pfn*4096 Free (May contain VMB bitmaps on the last page) + +2) System page table. + + The system page table as far as TASKPTE_START is initialised in boot/head.S + the early boot assembly code. The initialisation of iomap and vmalloc should + probably move to mm/init.c. paging_init() in mm/init.c initialises the + remainder, which at present is the task pte area. Once paging_init() has + returned, there are no further alterations to the system page table. + + The following are equivalent. + S0 base register: SPT_BASE, swapper_pg_dir[2].br, pg0. + S0 length register: SPT_LEN, swapper_pg_dir[2].lr + + SPT_SIZE is the size in bytes of the SPT. + + The system page table must be pagelet aligned. + +3) TASKPTE areas. + + An area must be set aside in system space to hold process page tables. + This is the TASKPTE area. This is sized at kernel compile time (currently) + using the variables defined in include/asm-vax/mm/task.h. The task pte area + is composed of TASK_MAXUPRC "slots". Each slot is laid out like this + + name size description + p0pmd 2 pages Fake P0 page mid level directory + p1pmd 2 pages Fake P1 page mid level directory + p0pte set by TASK_WSMAX P0 page table + p1pte set by TASK_STKMAX P1 page table + + Slots are aligned to 8192 bytes. + The page mid level directories are needed because the linux MM code + needs to keep track of which ptes are allocated across the entire + address space. It's easier to fake a page midlevel directory each entry + of which is a 4 byte longword pointing at the relevant part of the + page table. + The TASK_WSMAX define limits how much virtual address space is allocated + to the process P0 region. This is composed of two sections, the text + section and the data section. The amount of address space allocated to + each is defined by TASK_TXTMAX and TASK_MMAPMAX. TASK_STKMAX limits + the amount of P1 space available. 
+ + The need to restrict the virtual address spaces is imposed by the VAX + MM hardware. Each process has potentially 1Gb P0 and 1Gb P1 space + available to it. However, the allocation is not sparse, like it is + on CPUs with a tree structured MMU. If a process allocates a page + 200MB into its P0 space, then we must increase the P0 length register + to include the pte that describes this page at 200MB. That makes all the + intervening addresses in the page table from 0 to 200MB be + part of the P0 page table too. (The PTEs may be invalid, or the + addresses that they would occupy be used by something else, but + they are there as far as the MMU is concerned). + + Once we have mapped all of the intervening space, we can set the page + table base and length registers to the right values to point at the + base of the page table, and the length in ptes, up to 200MB. + + In contrast on an alpha or i386 for example, one only needs to allocate + a single page (plus one more for the pmd if on an alpha) and enter + it into the correct slot in the pgd. + + Additionally, The base and length registers for a P0 page table + point at a region that must be contiguous in S0 space. This makes + expansion hard, as there is a very specific S0 virtual address needed to + map any given address in a P0 pagetable. If that address is already + occupied by something else then either you cannot expand, or you + must move the other user of that virtual address. Thats not + feasible. + + The obvious solution here is to map a P0 or P1 process page table + in its entirety, from 0 to 1024Mb, into S0 space. This avoids + the expansion problem. We just reserve a chunk of S0 address space + for as many P0 and P1 page tables as we need. Each is located in + a specific range of S0 virtual address space. We can then map in + actual physical pages to hold the P0 page table ptes for addresses + on an as needed basis. They just need to be mapped to specific + S0 addresses. 
+ + The problem with that is that the S0 page table, which manages the + S0 address space, is located in _physical_ memory. The same problems + as above are in place, with the exception that specific physical + addresses are needed. So if we reserve a chunk of virtual address + space, then we are effectively allocating S0 ptes (sptes) that + map that space. One spte maps one page of S0 address space. + If we reserve enough S0 space for the page tables for one process's + P0 and P1 address space (2048MB), then we are reserving + 2048*1024*1024 / 4096 = 524288 pages of P0/1 space + = 524288 P0/1 ptes. + Each pte is 32 bytes in size. So the amount of S0 space we + need to reserve to hold this page table is; + 524288 * 32 = 16 Mb. + 16Mb of S0 space is; + 16 * 1024 * 1024 / 4096 = 4096 pages of S0 space + = 4096 S0 ptes. + Each pte is 32 bytes in size. So the amount of + physical memory we need to allocate to the S0 page table is; + 4096*32 = 128 kb. + + If we allow 64 processes, then we are tying up; + 64 * 128 = 8Mb + So we have lost 8 Mb of contiguous physical memory. + + And this is just RAM to hold the S0 page table. This does not + include the allocated pages which hold the P0 page table. + (Admittedly these can be any page returned by __get_free_page(), + so there is no need for contiguity.) + + Most processes have small memory requirements, so this 8 Mb is + mostly unused. Most VAXes have a small amount of RAM. For + later model 3100 series between 8 and 16 Mb is not an + unusual amount of RAM. Earlier systems will typically have + less. We cannot afford to waste this much RAM, so we take the + step of limiting the virtual address spaces to more practical + values. At the time of writing the values were set like this; + + TASK_TXTMAX 6Mb Maximum program size + TASK_MMAPMAX 58Mb Maximum amount of address space + available for allocation. 
+ TASK_STKMAX 4Mb Maximum stack size + TASK_MAXUPRC 64 Maximum number of processes + + Which allows large programs like gcc to run with some headroom. + + The space taken up by the process page tables with these + values is; + 68 * 1024 * 1024 / 4096 = 17408 P0/1 pages + = 17408 P0/1 ptes + 17408 * 32 = 544 kb S0 space + 544 *1024 / 4096 = 136 S0 pages + = 136 S0 ptes. + 136 * 32 = 4352 bytes of RAM. + for 64 processes, this is = 272 Kb. + + Which is not that much. The S0 page table needs to be allocated + in a block of contiguous physical memory, so we allocate it + in its entirety right at the start of the boot process. + + I suppose it is theoretically possible to shift pages around + and expand the S0 page table, on a running system, but I think + it would be nigh on impossible to backtrace the users of a + given physical page. One could swap out all the pages needed, + but doing that whilst in the middle of modifying the system + page table is prone to error to say the least. That just + leaves the problem of shuffling things around in the S0 + virtual address space to expand the process page tables. + + However, all the systems I know of on the VAX fix the process + virtual address space in this way, or similarly, taking the + lead from VMS. + + The actual pages allocated to hold the process page tables + are done on demand, so only as much physical memory as is + actually needed to hold the process PTEs is used. The PMD + keeps track of which pages in the process page table are allocated + (Because our PGD holds the base and length registers, amongst + other things). + + Room for Improvement + -------------------- + + We waste space with the pgd. + + We can use the TASK_xxxx macros to set default values. New values + can be supplied as a kernel command line argument, so that we only + need to reboot, not recompile to alter the page table sizes. 
+ + We can condense the pmd down into a smaller number of pages, + but this requires smarter pmd_xxx routines to emulate the missing + bits of the process pmds, when linux scans the pmds. + + We need to eliminate the PGD_SPECIAL botch. + + PGD/PMD/PTE. + ------------ + + In Linux, the pgd is the highest level division of virtual address + space. For the VAX the mapping is clear, A process has 4 main + sections in the 32 bit address space. P0, P1, S0 and S1, each of which + is 1024Mb in size. + + P0 0x00000000 - 0x3fffffff "Process space" + P1 0x40000000 - 0x7fffffff "Process stack space" + S0 0x80000000 - 0xbfffffff "System Space" + S1 0xc0000000 - 0xffffffff "Unreachable/Reserved" + + Each one of these has a pgd entry in a page table. Each pgd_t is + a structure defined in include/asm-vax/mm/pagelet.h, which includes + the base and length registers for that segment. + + Each page is 4096 bytes in size. Each pte is 32 bytes in size. + So each page allocated to a page table holds + 4096/32 = 128 ptes. + + Each page of ptes in a page table therefore maps; + 128*4096 = 512 kb of address space. + + So, in order to map the whole of one segment (one pgd_t) we need + 1024*1024/512 = 2048 pages of ptes in the page table. + + To keep track of which pages are allocated, we need to keep a + PMD. Each pmd_t is a longword (4 bytes) so we need + 2048 * 4 / 4096 = 2 pages per PMD. + + These are located at the start of the task slot. + + -- atp Sept. 2001. + + +KPH 20000416 + +We need to decide on what the overall memory map in S0 space will look like. + +Here's what I think: + + Start Length + + 80000000 1MB Spare space left over from kernel load + time. Will be put on the kernel's free + list. + + 80100000 kern_size Kernel code, data and bss sections + + pg0 spt_size System page table. Length will be dependent + on physical memory size plus some extra space + for mapping I/O pages + + mem_map memsize*40 The mem_map array contains one entry for each + physical page of ram. 
+ + remainder Remaining pages are put on free list + + + +====================================================================== + +KPH 20000107 (2.2.10-991101-kh5) + +After a little discussion with Andy, it looks like we'll create +a full-size system page table (SPT) in the asm code in head.S. +This SPT needs to have one entry for each physical page of memory +and additional entries to do any I/O space and ROM mapping required. +This page table needs to be physically contiguous. + +We also need to define a region for the interrupt stack. 4KB should +be plenty. (Might be a good idea to put canary values at the bottom +and check them periodically.) + +We need an SCB (system control block, contains the interrupt and +exception dispatch vectors). + + +====================================================================== + +KPH 19991118 (2.2.10-991101-kh2) + +Here's what happens with memory management during boot time: + +o VMB locates a region of good memory and leaves a little space + for a small stack. + + On my VAXstation 3500, this is always physical address 0x00005000 + The initial SP is 0x00005200, leaving 1 page for a stack. (If your + memory has no faults, then you could grow the stack below 0x00005000, + but VMB makes no guarantees about those pages.) + +o VMB loads the kernel image via MOP. + + On my machine, this is always 00005800 + +o VMB calls the entry point (512 bytes into the image - that's + why there is a page of zeroes tagged onto the front of the MOP image) + + Again, on my machine, that means that 'start' in head.S gets + called at 00005A00. + +o head.S then copies the whole loaded image up to 00100000 (1 MB). + Once VM is enabled, virtual address 80100000 will be mapped to + this physical address. The kernel image is linked with a base + address of 80100000 (see arch/vax/vmlinux.lds). + +o The BSS section is filled with zeroes. 
+ +o At this point, head.S jumps from somewhere near 00005A00 to + the corresponding point above 00100000 (that's the jump to + 'reloc' in head.S). Note that SP is still down at 00005200. + +o A system page table is built at physical address 00200000 (2MB). + 16384 (0x4000) page table entries (PTEs) are created. Each is + marked as valid and protection is set to user write. The page + frame numbers (PFNs) in these PTEs are set to map the lower 8MB + of physical memory. The System Base Register (SBR) and System + Length Register (SLR) are loaded with 00200000 and 4000 to point + to this page table. + + Once VM is turned on, the addresses 80000000 to 807fffff will + map to the first 8MB of physical memory. But, we haven't turned + on VM yet... + +o To enable VM and start running the kernel code in S0-space above + 80100000, we need to do two things: + + 1. Set the MAPEN processor register to 1 + 2. Jump to an address in (the now valid) S0 space. + + However, immediately after we've set MAPEN, the PC still contains + an address somewhere above 00100000. The CPU now interprets this + as a virtual address in P0-space. We have to arrange for this + address to be valid, otherwise we'll crash and burn... + + To make this address valid, we need to make a P0 page table + that will be active when MAPEN is set. First we work out + how many pages from the start of memory to the _VAX_start_mm + code (i.e. _VAX_start_mm's page frame number, or PFN). We + have a small, 8-page P0 page table that we fill with this PFN + (and the 7 following PFNs). + + Then we load the P0 Base Register with a value that points to + the correct distance _before_ our little P0 page table such that + the first entry in the table maps _VAX_start_mm. For example: + + o _VAX_start_mm gets loaded at 00005C00 + + o head.S relocates it to 00100200, which is PFN 801 + + o Assume p0_table is at 00100280. This will be mapped + by virtual address 80100280 once MAPEN is set. 
+
+        o We fill our little P0 page table to map PFNs 801 to 808
+
+        o We set P0BR to 80100280 - (801*4). The *4 is because a
+          PTE is 4 bytes. P0LR is set to 809.
+
+   Note that we're counting on the fact that nothing is going
+   to refer to any address between 00000000 and 001001ff. If
+   something does refer to an address in this range, we're in
+   trouble because the PTEs for these addresses are not
+   initialized correctly.
+
+o We load P1BR and P1LR with 'sensible' values to prevent the CPU
+  from freaking out.
+
+o Next we have to fix up the addresses on the stack. Note that SP
+  still points to somewhere below 00005200 (on my machine, anyway...).
+  _VAX_start_mm is called via a CALLS from head.S, so there is
+  exactly one full stack frame that needs fixing up:
+
+  o The saved AP, FP and PC are incremented by 0x80000000 to
+    point to the corresponding addresses in S0 space once VM
+    gets turned on (remember that physical addresses 00000000
+    to 007fffff will be mapped by 80000000 to 807fffff).
+
+  o The current SP and FP are incremented by 80000000.
+
+o R6 is loaded with the physical address of 'vreloc' in mmstart.S
+  and incremented by 80000000 to give vreloc's soon-to-be-valid
+  virtual address.
+
+o MAPEN is set to 1.
+
+  As mentioned above, PC still contains a virtual address that is
+  something above 00100200, but our fake P0 page table maps that
+  to the same physical address.
+
+o We jump to vreloc's virtual address held in R6.
+
+o Job is done... return to head.S which then calls start_kernel.
+
+
+Some thoughts on the above:
+
+1. On older VAXen (780-era), VMB only tries to find 64Kb of good
+   memory. If this is still true on newer VAXen, then this won't
+   be enough to hold the full Linux kernel. Instead, what we'll
+   probably have to do to boot on machines with some bad memory is:
+
+   o VMB loads a small boot loader which creates a system page
+     table that maps all good memory pages (or maybe maps all
+     pages and marks bad ones as invalid).
+
+   o This boot loader then enables VM and loads the kernel proper.
+
+   This isn't so nice, because the current scheme of pulling the whole
+   kernel across MOP means we don't have to write boot-time device
+   drivers.
+
+2. What happens if the kernel image is too big to fit between 00005A00
+   and 00100000 (i.e. is 1MB or bigger)? Well, first we'll have to
+   relocate the kernel by starting the copy at the top and working
+   down. Secondly, we'll have to make sure that all code that runs
+   before the jump to 001xxxxx is at the start of the image. (Not
+   a problem, actually... The linker script will take care of that.)
+
+3. What about machines with more than 8MB? Or less than 8MB? What's
+   the best place to pull the memory size and good/bad info from the
+   RPB? Perhaps in head.S when we're building the system page table?
+
+
diff -Nru a/Documentation/vax/syscall.txt b/Documentation/vax/syscall.txt
--- a/Documentation/vax/syscall.txt	1970-01-01 01:00:00
+++ b/Documentation/vax/syscall.txt	2002-05-20 02:33:29
@@ -0,0 +1,75 @@
+
+$Id: syscall.txt,v 1.3 2002/05/20 00:33:29 kenn Exp $
+
+This file describes how syscalls work on the VAX.
+
+When userland wants to do a system call, it calls a wrapper
+function in the standard way (so we get a standard call frame
+built on the stack). This wrapper then simply does a CHMK
+(change mode to kernel) instruction, specifying the number of
+the syscall:
+
+   In file user-app.c:
+
+      fd = creat(filename, mode);
+
+   In libc:
+
+      #define CHMK(x) __asm__("chmk %0" : : "g" (x) : )
+
+      int creat(const char *filename, mode_t mode)
+      {
+         CHMK(__NR_creat);
+      }
+
+In the kernel, the exception handler for change-mode-to-kernel
+exceptions will get control. At this point, the stack looks like:
+
+   SP:   struct pt_regs *   (points to pt_regs further up on stack)
+         void *excep_info   (points to info pushed by hardware
+                             further up on stack )
+         ...
+         struct pt_regs saved_regs
+         ...
+ syscall_number (pointed to by excep_info pointer above) + saved PC + saved PSL + +The saved PSL, saved PC and syscall number are pushed by the +hardware when executing the CHMK instruction. The saved_regs +are pushed by the common exception handler code and eventually +we end up calling chmk_handler(): + + void chmk_handler(struct pt_regs *regs, void *excep_info) + { + int syscall = *(int *)excep_info; + ... + +The next step is to collect the arguments from user-space. +We cannot assume that they will be on the user stack since the +app may have called creat() via a CALLG instruction. However, +we do know that the AP (argument pointer register) inside creat() +in libc will point to the argument list. + +So we pull AP out of the pt_regs structure. This will point to +a standard VAX argument list, which starts with the number of +arguments: + + AP: arg_count (should be less than 256, if not return error + because userland is breaking the rules) + AP+4: arg1 + AP+8: arg2 + ... + +Of course, this is all completely untrusted so we have to be +careful to check all user-land accesses. We also need to copy +the complete argument list to kernel space before passing them +to the actual syscall function (which will do final validation). +(Otherwise another user-land task could modify a pointer argument +after we've verified that it points to accessible memory, but +before we actually dereference it.) + +We try to copy the whole argument list to the kernel stack and then +do a CALLS to the actual syscall handler. + diff -Nru a/Documentation/vax/task-memory.txt b/Documentation/vax/task-memory.txt --- a/Documentation/vax/task-memory.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/task-memory.txt 2002-05-20 02:33:29 @@ -0,0 +1,60 @@ +$Id: task-memory.txt,v 1.3 2002/05/20 00:33:29 kenn Exp $ +atp Sept 2001 + + For more details on the memory layout and details of the process + page tables, see the memory.txt file in this directory. 
+
+If you see this message in your system logs, then this file is for you;
+
+VAXMM: process 81292000 exceeded TASK_WSMAX (64MB) addr 4000000
+VAXMM pte_alloc: sending SIGSEGV to process 81292000
+VM: killing process as
+vax-dec-linux-gcc: Internal compiler error: program as got fatal signal 9
+
+   Due to the constraints of the VAX MMU, we need to decide at compile
+   time how much virtual address space to allocate to user processes.
+   The number of processes and the amount of memory is limited by a set
+   of #defines in the file.
+
+   include/asm-vax/mm/task.h
+
+   This allows us to size the number of tasks and the amount of virtual
+   address space each one is allowed.
+
+   Those defines are;
+
+   TASK_WSMAX   This is the "process address space" in P0. This is normal
+                memory. If you run out of RAM, then this is the one to
+                pay attention to. In VMS terms this is like WSMAX.
+                TASK_WSMAX is the sum of TASK_TXTMAX and TASK_MMAPMAX
+
+   TASK_TXTMAX  This is the largest program that can be run. The default value
+                is about 6Mb. (Bear in mind that the program size on disk
+                may not reflect its size in memory, as it may have lots of
+                debugging information and other stuff that won't be loaded
+                as a running program.)
+
+   TASK_MMAPMAX This is the memory used for the mmap() system call, and hence
+                by the malloc library routine. This is the amount of address
+                space available for allocation by a running program. The
+                default value is about 58Mb. If you see a warning about
+                WSMAX being exceeded, whilst running a program, this is
+                the one to increase.
+
+   TASK_STKMAX  The amount of address space in the P1 region. This is the amount
+                of stack memory allocated to the process. The default value
+                is 4 Mb.
+
+   TASK_MAXUPRC The maximum number of user processes allowed to run at any
+                one time. This is like BALSETCNT on VMS. The default value is
+                64.
+
+
+   TASK_WSMAX = TASK_TXTMAX + TASK_MMAPMAX
+
+   Decide if you want to run bigger programs (increase TXTMAX) or let the
+programs have more memory (MMAPMAX), or more programs (MAXUPRC).
+
+   However, don't set the sizes too much larger than you need, as you will
+lose more RAM to the system page table (and that's unavailable for user
+processes) the bigger these variables are.
diff -Nru a/Documentation/vax/userland.txt b/Documentation/vax/userland.txt
--- a/Documentation/vax/userland.txt	1970-01-01 01:00:00
+++ b/Documentation/vax/userland.txt	2003-11-09 20:49:30
@@ -0,0 +1,178 @@
+
+Obtaining & Building VAX userland binaries
+==========================================
+
+This file provides some information on obtaining pre-built binaries
+to run on your VAX and how to build your own.
+
+Before worrying about all this, make sure you've got the cross-compiler
+and kernel built, and your VAX boots the kernel as far as mounting
+the root filesystem. See Documentation/vax/README for more details on
+these steps.
+
+For your first userland adventures, you should start with Andy's
+vaxroot tarball:
+
+   http://linux-vax.sourceforge.net/download/vaxroot-20010920.tar.bz2
+
+Untar this and export it via NFS. Either edit the definition of
+DEFAULT_CMDLINE in arch/vax/Makefile in the kernel source to pass
+the right path for the root= option, or use arch/vax/tools/setcmdline
+to change the command line in an existing disk or MOP image.
+
+More info on this root filesystem is at
+
+   http://linux-vax.sourceforge.net/download/README.vaxroot-20010920
+
+(Kaj-Michael Lang is working on a newer root filesystem, using
+shared libs: http://merman.tal.org/vax/)
+
+
+Cross-compiling uClibc
+======================
+
+If you want to cross-compile your own userland binaries, you'll need
+to build uClibc:
+
+   o Pull uClibc sources from CVS
+
+   o In the uClibc root dir, do 'make defconfig'.
+
+   o uClibc needs a set of kernel headers to compile against.
+ However, it requires some headers that are only created + by the kernel build process. It also cannot deal with + the separate output directory provided by kernel 2.6. + The easiest way to prepare this tree is to checkout a + clean 2.6 tree and do: + + make ka650_defconfig + make include/asm + make include/linux/version.h + + o Run 'make config' or 'make menuconfig' and set + KERNEL_SOURCE to the base of the kernel source tree + you prepared in the previous step. + + o Apply this patch to uClibc, since the create_module and + get_kernel_syms syscalls have been removed in 2.6. + +===================== cut here ======================= + +Index: libc/sysdeps/linux/common/Makefile +=================================================================== +RCS file: /cvsroot/linux-vax/uClibc/libc/sysdeps/linux/common/Makefile,v +retrieving revision 1.1.1.4 +diff -u -r1.1.1.4 Makefile +--- libc/sysdeps/linux/common/Makefile 27 Sep 2003 14:33:09 -0000 1.1.1.4 ++++ libc/sysdeps/linux/common/Makefile 30 Oct 2003 00:53:20 -0000 +@@ -21,7 +21,7 @@ + + CSRC= waitpid.c getdnnm.c gethstnm.c getcwd.c ptrace.c \ + mkfifo.c setegid.c wait.c getpagesize.c seteuid.c \ +- wait3.c setpgrp.c getdtablesize.c create_module.c \ ++ wait3.c setpgrp.c getdtablesize.c \ + cmsg_nxthdr.c longjmp.c open64.c ftruncate64.c mmap64.c \ + truncate64.c getrlimit64.c setrlimit64.c creat64.c \ + llseek.c pread_write.c _exit.c sync.c getdirname.c \ +Index: libc/sysdeps/linux/common/syscalls.c +=================================================================== +RCS file: /cvsroot/linux-vax/uClibc/libc/sysdeps/linux/common/syscalls.c,v +retrieving revision 1.1.1.4 +diff -u -r1.1.1.4 syscalls.c +--- libc/sysdeps/linux/common/syscalls.c 27 Sep 2003 14:33:09 -0000 1.1.1.4 ++++ libc/sysdeps/linux/common/syscalls.c 30 Oct 2003 00:53:23 -0000 +@@ -1346,12 +1346,6 @@ + # endif + #endif + +-//#define __NR_get_kernel_syms 130 +-#ifdef L_get_kernel_syms +-struct kernel_sym; +-_syscall1(int, get_kernel_syms, struct 
kernel_sym *, table); +-#endif +- + //#define __NR_quotactl 131 + #ifdef __NR_quotactl + #ifdef L_quotactl +===================== cut here ======================= + + o You may want to set DEVEL_PREFIX to somewhere in your + home directory, such as ~/linux-vax/vax-uclibc-dev/ + + o Compile uClibc with 'make CROSS=vax-dec-linux-' + + o Install the uClibc development environment with 'make install'. + This will install the uClibc .a files in DEVEL_PREFIX/lib, + along with wrappers for gcc, ld and friends in DEVEL_PREFIX/bin. + These wrappers will be named vax-uclibc-gcc, vax-uclibc-ld, etc. + + You'll use these wrappers when compiling user-land binaries. + + o Test static VAX binaries by compiling a simple hello-world program: + + PATH=$PATH:~/linux-vax/vax-uclibc-dev/bin + vax-uclibc-gcc -static -o hello-static hello.c + + Copy this executable to your VAX filesystem and try running it. + + o Using shared libs requires installing some uClibc files (such as + uClibc's replacements for ld.so and libc.so) into the VAX + filesystem. In the uClibc root dir, do + + make PREFIX=~/linux-vax/vaxroot install_target. + + o Test a shared hello-world program: + + vax-uclibc-gcc -o hello hello.c + + Copy this executable to your VAX filesystem and run it. + + +Building a native compiler +========================== + +It should be possible to cross-compile the compiler itself, to +build a native compiler for use within the VAX filesystem. +(As of 2003-10-30, the native compiler builds and runs, but dies +very often with reserved instruction faults. So there is +still some work to do on it.) + + o First, put your uClibc compiler wrappers on PATH as above. 
+ Then create a new build directory in the toolchain root: + + PATH=$PATH:~/linux-vax/vax-uclibc-dev/bin + cd ~/linux-vax/src/toolchain + mkdir build-native + cd build-native + + o Now configure and compile the toolchain: + + CC=vax-uclibc-gcc ../src/configure --host=vax-dec-linux \ + --build=i386-linux --target=vax-dec-linux \ + --enable-languages=c --prefix=/usr + make LDFLAGS=-lm + + o Install this toolchain into your VAX filesystem: + + make install prefix=~/linux-vax/vaxroot + + +Cross-compiling glibc +===================== + +Glibc isn't working yet, but we're getting closer. It now builds as +far as libc.so: + + cvs linux-vax/cvs/glibc + mkdir b-vax + cd b-vax + ../configure --host=vax-dec-linux --build=i386-pc-linux \ + --with-headers=/home/kenn/linux-vax/kbuild/kernel-2.6-libc-vax/include \ + --enable-add-ons --disable-profile --without-gd --without-fp + make lib + +If you just do 'make' with no targets, it will do 'make lib others', but the +'others' target doesn't build yet. + +$Id: userland.txt,v 1.3 2003/11/09 19:49:30 kenn Exp $ + diff -Nru a/Documentation/vax/xdelta.txt b/Documentation/vax/xdelta.txt --- a/Documentation/vax/xdelta.txt 1970-01-01 01:00:00 +++ b/Documentation/vax/xdelta.txt 2002-05-20 02:33:29 @@ -0,0 +1,106 @@ +KPH - 20000206 + +Here's how to use XDELTA as a kernel debugger on a VS3500 with VMB 5.3. + +1. Edit arch/vax/boot/head.S and add a HALT instruction somewhere near + the beginning and recompile. (Replacing one of the initial 4 NOPs might be useful, + since that won't change the layout of the linked image.) + + Insert a BPT instruction where you want a breakpoint + +2. Boot with a boot parameter of 20 (hex). This tells VMB to load + XDELTA and trigger a breakpoint. + + What this actually does is make the initial SCB vectors for machine + check, reserved operand, access violation, page fault, trace and + breakpoint point into code in XDELTA. (XDELTA is copied to RAM along + with the rest of VMB when you enter the BOOT command.) 
+ +3. Before trying to locate a boot device and load an image, VMB will + stop at a breakpoint: + + >>>e\e\b/20 + (BOOT/R5:20 XQA0) + + + 1 brk at 000004EB + +4. Type '4000/'. This will 'open' the address 4000 (which is the + base of VMB's SCB). Hit Ctrl-J repeatedly to examine up as far + as address 402C: + + 1 brk at 000004EB 4000/00000D0D + + 00004004/0000313C + + 00004008/00000D0D + + 0000400C/00000D0D + + 00004010/00000D0D + + 00004014/00000D0D + + 00004018/0000313C + + 0000401C/00000D0D + + 00004020/0000313C + + 00004024/0000313C + + 00004028/00003251 + + 0000402C/000031F1 + +5. Note the values at addresses 4004 (machine check), 4018 (res opr), + 4020 (accvio), 4024 (page fault), 4028 (trace) and 402C (bpt). + Unfortunately, VMB clobbers these before passing control to the + loaded kernel image. + +6. Type ';P' and hit RETURN to continue from the breakpoint. VMB will + load the kernel as normal, transfer control to it and hit the HALT + you inserted in step 1. + +7. Now, use the console's DEPOSIT command to set the SCB vectors recorded + above to point into XDELTA again and CONTINUE: + + >>> D/P 4004 313C + >>> D 4018 313C + >>> D 4020 313C + >>> D 4024 313C + >>> D 4028 3251 + >>> D 402C 31F1 + >>> C + +8. When your BPT instruction is reached, XDELTA will gain control again. + + +Some additional notes: + +o There seems to be a problem with the above method. The S command + (single step) causes the machine to lock up when used after step 8. + Front-panel halt switch, or a BREAK from the console is required to + restore life. + +o You can find the manual for XDELTA at the Compaq OpenVMS web site: + + http://www.openvms.digital.com:8000/72final/4540/4540pro.html + + Warning! It's pretty primitive... + +o The version of XDELTA in VMB 5.3 doesn't include 'instruction' mode, + so you can't disassemble instructions. Since there is an + EXAMINE/INSTRUCTION console command, there is code somewhere in the + ROM to decode instructions. 
It shouldn't be too difficult to hack + XDELTA to use this code. The XDELTA code itself is very simple. + +o The ROM-based XDELTA won't work once VM has been enabled. This + limits its usefulness at present. However, the XDELTA code looks + to be 100% position-independent, so the kernel should be able to + copy it (either from ROM or from RAM) at boot, and hook the + relevant SCB vectors up to it. (The kernel could also patch + XDELTA to add support for instruction mode at this point.) + + + diff -Nru a/README.cvs-tags b/README.cvs-tags --- a/README.cvs-tags 1970-01-01 01:00:00 +++ b/README.cvs-tags 2003-01-24 01:46:48 @@ -0,0 +1,55 @@ + +Module kernel-2.5 + +Branch: MAIN Mainline development + + This branch holds the Linux/VAX development work. Various tags + will be added on this branch as interesting things happen (like + a particular feature being implemented, or when a binary release + is made). + + Tags on this branch: + + pre-2_5_x-import These tags get applied just before importing + a new Linus tree. This way, if the import + screws up, or leaves a broken kernel, it's + easy to revert to a working kernel. + + post-2_5_x-import These tags get applied just after a new Linus + tree is imported, and his changes get merged + successfully with ours (at least as far as + getting a clean compile, if possible). + + + kh-20020527 Initial working build on 2.5 tree. Boots + fine using NFS root on my VS4000/60. (Nothing + else is guaranteed). + + kh-20020929 Mostly-complete driver for DELQA ethernet card. + First-cut driver for Q-bus adapter in KA650 CPU. + Partially working serial driver for any console + port driven by RXCS, RXDB, TXCS and TXDB internal + processor registers. Boots to user mode on a KA42 + with NFS root and no SCSI (NCR5380 driver is broken). + +Branch: linux-2_5 Vendor branch for official Linus kernels + + This branch is a "vendor" branch in CVS terminology. We use it to hold + pristine "Linus" kernel trees. 
The only tags on this branch will be the + ones for each Linus tree. + + Tags on this branch: + + linux-2_5-vax Erroneous initial import (contained VAX code, but missing + net/core directory). You shouldn't need to use this tag. + I may rename or remove it, in fact... + + linus_2_5_0 Official 2.5.0 kernel tree as released by Linus + + linus_2_5_1 Official 2.5.1 kernel tree as released by Linus + + ... ... + + linus_2_5_16 Official 2.5.16 kernel tree as released by Linus + + diff -Nru a/arch/vax/Kconfig b/arch/vax/Kconfig --- a/arch/vax/Kconfig 1970-01-01 01:00:00 +++ b/arch/vax/Kconfig 2005-10-18 07:44:44 @@ -0,0 +1,298 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# + +mainmenu "Linux Kernel Configuration" + +config VAX + bool + default y + help + Select this option, if you want to run the kernel on + one of Digital's VAX line of computers. You will need + a VAX compiler (see http://www.linux-vax.org). + +config MMU + bool + default y + +config UID16 + bool + default y + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + bool + default n + +config ELF_KERNEL + bool + default y + +config KCORE_ELF + bool + default y + +# Force these to zero to avoid warnings in drivers/video/Kconfig (rather +# than pull in I2C config menus) +config I2C + bool + default n + +config I2C_ALGOBIT + bool + default n +# End I2C placeholders + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config PREEMPT + bool "Preemptible kernel (currently broken)" + default n + +config CMDLINE + string "Kernel command line" + default "root=/dev/nfs ip=bootp rw debug" + help + This is the command line the booting kernel will get. 
+ +config EARLY_PRINTK + bool + default y + +menu "VAX CPU types" + +config CPU_KA630 + bool "Support for KA630 (MicroVAX II)" + default n + +config CPU_KA640 + bool "Support for KA640" + default n + +config CPU_KA650 + bool "Support for KA650/KA655" + default n + +config CPU_KA660 + bool "Support for KA660" + default n + +config CPU_KA410 + bool "Support for KA410" + default n + +config CPU_KA41 + bool "Support for KA41 (MicroVAX 3100 Model 10)" + default n + +config CPU_KA42 + bool "Support for KA42 (VAXstation 3100 Model 38)" + default n + +config CPU_KA43 + bool "Support for KA43 (VAXstation 3100 Model 76)" + default n + +config CPU_KA46 + bool "Support for KA46 (VAXstation 4000 Model 60)" + default n + +config CPU_KA48 + bool "Support for KA48 (VAXstation 4000 VLC)" + default n + +config CPU_KA52 + bool "Support for KA52 (VAXstation 4000 Model 100A)" + select CONS_PREVM_KA52 + default n + +config CPU_KA55 + bool "Support for KA55" + default n + +config CPU_KA49 + bool "Support for KA49 (VAXstation 4000/90)" + default n + +config CPU_KA62 + bool "Support for KA62 (VAXstation 6000/3x0)" + select SERIAL_IPR + default n + +config CPU_VXT + bool "Support for VXT" + default n + +config CONS_PREVM_KA52 + bool + default n +endmenu + +source "init/Kconfig" + +# It's needed for FB console, but if we compile in VT support, +# it currently Oopses. 
- 20031010 jbglaw +# +source "drivers/input/Kconfig" +# +source "drivers/char/Kconfig" +# +source "drivers/video/Kconfig" + +source "fs/Kconfig.binfmt" + +menu "VAX Bus support" + +config QBUS + bool "Support for Q-bus" + default n + +config VSBUS + bool "Support for VAXstation bus" + default n + +endmenu + +source "drivers/block/Kconfig" + +source "net/Kconfig" + +if NETDEVICES +menu "VAX Network device support" + +config VAX_LANCE + bool "LANCE ethernet controller support" + select VSBUS + default n + +config VAX_SGEC + bool "SGEC ethernet controller support (EXPERIMENTAL)" + select VSBUS + select VAX_LANCE + default n + +config DELQA + bool "DELQA/DEQNA Q-bus ethernet controller support" + select QBUS + default n + +endmenu +endif + + +source "drivers/scsi/Kconfig" + +if SCSI + +comment "VAX SCSI low-level drivers" + +config SCSI_VAX_5380 + bool "NCR53C80 Scsi Driver (used in VAXstation/MicroVAX 3100 family)" + default n + +config SCSI_VAX_53C94 + bool "NCR53C94 Scsi Driver (used in VAXstation 4000 family) (NOT WORKING)" + default n + +endif + +source "fs/Kconfig" + +source "drivers/base/Kconfig" + +menu "VAX character devices" + +config SERIAL + bool "Serial port support" + default y + +if SERIAL + +config DZ + bool "DZ11 Serial Support" + default n + +config SERIAL_IPR + bool "CPU register-based Serial Console Support" + default y + select SERIAL_CORE + +config SERIAL_CONSOLE + bool "Support for console on serial port" + default y + select SERIAL_CORE_CONSOLE + +endif + +endmenu + + +menu "Kernel hacking" + +config DEBUG_KERNEL + bool "Kernel debugging" + default y + +if DEBUG_KERNEL +config DEBUG_SLAB + bool "SLAB allocator debugging" + default y + +config DEBUG_SPINLOCK + bool "Spinlock debugging" + default y + +config DEBUG_BUGVERBOSE + bool "Verbose BUG reporting" + default y + +config VAX_DIAG_LED + tristate "Support for accessing the diagnostic LEDs" + default n + depends VAX + help + This driver is a hack and will probably go away soon. 
It's + main purpose is to aid me towards rewriting the dz11 driver + to be more modular... + +# +# The next two options do basically have only one user. After his +# userland is fixed, they can go away (cf. ./arch/vax/kernel/syscall.c). +# +config DEBUG_VAX_CHECK_CHMx_ARGS + bool "Check number of syscall arguments" + default y + depends VAX + help + This option allows you to switch off checking the number of + supplied arguments to syscalls. That is, you'd call a syscall + with less atguments than needed unrecognized if you switch off + this option. Only a moron would do that, or somebody who's + on the way to fix his ABI code... + +config DEBUG_VAX_CHECK_CHMx_ARGS_ABORT + bool "Abort syscall with wrong number of arguments" + default y + depends DEBUG_VAX_CHECK_CHMx_ARGS + help + With this option enabled, you'll not only get a warning, but + the whole system call will be aborted before it can cause + any harm. That's a good think, so keep it enabled! +endif + +endmenu + +source "security/Kconfig" +source "crypto/Kconfig" +source "drivers/media/Kconfig" +source "sound/Kconfig" +source "lib/Kconfig" +source "lib/Kconfig.debug" + diff -Nru a/arch/vax/Makefile b/arch/vax/Makefile --- a/arch/vax/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/Makefile 2005-10-14 00:52:00 @@ -0,0 +1,122 @@ +# +# vax/Makefile +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. Remember to do have actions +# for "archclean" and for cleaning up this architecture +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1994 by Linus Torvalds +# + +CROSS_COMPILE := vax-dec-linux- + +LDFLAGS_BLOB := --format binary --oformat elf32-vax + + +# Note that this CFLAGS definition will override the definition +# in the top-level Makefile. We want -O1, not -O2. 
This is probably +# only needed for our old gcc-2.95, but no longer for gcc-4.1. +CFLAGS += -pipe +CFLAGS += $(call cc-option,-fno-unit-at-a-time) +CFLAGS := $(subst -O2,-O1,$(CFLAGS)) + +# These flags are used by the top-level makefile when linking +# the kernel +LDFLAGS_vmlinux=-N -nostartfiles -nostdlib -warn-once \ + -Map vmlinux.map -cref + +# Tell the top-level makefile about the addition arch-specific +# stuff that's needed +head-y := arch/vax/boot/head.o + +init-y += arch/vax/boot/ +core-y += arch/vax/kernel/ arch/vax/mm/ +libs-y += arch/vax/lib/ + +TOOLSDIR := arch/vax/tools +MKBOOTBLK := $(obj)/$(TOOLSDIR)/mkbootblk +SETCMDLINE := $(obj)/$(TOOLSDIR)/setcmdline +SHOWCMDLINE := $(obj)/$(TOOLSDIR)/showcmdline +DEFAULT_CMDLINE := "$(CONFIG_CMDLINE)" + +# Default target if none specified is to make both MOP and disk images +all: mopboot diskboot + +# This will make a MOP-bootable image +mopboot: vmlinux.SYS + +.PHONY: TOOLS +TOOLS: + $(Q)$(MAKE) $(build)=$(TOOLSDIR) + +# +# This will make a disk-bootable image. dd this directly to a +# disk and then tell your VAX to boot from this disk. +# +# NOTE: +# If you want to keep a partition table on the disk, and use +# a boot loader to load the kernel from a file system, then +# use the vmlinux.SYS and something like asbl. 
Don't forget to +# set the command line to something useful like "root=/dev/sda1" +# +diskboot: vmlinux.dsk + +OBJCOPYFLAGS := -O binary -R .note -R .comment -S + +quiet_cmd_vmlinux.bin = OBJCOPY $@ +cmd_vmlinux.bin = $(OBJCOPY) $(OBJCOPYFLAGS) $< $@ + +vmlinux.bin: vmlinux + $(call cmd,vmlinux.bin) + +quiet_cmd_setcmd = SETCMD $@ "$(DEFAULT_CMDLINE)" +cmd_setcmd = $(SETCMDLINE) $@ "$(DEFAULT_CMDLINE)" + +quiet_cmd_mksys = MKSYS $@ +cmd_mksys = dd if=/dev/zero bs=512 count=1 of=$@ 2>/dev/null && cat $< >> $@ + +vmlinux.SYS: vmlinux.bin TOOLS + $(call cmd,mksys) + $(call cmd,setcmd) + +quiet_cmd_mkdsk = MKDSK $@ +cmd_mkdsk = $(MKBOOTBLK) $< > $@ && cat $< >> $@ + +vmlinux.dsk: vmlinux.bin TOOLS + $(call cmd,mkdsk) + $(call cmd,setcmd) + +# This is helpful for low level debuggery +listfile: + rm -f vmlinux.lst + $(OBJDUMP) -D vmlinux > vmlinux.lst + +CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h + +prepare: include/asm-$(ARCH)/asm_offsets.h +arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \ + include/config/MARKER +include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s + $(call filechk,gen-asm-offsets) + +archclean: + @echo 'Cleaning up (arch/vax)' + @rm -f vmlinux.* vmlinux + @cd $(TOPDIR)/arch/vax ; rm -f *.out TEST.BIN TEST.SYS + @cd $(TOOLSDIR) ; rm -f setcmdline showcmdline mkbootblk + +archmrproper: + +define archhelp + echo '* mopboot - MOP-bootable kernel image (vmlinux.SYS)' + echo '* diskboot - HDD bootable kernel image (vmlinux.dsk)' + echo '' + echo 'Notice: module support is not complete right now' +endef + +clean: archclean + diff -Nru a/arch/vax/boot/Makefile b/arch/vax/boot/Makefile --- a/arch/vax/boot/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/boot/Makefile 2004-07-24 17:19:26 @@ -0,0 +1,11 @@ +# +# Makefile for the boot-time code for the Linux/VAX kernel +# +# Note! Dependencies are done automagically +# DON'T put your own dependencies here +# unless it's something special (ie not a .c file). 
+ +extra-y := head.o + +obj-y := lib.o hexdump.o mmstart.o cpu_sel.o startup.o + diff -Nru a/arch/vax/boot/boot_sections.h b/arch/vax/boot/boot_sections.h --- a/arch/vax/boot/boot_sections.h 1970-01-01 01:00:00 +++ b/arch/vax/boot/boot_sections.h 2004-09-28 10:48:52 @@ -0,0 +1,8 @@ + +#define __boot __attribute__ ((__section__ (".boot.text"))) +#define __boottdata __attribute__ ((__section__ (".boot.data"))) + +/* For assembly routines */ +#define __BOOT .section ".boot.text","ax" +#define __BOOTDATA .section ".boot.data","aw" + diff -Nru a/arch/vax/boot/cpu_sel.c b/arch/vax/boot/cpu_sel.c --- a/arch/vax/boot/cpu_sel.c 1970-01-01 01:00:00 +++ b/arch/vax/boot/cpu_sel.c 2005-05-15 16:32:36 @@ -0,0 +1,70 @@ +/* + * Boot-time CPU identification - requires virtual memory to be + * turned off (MAPEN=0). + */ + +#include /* Processor register definitions */ +#include /* machine vector definitions */ +#include /* for PAGE_OFFSET and KERNEL_START_PHYS */ +#include /* for HALT */ + +/* + * Given a virtual address in the final kernel image (i.e. an S0 + * address like 0x80123456, convert it to the corresponding address + * in the loaded kernel before we relocate (which depends on the + * exact load address) + */ +static void * +s0vmaddr_to_load_addr(void *vaddr, unsigned int kernel_load_addr) +{ + return (char *) vaddr - PAGE_OFFSET - KERNEL_START_PHYS + kernel_load_addr; +} + +struct vax_mv * +idcpu (unsigned int kernel_load_addr) +{ + extern struct cpu_match __init_cpumatch_start, __init_cpumatch_end; + struct cpu_match *match = &__init_cpumatch_start; + unsigned long sid; + unsigned long sidex; + unsigned int i; + unsigned int num_matches; + struct vax_mv *retmv; + + sid = __mfpr (PR_SID); + num_matches = &__init_cpumatch_end - &__init_cpumatch_start; + + for (i = 0; i < num_matches; i++) { + if ((sid & match[i].sid_mask) == match[i].sid_match) { + /* + * No sidex known? Accept the vector. 
+ * FIXME: Maybe sort the metch structs to have + * those with "long" masks first, then the loose + * entries with weaker/shorter masks + */ + if (!match[i].sidex_addr) + return s0vmaddr_to_load_addr(match[i].mv, kernel_load_addr); + + /* + * If a SIDEX match was supplied, too, check it! + */ + sidex = * ((unsigned long *) match[i].sidex_addr); + if ((sidex & match[i].sidex_mask) == match[i].sidex_match) { + retmv = s0vmaddr_to_load_addr(match[i].mv, kernel_load_addr); + retmv->sidex = sidex; + return retmv; + } + } + } + + /* + * No matching vector found, so you're on your own to get a SID + * and SIDEX value and add it to one of the existing vectors (if + * that works for you) or create an own vector for your machine. + */ + HALT; + + /* Not reached */ + return NULL; +} + diff -Nru a/arch/vax/boot/head.S b/arch/vax/boot/head.S --- a/arch/vax/boot/head.S 1970-01-01 01:00:00 +++ b/arch/vax/boot/head.S 2005-05-24 23:43:30 @@ -0,0 +1,365 @@ +# Copyright atp Nov 1998. +# Changes for aligning IOMAP/VMALLOC - Copyright airlied@linux.ie - June 2001 +# start of boot. entry point +# this assumes vmb has does most of the hard work (ie uvax rom vmb) +# save useful registers. jump to c in boot.c +# +# TBD: Some of this stuff could do with being rewritten in C +# Some of this stuff could be in .init sections and thrown away. +# + +#include /* For __INITDATA */ +#include /* Processor register definitions */ +#include /* machine vector definitions */ +#include /* PTE definitions */ +#include /* interrupt stack definitions */ + +#include "boot_sections.h" + +__BOOT + +.globl _stext +_stext: +.globl start +start: + jmp codestart # Word displacement. + + +.globl bootparam +bootparam: + .fill 256,1,0 # The boot parameter block. Presently just the + # command line. 
+ + +codestart: + # disable memory mapping + mtpr $0, $PR_MAPEN + mtpr $31, $PR_IPL + + # save r11, ap and scbb and location of command line + movl %ap, boot_ap + movl %r11, boot_r11 + mfpr $PR_SCBB, boot_scb + movab bootparam, %r5 + addl2 $PAGE_OFFSET,%r5 # we will only access this when MAPEN=1 + movl %r5,kernel_cmd_line + + # Put the sp somewhere safe, over our bootblock in fact + moval start, %r5 + subl2 $0x200, %r5 + movl %r5,%sp + +# Debug code: +# movzbl $0x42, %r2 +# jsb 0x20040058 + + pushal start + calls $1, idcpu # Identify this CPU and... + movl %r0, mv # ...put the returned mv ptr into mv. + + # Now fix up the machine vector entries. (They currently contain + # pointers to virtual addresses in S0 space. We need to change + # the pointers to the functions we use before VM init to point + # into the newly-loaded kernel image.) + movl mv, %r10 + moval start, %r8 + + subl2 $PAGE_OFFSET+KERNEL_START_PHYS, %r8 + addl2 %r8, MV_PRE_VM_PUTCHAR(%r10) + addl2 %r8, MV_PRE_VM_GETCHAR(%r10) + addl2 %r8, MV_CPU_TYPE_STR(%r10) + + calls $0, boot_crlf + calls $0, boot_crlf + + # print the cpu type + calls $0, boot_print_cpu_id + + # print first line of debug diagnostics + pushab msg_loaded # ascii string + calls $1, boot_printstr + pushal start # where we were loaded + calls $1, boot_printint + calls $0, boot_crlf + pushab msg_registers # ascii string + calls $1, boot_printstr + calls $0, boot_printspace + movl boot_r11, %r11 + pushl %r11 # r11 (holds the rpb base address, usually 0x0) + calls $1, boot_printint + calls $0, boot_printspace + # FIXME: magic offset -> asmoffsets.h + pushl 48(%r11) # saved r5 in RPB (argument to BOOT command) + calls $1, boot_printint + calls $0, boot_printspace + pushl %ap # argument pointer (struct arglist) + calls $1, boot_printint + calls $0, boot_printspace + pushl %sp # stack pointer + calls $1, boot_printint + calls $0, boot_crlf + + # Save off the current machine vector address in boot_mv, because it + # lies in the .bss section and 
it will get clobbered real soon... + # - atp. in fact it gets clobbered real quick, if your kernel is + # larger than about 950k, as the relocation code clobbers it, along + # with every thing else poking its head above $KERNEL_START_PHYS, + # like the entire .bss section. + movl mv, boot_mv + + # copy the loaded image higher up in physical RAM + + movl $__bss_start, %r6 + subl2 $start, %r6 # byte count to r6 + pushl %r6 + pushab start # source address + pushl $KERNEL_START_PHYS # dest address + calls $3, boot_memmove + + # Next we have to fill the .bss section with zeros. We do it now + # instead of when we are preparing the loadable image because it + # makes the image smaller. + subl3 $__bss_start, $_end, %r6 # length of .bss to r6 + + subl3 $start, $__bss_start, %r7 # offset of .bss to r7 + addl2 $KERNEL_START_PHYS, %r7 # phys address of .bss start now in r7 + + pushl %r6 + pushl %r7 + calls $2, boot_memzero + addl2 %r6, %r7 + + decl %r7 + movl %r7, %r9 # save phys addr of last byte of kernel + # in R9. We will need this later. + + # Need to know the distance we have moved the kernel, so that we can + # fix up the machine vector pointer after we jump + moval start, %r2 + subl3 %r2, $KERNEL_START_PHYS, %r2 # r2 = START_PHYS - load_address + + # Calculate the position after jump to reloc + movl $KERNEL_START_PHYS, %r8 + addl2 $reloc, %r8 + subl2 $start, %r8 + jmp (%r8) + halt + +reloc: + # Fix up the machine vector pointer (by restoring it from boot_mv + # and adding in the distance that the kernel was re-located). 
+ addl3 %r2, boot_mv, mv + movl mv, %r10 + + # ...as well as all pre-VM mv function pointers + addl2 %r2, MV_PRE_VM_PUTCHAR(%r10) + addl2 %r2, MV_PRE_VM_GETCHAR(%r10) + addl2 %r2, MV_CPU_TYPE_STR(%r10) + + # Print 'relocated at phys addr xxxxx' + pushab msg_relocated + calls $1, boot_printstr + pushal reloc + calls $1, boot_printint + calls $0, boot_crlf + calls $0, boot_crlf + + # Save RPB before it gets obliterated + movl boot_r11, %r11 + movc3 $RPB_SIZE, (%r11), boot_rpb + + # Execute mv->pre_vm_init() if it is needed + movl mv, %r10 + tstl MV_PRE_VM_INIT(%r10) + beql no_pre_vm_init + subl2 $PAGE_OFFSET, MV_PRE_VM_INIT(%r10) + calls $0, *MV_PRE_VM_INIT(%r10) +no_pre_vm_init: + + # set up the system page table for all of physical memory. + # for the i386, the first page only is setup. For us, as the + # system memory map is contiguous anyway, we might as well setup + # all of physical ram at once, which will be useful for later bits + # of system start up. We will expand the page table later when we + # set up the virtual memory map properly, but for now, we can use + # this to access all of RAM via the kernel _page_offset mapping. + # + # PAGE_OFFSET here is 0x80000000 - the start of system space. + # + # swapper_pg_dir is actually a pgd_t. The spt is the third entry. + # see include/asm-vax/mm/pagelet.h for details of the pgd_t structure. + # If you change the SPT, change the offsets in asm/pgtable.h. + # + # First find a suitable start position for the SPT. This must be + # longword aligned. + # + # Correction, for 3100/85 it needs to be page aligned. + addl3 $0x200, %r9, %r5 # R9 holds kernel end + bicl2 $0x1ff, %r5 # R5 is R9 rounded up to page aligned + moval swapper_pg_dir, %r0 + # This is (pgd_t)swapper_pg_dir[2].br + movl %r5, ASM_SBR_OFFSET(%r0) # save address of base of system page table + + # Fill in the main part of the SPT (the entries that map physical + # memory) + movl $0, %r6 # pfn number + movl RPB_PFNCNT_OFFSET(%r11), %r7 # pfncnt from rpb. 
+sysfill: + bisl3 $_PAGE_VALID + _PAGE_UW, %r6, (%r5)+ + # set PFN, VALID bit and protection UW in PTE + incl %r6 # next PFN + cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES + blssu sysfill + + # We need to align the IOMAP/VMALLOC tables (well at least the VMALLOC + # tables, but no harm.. we will waste the space here or later) on a Linux + # PAGE boundary (4K) + # we need to check is r7 4k-aligned if not we need to fill zero until it + # is, so round up r7 to the next page, add 7 then and with 7, + # check with r6 if the same we are aligned if not put zeros into the + # PTE until we are aligned. - D.A. June 2001 (this stuff is bitchin..) + addl2 $0x7, %r7 + bicl2 $7, %r7 + cmpl %r6, %r7 + beql nozerofl +zerofl: + movl $0x00000000, (%r5)+ + incl %r6 # next PFN + cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES + blssu zerofl +nozerofl: + # Zero out the spare part of the SPT (the entries that will be used + # to map I/O space and provide virtual addrs for vmalloc() later) + movl %r5, iomap_base + addl2 $SPT_HWPTES_IOMAP+0, %r7 +sparef1: + movl $0x00000000, (%r5)+ + incl %r6 # next PFN + cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES + blssu sparef1 + + movl %r5, vmallocmap_base + addl2 $SPT_HWPTES_VMALLOC, %r7 +sparefill2: + movl $0x00000000, (%r5)+ + incl %r6 # next PFN + cmpl %r6, %r7 # one page of PTE Table -> 128 Pages of PTES + blssu sparefill2 + + # System page table is setup. Save SPT length and zap processor registers + moval swapper_pg_dir, %r0 + movl %r7, ASM_SLR_OFFSET(%r0) + mtpr ASM_SBR_OFFSET(%r0), $PR_SBR # set SBR + mtpr %r7, $PR_SLR # set SLR + + # PCBB + # Set up the process control block. Some machines need a valid PCB for + # MM to work properly. + # We should use the PCB for the init task for this, but since this bit + # should be done in C, rather than hardwiring offsets, I have put a fake + # PCB in a throwaway .init section below. 
+ moval fake_pcb, %r9 + movl $PAGE_OFFSET, 88(%r9) # p1br FIXME: magic offset + mtpr %r9, $PR_PCBB + + # No need to TBIA - memory mapping not enabled + # ready to turn on VM + pushab msg_starting_vm + calls $1, boot_printstr + calls $0, boot_crlf + calls $0, VAX_start_mm # do that ole black magic + # made it + + moval swapper_pg_dir, %r0 + addl2 $PAGE_OFFSET, ASM_SBR_OFFSET(%r0) # fix up our reference to the system page tbl. + addl2 $PAGE_OFFSET, iomap_base # ... and the IOMAP PTEs + addl2 $PAGE_OFFSET, vmallocmap_base # ... and the IOMAP PTEs + addl2 $PAGE_OFFSET, mv # fix up machine vector pointer + movl mv, %r10 + addl2 $PAGE_OFFSET, MV_CPU_TYPE_STR(%r10) + + # relocate the interrupt stack. The C code will turn the + # last page of the interrupt stack into a read-only guard + # page. + + # FIXME SMP: This needs to select the right stack for this CPU + # rather than hard-coding the first one. Looks like we need to + # determine our CPU_ID before this point... + moval interrupt_stack, %r0 + addl2 $INT_STACK_SIZE, %r0 + movl %r0, %sp + + # Now that we have ISP (the interrupt stack pointer) sorted, + # we need to move over to working on the kernel stack. We do this + # by loading KSP with the top of the kernel stack for the 'init task' + # and faking a saved PC/PSL on the interrupt stack which we then + # 'return' to + moval init_thread_union, %r0 + addl2 $8192, %r0 # FIXME: taken from + mtpr %r0,$PR_KSP + + pushl $0x001f0000 # IS=0, accmode=prevmode=K, IPL=31 + pushab now_on_kstack + rei + halt + +now_on_kstack: + calls $0, vax_start_kernel # should never return + halt + +msg_loaded: + .ascii "Boot Head.S loaded at address \0" + .align 1 +msg_registers: + .ascii "rpb/bootr5/ap/sp \0" + .align 1 +msg_relocated: + .ascii "relocated at phys address \0" + .align 1 +msg_starting_vm: + .ascii "Starting VM\0" + .align 1 + + +# +# Memory locations. +# + +# iomap_base holds the physical address of the first PTE in the +# IOMAP portion of the system page table. 
+# Once VM is enabled, this is replaced with the VIRTUAL address +.globl iomap_base +iomap_base: + .int 0x00000000 + +.globl vmallocmap_base +vmallocmap_base: + .int 0x00000000 + +# These global symbols allow us to pass stuff to C in a relatively painless +# manner. +.globl boot_ap +boot_ap: + .int 0x00000000 +.globl boot_r11 +boot_r11: + .int 0x00000000 +.globl boot_scb +boot_scb: + .int 0x00000000 +# This is here because we need a safe place to store it as we +# relocate around in memory. +boot_mv: + .int 0x00000000 + +# Our dummy PCB +__INITDATA +.globl fake_pcb +fake_pcb: + .fill 24,4,0x00000000 + +# ...and a pointer to our initial command line +.globl kernel_cmd_line +kernel_cmd_line: + .int 0x00000000 + diff -Nru a/arch/vax/boot/hexdump.S b/arch/vax/boot/hexdump.S --- a/arch/vax/boot/hexdump.S 1970-01-01 01:00:00 +++ b/arch/vax/boot/hexdump.S 2004-09-29 00:26:39 @@ -0,0 +1,85 @@ +# Copyright atp Sept 1998 +# simple register based hexdumper. aide debug for head.S and friends +# inputs: r10 offset to dump +# r9 number of longwords to dump +# (0x80 is one page, 0x200 is 4 pages) +# side effects: clobbered registers: r3, r4, r5, r6, r9, r10 +# +.globl hexdump +hexdump: +# This is broken since the C code can clobber r0 to r5 and this +# code has not been updated for this + halt + +# move offset to start dumping at into r4 +# movl $0xffa800,r4 +# movl ap,r4 +# movl $0x0, r4 + movl %r10,%r4 + movl %r9,%r11 + pushl %r10 + calls $1, boot_printint + calls $1, boot_printspace + pushl (%r4) + calls $1, boot_printint + calls $1, boot_printspace + pushl %r11 + calls $1, boot_printint + calls $0, boot_crlf + clrl %r3 + clrl %r9 +LOOP: + pushl %r11 + calls $1, boot_printint + calls $1, boot_printspace + pushl %r3 + calls $1, boot_printint + calls $1, boot_printspace + pushl %r4 + calls $1, boot_printint + calls $1, boot_printspace + pushl (%r4) + calls $1, boot_printint + calls $1, boot_printspace + jmp PASC + +LRET: calls $1, boot_crlf + incl %r3 + incl %r9 +# page length 
on my uvax is about 0x1f lines + cmpl %r9, $0x1F + beql pause +#c 0x80 is one page 0x200 is 4 pages +LRET2: addl2 $4, %r4 + cmpl %r3, %r11 + bleq LOOP + rsb + +PASC: movl %r4, %r6 + pushl (%r6) + calls $1, boot_printchar + incl %r6 + pushl (%r6) + calls $1, boot_printchar + incl %r6 + pushl (%r6) + calls $1, boot_printchar + incl %r6 + pushl (%r6) + calls $1, boot_printchar + jmp LRET + +pause: + pushab more + calls $1, boot_printstr +GCLOOP: jsb 0x20040044 + tstl %r0 + beql GCLOOP + clrl %r9 + calls $1, boot_crlf + jmp LRET2 + +more: +.ascii "----------more-----------\0" +.align 1 + diff -Nru a/arch/vax/boot/lib.c b/arch/vax/boot/lib.c --- a/arch/vax/boot/lib.c 1970-01-01 01:00:00 +++ b/arch/vax/boot/lib.c 2005-04-23 21:43:40 @@ -0,0 +1,144 @@ + +#include +#include + +#include +#include + +#include "boot_sections.h" + +void __boot boot_putchar(unsigned int c) +{ + mv->pre_vm_putchar(c); +} + +void __boot boot_crlf(void) +{ + boot_putchar('\r'); + boot_putchar('\n'); +} + +void __boot boot_printspace(void) +{ + boot_putchar(' '); +} + +void __boot boot_printstr(const char *s) +{ + while (*s) { + boot_putchar(*s); + s++; + } +} + +void __boot boot_printchar(unsigned int c) +{ + if ((c >= 0x20) && (c <= 0x7e)) { + boot_putchar(c); + } else { + boot_putchar('.'); + } +} + +void __boot boot_printint(unsigned int x) +{ + int i; + int d; + + boot_putchar('0'); + boot_putchar('x'); + + for (i = 28; i>= 0; i -= 4) { + d = (x >> i) & 0x0f; + if (d > 9) { + boot_putchar(d - 10 + 'A'); + } else { + boot_putchar(d + '0'); + } + } +} + + + +void * __boot boot_memset(void *s, int c, size_t count) +{ + char *xs = (char *) s; + + while (count--) + *xs++ = c; + + return s; +} + +void * __boot boot_memzero(void *s, size_t count) +{ + return boot_memset(s, 0, count); +} + + +void __boot boot_memmove(void *dest, const void *src, size_t count) +{ + char *d, *s; + int *di; + int *si; + + if (dest <= src) { + si = (int *) src; + di = (int *) dest; + + while (count & ~3) { + *di++ = 
*si++; + count -= 4; + } + d = (char *) di; + s = (char *) si; + + if (count & 2) { + *d++ = *s++; + *d++ = *s++; + count ++; + count ++; + } + + if (count & 1) { + *d++ = *s++; + count ++; + } + + } else { + d = (char *) dest + count; + s = (char *) src + count; + + if (count & 1) { + *--d = *--s; + count--; + } + + if (count & 2) { + *--d = *--s; + *--d = *--s; + count--; + count--; + } + + si = (int *) s; + di = (int *) d; + while (count -= 4) + *--di = *--si; + } +} + +void __boot boot_print_cpu_id(void) +{ + boot_printstr("CPU type: "); + boot_printstr(mv->cpu_type_str()); + + boot_printstr(" SID: "); + boot_printint(__mfpr(PR_SID)); + + boot_printstr(" SIDEX: "); + boot_printint(mv->sidex); + + boot_crlf(); +} + diff -Nru a/arch/vax/boot/mmstart.S b/arch/vax/boot/mmstart.S --- a/arch/vax/boot/mmstart.S 1970-01-01 01:00:00 +++ b/arch/vax/boot/mmstart.S 2004-09-28 10:48:52 @@ -0,0 +1,99 @@ +# vax/mm/mmstart.S +# Copyright Nov 1998 atp. +# How to go from mapen=0 to mapen=1 (farmers method) +# This could be simpler if we didnt need to worry about the stack. +# FIXME: all this needs to be initdata.. +# +# An 8 MB SPTE must already be set up + loaded by the time we get here... 
+# page align this function because we want to map it in P0 space + +#include +#include +#include + +#include "boot_sections.h" + +__BOOT + +.globl VAX_start_mm +.balign 0x200 +VAX_start_mm: + .word 0xffc # protect registers r2->r11 + movl %sp,%r8 # save the stack pointer + movl %fp,%r9 # save the frame pointer + clrl %r4 # clear the decks + clrl %r5 + clrl %r6 + clrl %r10 + movab VAX_start_mm, %r10 # This is where we start mapping + ashl $-9, %r10, %r4 # calculate the pfn of the start of this fn + movab P0_table,%r3 # Phys address of our P0 table + addl2 $PAGE_OFFSET,%r3 # virtual address of our P0 table + ashl $0x02, %r4, %r5 # P0 PTE offset in bytes + subl2 %r5, %r3 # fake P0BR in bytes + ashl $-2, %r5, %r6 # fake P0LR in PTEs + addl2 $0x08, %r6 # add in length of table + mtpr %r3, $PR_P0BR # load P0BR + mtpr %r6, $PR_P0LR # load P0LR + mtpr $PAGE_OFFSET,$PR_P1BR # make sure P1BR is sensible + mtpr $0, $PR_P1LR # and zero length + movab P0_table,%r3 # Phys address of P0table. + movl 0x08,%r2 +P0fill: movl (%r3), %r1 # fill the P0table with the right pfns + addl2 %r4, %r1 + movl %r1, (%r3)+ + sobgtr %r2, P0fill +# +# fixup stack +# + addl2 $PAGE_OFFSET, 8(%sp) # saved ap. + addl2 $PAGE_OFFSET, 12(%sp)# fixup saved fp + addl2 $PAGE_OFFSET, 16(%sp)# fixup saved pc +# active stack +# movl %sp, %r10 +# subl2 $0x80, %r10 +# movl $0x40, %r9 +# jsb hexdump +# halt + addl2 $PAGE_OFFSET, %sp # fixup sp to virtual address + addl2 $PAGE_OFFSET, %fp # fixup fp to virtual address + movab vreloc, %r6 # where we want to end up (phys) + addl2 $PAGE_OFFSET, %r6 # & virt + +# According to ARM, we should invalidate the whole TB here + + mtpr $0, $PR_TBIA # clear translation buffer + mtpr $1, $PR_MAPEN # switch on mm. + jmp (%r6) + halt +vreloc: # made it. now fixup sp fp and stack frame + # cant use prom routines now. + + mtpr $PAGE_OFFSET, $PR_P0BR # Clear out P0BR, P0LR - do not + mtpr $0, $PR_P0LR # need them anymore + mtpr $0, $PR_TBIA # clear tlb after touching BRs. 
+ ret + +.globl PK_str1 +PK_str1: +.align 1 +.ascii "value = %8.8x\n\0" + +# This the initial P0 page table we use to springboard to S0 space +# it is used only for mapping this function. so the enabling +# of virtual memory and jmp to S0 space needs to happen here. +# +# initial flags are user_write, valid +# we align this on a page boundary +.globl P0_table +.balign 0x200 +P0_table: + .int _PAGE_VALID + _PAGE_UW + 0 + .int _PAGE_VALID + _PAGE_UW + 1 + .int _PAGE_VALID + _PAGE_UW + 2 + .int _PAGE_VALID + _PAGE_UW + 3 + .int _PAGE_VALID + _PAGE_UW + 4 + .int _PAGE_VALID + _PAGE_UW + 5 + .int _PAGE_VALID + _PAGE_UW + 6 + .int _PAGE_VALID + _PAGE_UW + 7 + diff -Nru a/arch/vax/boot/startup.c b/arch/vax/boot/startup.c --- a/arch/vax/boot/startup.c 1970-01-01 01:00:00 +++ b/arch/vax/boot/startup.c 2005-08-07 04:04:59 @@ -0,0 +1,93 @@ +/* First C code - started by head.S */ +/* Copyright atp 1998-2001 under the GNU GPL */ + +#include +#include +#include + +/* stuff that is declared in head.S */ +extern unsigned long int phys_start; /* physical address of kernel*/ +extern unsigned long int virt_start; /* virtual address of kernel */ +extern unsigned long int boot_ap; /* argument pointer */ +extern unsigned long int boot_r11; /* rpb pointer */ +extern unsigned long int boot_scb; /* scb pointer */ +extern unsigned long int iomap_base; + +/* head.S copies the RPB into this structure */ +struct rpb_struct boot_rpb; + +extern void start_kernel(void); +extern void guard_int_stack(void); /* arch/vax/kernel/interrupt.c */ +extern void enable_early_printk(void); + +/* + * This is a transitionary function. When things are finally sorted + * the only tasks this function will perform will relate to the interaction + * with VMB and other stuff that needs to go on early before we start_kernel() + * like patchable control store, memory bitmap creation on non-ROM based + * VAXen. + * At present its used for testing the early parts of the kernel startup. 
+ * The other main thing it does is load the rpb and scb global variables, + * and switch on basic paging. The main paging setup is done later. + * + * ok ive changed my mind. We turn on MM in the asm before we hit C code + * (keeps stacks simpler) just like the i386, with a default 8mb system + * page table setup (with a 1:1 mapping of system space. + * + * Things that are temporary have a habit of becoming permanent. + * I've renamed from tmp_start_kernel to vax_start_kernel, as convenient + * bit of arch-specific C code before starting the main start_kernel + * + * atp aug 2001 - This is now permanent, and has been renamed to startup.c + */ + +#define IOMAP_START (PAGE_OFFSET+((iomap_base-swapper_pg_dir[2].br)<<(PAGELET_SHIFT-2))) + + +void vax_start_kernel(void) +{ + /* Set the number of 4k pages */ + max_pfn = max_hwpfn / 8; + + /* Protect us from interrupt stack overflows */ + guard_int_stack(); + + if (mv->post_vm_init) + mv->post_vm_init(); + +#ifdef CONFIG_EARLY_PRINTK + enable_early_printk(); +#endif + + printk(KERN_INFO "Linux/VAX \n"); + +#ifdef __SMP__ + { + static int boot_cpu = 1; + /* "current" has been set up, we need to load it now */ + if (!boot_cpu) + initialize_secondary(); + boot_cpu = 0; + } +#endif + + printk("RPB info: .l_pfncnt=0x%08x, .l_vmb_version=0x%08x, " + ".l_badpgs=0x%08x\n", boot_rpb.l_pfncnt, + boot_rpb.l_vmb_version, boot_rpb.l_badpgs); + + printk("Physical memory: 0x%08x HW pagelets, 0x%08lx pages (%dKB)\n", + max_hwpfn, max_pfn, max_hwpfn/2); + + printk("CPU type: %s, SID: 0x%08x, SIDEX: 0x%08x\n", mv->cpu_type_str(), + __mfpr(PR_SID), mv->sidex); + + printk("VM: mapped physical from 0x%x to 0x%x, iomap from %lx\n", + PAGE_OFFSET, PAGE_OFFSET + (max_hwpfn * 512), + IOMAP_START); + printk("VM: vmalloc from 0x%lx to 0x%lx\n", VMALLOC_START, VMALLOC_END); + printk("VM: ptemap from 0x%lx to 0x%lx for %d processes\n", + TASKPTE_START, TASKPTE_END, TASK_MAXUPRC); + printk("Calling start_kernel()...\n\n"); + start_kernel(); +} + diff 
-Nru a/arch/vax/configs/ka42_defconfig b/arch/vax/configs/ka42_defconfig --- a/arch/vax/configs/ka42_defconfig 1970-01-01 01:00:00 +++ b/arch/vax/configs/ka42_defconfig 2004-05-11 01:43:52 @@ -0,0 +1,368 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_VAX=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_I2C is not set +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_PREEMPT is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp rw debug" +CONFIG_EARLY_PRINTK=y + +# +# VAX CPU types +# +# CONFIG_CPU_KA630 is not set +# CONFIG_CPU_KA640 is not set +# CONFIG_CPU_KA650 is not set +# CONFIG_CPU_KA660 is not set +# CONFIG_CPU_KA410 is not set +CONFIG_CPU_KA42=y +# CONFIG_CPU_KA43 is not set +# CONFIG_CPU_KA46 is not set +# CONFIG_CPU_KA48 is not set +# CONFIG_CPU_KA55 is not set +# CONFIG_CPU_VXT is not set + +# +# Code maturity level options +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# 
CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +CONFIG_MOUSE_VSXXXAA=y +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Graphics support +# +# CONFIG_FB is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# VAX Bus support +# +# CONFIG_QBUS is not set +CONFIG_VSBUS=y + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set +# CONFIG_NETFILTER is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +# CONFIG_NET_ETHERNET is not set + +# 
+# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Token Ring devices +# + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# Amateur Radio support +# +# CONFIG_HAMRADIO is not set + +# +# IrDA (infrared) support +# +# CONFIG_IRDA is not set + +# +# Bluetooth support +# +# CONFIG_BT is not set + +# +# VAX Network device support +# +CONFIG_VAX_LANCE=y +# CONFIG_VAX_SGEC is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_HFSPLUS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# 
CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set + +# +# VAX character devices +# +CONFIG_SERIAL=y +CONFIG_SERIAL_CORE=y +CONFIG_DZ=y +# CONFIG_SERIAL_IPR is not set +CONFIG_SERIAL_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_UNIX98_PTY_COUNT=256 +# CONFIG_RTC is not set + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_BUGVERBOSE=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Library routines +# +CONFIG_CRC32=y diff -Nru a/arch/vax/configs/ka43_defconfig b/arch/vax/configs/ka43_defconfig --- a/arch/vax/configs/ka43_defconfig 1970-01-01 01:00:00 +++ b/arch/vax/configs/ka43_defconfig 2003-10-06 03:37:18 @@ -0,0 +1,321 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_VAX=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor type and features +# +# CONFIG_SMP is not set +# CONFIG_SMP is not set + +# +# General setup +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_AOUT is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_EXTRA_ELF_COMPILER is not set +CONFIG_NET=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y 
+CONFIG_KCORE_ELF=y +# CONFIG_KCORE_AOUT is not set +# CONFIG_BINFMT_AOUT is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_PROC_FS=y + +# +# Bus support +# +# CONFIG_QBUS is not set +# CONFIG_UNIBUS is not set +# CONFIG_VAXBI is not set +CONFIG_VSBUS=y +CONFIG_VAX_4000HC=y + +# +# Mass storage support +# +# CONFIG_MSCP is not set +# CONFIG_TMSCP is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_XD is not set +# CONFIG_PARIDE is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_CISS_SCSI_TAPE is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +# CONFIG_NETFILTER is not set +# CONFIG_FILTER is not set +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_IPV6 is not set +# CONFIG_KHTTPD is not set +# CONFIG_ATM is not set +# CONFIG_VLAN_8021Q is not set + +# +# +# +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_LLC is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# SCSI support +# +CONFIG_SCSI=y + +# +# SCSI support type (disk, tape, CDrom) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y 
+CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y + +# +# SCSI low-level drivers +# +CONFIG_SCSI_VAX_5380=y +# CONFIG_SCSI_VAX_53C94 is not set + +# +# Network device support +# +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_SLIP is not set +CONFIG_PPP=y + +# +# CCP compressors for PPP are only built as modules. +# +CONFIG_VAX_LANCE=y +# CONFIG_SGEC is not set + +# +# File systems +# +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_REISERFS_FS=y +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_ADFS_FS is not set +# CONFIG_ADFS_FS_RW is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_JBD_DEBUG is not set +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +# CONFIG_UMSDOS_FS is not set +CONFIG_VFAT_FS=y +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_TMPFS is not set +CONFIG_RAMFS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +# CONFIG_ZISOFS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS_RW is not set +# CONFIG_HPFS_FS is not set +CONFIG_PROC_FS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVFS_MOUNT is not set +# CONFIG_DEVFS_DEBUG is not set +CONFIG_DEVPTS_FS=y +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX4FS_RW is not set +# CONFIG_ROMFS_FS is not set +CONFIG_EXT2_FS=y +# CONFIG_SYSV_FS is not set +# CONFIG_UDF_FS is not set +# CONFIG_UDF_RW is not set +CONFIG_UFS_FS=y +# CONFIG_UFS_FS_WRITE is not set + +# +# Network File Systems +# +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +CONFIG_ROOT_NFS=y +# CONFIG_NFSD is not set +# CONFIG_NFSD_V3 is not set +CONFIG_SUNRPC=y 
+CONFIG_LOCKD=y +# CONFIG_SMB_FS is not set +# CONFIG_NCP_FS is not set +# CONFIG_NCPFS_PACKET_SIGNING is not set +# CONFIG_NCPFS_IOCTL_LOCKING is not set +# CONFIG_NCPFS_STRONG is not set +# CONFIG_NCPFS_NFS_NS is not set +# CONFIG_NCPFS_OS2_NS is not set +# CONFIG_NCPFS_SMALLDOS is not set +# CONFIG_NCPFS_NLS is not set +# CONFIG_NCPFS_EXTRAS is not set +# CONFIG_ZISOFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_SMB_NLS is not set +CONFIG_NLS=y + +# +# Native Language Support +# +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# 
CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# VAX Character devices +# +# CONFIG_VT is not set +CONFIG_SERIAL=y +CONFIG_DZ=y +CONFIG_SERIAL_MTPR=y +CONFIG_SERIAL_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_UNIX98_PTY_COUNT=256 +# CONFIG_RTC is not set + +# +# Kernel hacking +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_PROFILE=y +CONFIG_PROFILE_SHIFT=2 + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_ZLIB_INFLATE is not set +# CONFIG_ZLIB_DEFLATE is not set diff -Nru a/arch/vax/configs/ka46_defconfig b/arch/vax/configs/ka46_defconfig --- a/arch/vax/configs/ka46_defconfig 1970-01-01 01:00:00 +++ b/arch/vax/configs/ka46_defconfig 2004-05-11 01:43:52 @@ -0,0 +1,368 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_VAX=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_I2C is not set +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_PREEMPT is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp rw debug" +CONFIG_EARLY_PRINTK=y + +# +# VAX CPU types +# +# CONFIG_CPU_KA630 is not set +# CONFIG_CPU_KA640 is not set +# CONFIG_CPU_KA650 is not set +# CONFIG_CPU_KA660 is not set +# CONFIG_CPU_KA410 is not set +# CONFIG_CPU_KA42 is not set +# CONFIG_CPU_KA43 is not set +CONFIG_CPU_KA46=y +# CONFIG_CPU_KA48 is not set +# CONFIG_CPU_KA55 is not set +# CONFIG_CPU_VXT is not set + +# +# Code maturity level options +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y 
+CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +CONFIG_MOUSE_VSXXXAA=y +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Graphics support +# +# CONFIG_FB is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# VAX Bus support +# +# CONFIG_QBUS is not set +CONFIG_VSBUS=y + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set 
+CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set +# CONFIG_NETFILTER is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +# CONFIG_NET_ETHERNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Token Ring devices +# + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# Amateur Radio support +# +# CONFIG_HAMRADIO is not set + +# +# IrDA (infrared) support +# +# CONFIG_IRDA is not set + +# +# Bluetooth support +# +# CONFIG_BT is not set + +# +# VAX Network device support +# +CONFIG_VAX_LANCE=y +# CONFIG_VAX_SGEC is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y 
+CONFIG_PROC_KCORE=y +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_HFSPLUS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set + +# +# VAX character devices +# +CONFIG_SERIAL=y +CONFIG_SERIAL_CORE=y +CONFIG_DZ=y +# CONFIG_SERIAL_IPR is not set +CONFIG_SERIAL_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_UNIX98_PTY_COUNT=256 +# CONFIG_RTC is not set + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_BUGVERBOSE=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Library routines +# +CONFIG_CRC32=y diff -Nru a/arch/vax/configs/ka48_defconfig 
b/arch/vax/configs/ka48_defconfig --- a/arch/vax/configs/ka48_defconfig 1970-01-01 01:00:00 +++ b/arch/vax/configs/ka48_defconfig 2004-05-11 21:45:59 @@ -0,0 +1,310 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_VAX=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_PREEMPT is not set +CONFIG_EARLY_PRINTK=y + +# +# VAX CPU types +# +# CONFIG_CPU_KA630 is not set +# CONFIG_CPU_KA640 is not set +# CONFIG_CPU_KA650 is not set +# CONFIG_CPU_KA660 is not set +# CONFIG_CPU_KA410 is not set +# CONFIG_CPU_KA42 is not set +# CONFIG_CPU_KA43 is not set +CONFIG_CPU_KA46=y +CONFIG_CPU_KA48=y +# CONFIG_CPU_KA55 is not set +# CONFIG_CPU_VXT is not set + +# +# Code maturity level options +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# VAX Bus support +# +# CONFIG_QBUS is not set +CONFIG_VSBUS=y + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_BLK_DEV_INITRD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not 
set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set +# CONFIG_NETFILTER is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +# CONFIG_NET_ETHERNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Token Ring devices +# + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# Amateur Radio support +# +# CONFIG_HAMRADIO is not set + +# +# IrDA (infrared) support +# +# CONFIG_IRDA is not set + +# +# Bluetooth support +# +# CONFIG_BT is not set + +# +# VAX Network device support +# +CONFIG_VAX_LANCE=y +# CONFIG_VAX_SGEC is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y 
+CONFIG_PROC_KCORE=y +CONFIG_DEVPTS_FS=y +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Generic Driver Options +# + +# +# VAX character devices +# +CONFIG_SERIAL=y +CONFIG_SERIAL_CORE=y +CONFIG_DZ=y +# CONFIG_SERIAL_IPR is not set +CONFIG_SERIAL_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_UNIX98_PTY_COUNT=256 +# CONFIG_RTC is not set + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_BUGVERBOSE=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y diff -Nru a/arch/vax/configs/ka4x_defconfig b/arch/vax/configs/ka4x_defconfig --- a/arch/vax/configs/ka4x_defconfig 1970-01-01 01:00:00 
+++ b/arch/vax/configs/ka4x_defconfig 2004-08-16 13:22:39 @@ -0,0 +1,415 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_VAX=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_I2C is not set +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_PREEMPT is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp rw debug" +CONFIG_EARLY_PRINTK=y + +# +# VAX CPU types +# +# CONFIG_CPU_KA630 is not set +# CONFIG_CPU_KA640 is not set +CONFIG_CPU_KA650=y +# CONFIG_CPU_KA660 is not set +# CONFIG_CPU_KA410 is not set +CONFIG_CPU_KA42=y +CONFIG_CPU_KA43=y +CONFIG_CPU_KA46=y +# CONFIG_CPU_KA48 is not set +# CONFIG_CPU_KA55 is not set +# CONFIG_CPU_VXT is not set + +# +# Code maturity level options +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + 
+# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +CONFIG_MOUSE_VSXXXAA=y +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# VAX Bus support +# +CONFIG_QBUS=y +CONFIG_VSBUS=y + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y 
+# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_NETFILTER is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +# CONFIG_NET_ETHERNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# VAX Network device support +# +CONFIG_VAX_LANCE=y +# CONFIG_VAX_SGEC is not set +CONFIG_DELQA=y + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVPTS_FS_XATTR is not set 
+CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_HFSPLUS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Generic Driver Options +# +CONFIG_DEBUG_DRIVER=y + +# +# VAX character devices +# +CONFIG_SERIAL=y +CONFIG_DZ=y +CONFIG_SERIAL_IPR=y +CONFIG_SERIAL_CONSOLE=y +CONFIG_UNIX98_PTY_COUNT=256 + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_VAX_DIAG_LED=y +CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS=y +CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS_ABORT=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff -Nru a/arch/vax/configs/ka650_defconfig 
b/arch/vax/configs/ka650_defconfig --- a/arch/vax/configs/ka650_defconfig 1970-01-01 01:00:00 +++ b/arch/vax/configs/ka650_defconfig 2004-07-30 02:24:12 @@ -0,0 +1,356 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_VAX=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_I2C is not set +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_PREEMPT is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp rw debug" +CONFIG_EARLY_PRINTK=y + +# +# VAX CPU types +# +# CONFIG_CPU_KA630 is not set +# CONFIG_CPU_KA640 is not set +CONFIG_CPU_KA650=y +# CONFIG_CPU_KA660 is not set +# CONFIG_CPU_KA410 is not set +CONFIG_CPU_KA42=y +CONFIG_CPU_KA43=y +CONFIG_CPU_KA46=y +# CONFIG_CPU_KA48 is not set +# CONFIG_CPU_KA55 is not set +# CONFIG_CPU_VXT is not set + +# +# Code maturity level options +# +# CONFIG_EXPERIMENTAL is not set +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y 
+# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Graphics support +# +# CONFIG_FB is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# VAX Bus support +# +CONFIG_QBUS=y +# CONFIG_VSBUS is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_NETFILTER is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +# CONFIG_NET_ETHERNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# 
Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# VAX Network device support +# +# CONFIG_VAX_LANCE is not set +# CONFIG_VAX_SGEC is not set +CONFIG_DELQA=y + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_HFSPLUS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# 
CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Generic Driver Options +# +CONFIG_DEBUG_DRIVER=y + +# +# VAX character devices +# +CONFIG_SERIAL=y +CONFIG_SERIAL_CORE=y +# CONFIG_DZ is not set +CONFIG_SERIAL_IPR=y +CONFIG_SERIAL_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_UNIX98_PTY_COUNT=256 +# CONFIG_RTC is not set + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_VAX_DIAG_LED=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff -Nru a/arch/vax/defconfig b/arch/vax/defconfig --- a/arch/vax/defconfig 1970-01-01 01:00:00 +++ b/arch/vax/defconfig 2005-03-28 18:39:51 @@ -0,0 +1,526 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.11 +# Mon Mar 28 17:07:39 2005 +# +CONFIG_VAX=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_ELF_KERNEL=y +CONFIG_KCORE_ELF=y +# CONFIG_I2C is not set +# CONFIG_I2C_ALGOBIT is not set +CONFIG_GENERIC_CALIBRATE_DELAY=y +# CONFIG_PREEMPT is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp rw debug" +CONFIG_EARLY_PRINTK=y + +# +# VAX CPU types +# +# CONFIG_CPU_KA630 is not set +# CONFIG_CPU_KA640 is not set +CONFIG_CPU_KA650=y +# CONFIG_CPU_KA660 is not set +# CONFIG_CPU_KA410 is not set +CONFIG_CPU_KA42=y +CONFIG_CPU_KA43=y +CONFIG_CPU_KA46=y +CONFIG_CPU_KA48=y +# CONFIG_CPU_KA49 is not set +# CONFIG_CPU_KA55 is not set +# CONFIG_CPU_VXT is not set + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y 
+CONFIG_CLEAN_COMPILE=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_ARCH_API_TEST=y + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_KOBJECT_UEVENT is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SHMEM=y +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +# CONFIG_TINY_SHMEM is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_LIBPS2 is not set +# CONFIG_SERIO_RAW is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +CONFIG_KEYBOARD_LKKBD=y +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +CONFIG_MOUSE_VSXXXAA=y +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not 
set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# Graphics support +# +# CONFIG_FB is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# VAX Bus support +# +CONFIG_QBUS=y +CONFIG_VSBUS=y + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_ATA_OVER_ETH is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_IP_TCPDIAG=y +# CONFIG_IP_TCPDIAG_IPV6 is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# 
CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_ETHERTAP is not set + +# +# Ethernet (10 or 100Mbit) +# +# CONFIG_NET_ETHERNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# PCMCIA network device support +# +# CONFIG_NET_PCMCIA is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# VAX Network device support +# +CONFIG_VAX_LANCE=y +# CONFIG_VAX_SGEC is not set +CONFIG_DELQA=y + +# +# SCSI device support +# +CONFIG_SCSI=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +# CONFIG_CHR_DEV_SG is not set + +# +# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs +# +# CONFIG_SCSI_MULTI_LUN is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y + +# +# SCSI Transport Attributes +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set + +# +# SCSI low-level drivers +# +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_DEBUG is not set + +# +# VAX SCSI low-level drivers +# +CONFIG_SCSI_VAX_5380=y +# CONFIG_SCSI_VAX_53C94 is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set + +# +# XFS support +# +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_TMPFS_XATTR is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_ODS2_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_SUNRPC=y +# 
CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set + +# +# VAX character devices +# +CONFIG_SERIAL=y +CONFIG_DZ=y +CONFIG_SERIAL_IPR=y +CONFIG_SERIAL_CONSOLE=y + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SLAB=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_VAX_DIAG_LED=y +CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS=y +CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS_ABORT=y + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Hardware crypto devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff -Nru a/arch/vax/kernel/Makefile b/arch/vax/kernel/Makefile --- a/arch/vax/kernel/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/kernel/Makefile 2005-10-03 14:28:17 @@ -0,0 +1,35 @@ +# +# Makefile for 
the linux kernel. +# +# Note! Dependencies are done automagically +# DON'T put your own dependencies here +# unless it's something special (ie not a .c file). + +extra-y := vmlinux.lds + +obj-y := ptrace.o process.o setup.o regdump.o interrupt.o entry.o time.o \ + ioprobe.o syscall.o signal.o semaphore.o vax_dev_init.o \ + init_task.o reboot.o cpu_generic.o clock.o \ + +obj-y += bootcons/ + +obj-$(CONFIG_CPU_KA42) += cpu_ka42.o +obj-$(CONFIG_CPU_KA43) += cpu_ka43.o +obj-$(CONFIG_CPU_KA46) += cpu_ka46.o +obj-$(CONFIG_CPU_KA48) += cpu_ka48.o +obj-$(CONFIG_CPU_KA49) += cpu_ka49.o +obj-$(CONFIG_CPU_KA52) += cpu_ka52.o +obj-$(CONFIG_CPU_KA55) += cpu_ka55.o +obj-$(CONFIG_CPU_KA62) += cpu_ka62.o +obj-$(CONFIG_CPU_KA410) += cpu_ka410.o +obj-$(CONFIG_CPU_KA630) += cpu_ka630.o +obj-$(CONFIG_CPU_KA640) += cpu_ka640.o +obj-$(CONFIG_CPU_KA650) += cpu_ka650.o +obj-$(CONFIG_CPU_KA660) += cpu_ka660.o +obj-$(CONFIG_CPU_VXT) += cpu_vxt.o + +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_VAX_DIAG_LED) += diag_led.o + +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + diff -Nru a/arch/vax/kernel/asm-offsets.c b/arch/vax/kernel/asm-offsets.c --- a/arch/vax/kernel/asm-offsets.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/asm-offsets.c 2004-06-14 14:12:58 @@ -0,0 +1,44 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed + * to extract and format the required data. 
+ */ + +#include +#include +#include +#include +#include +#include + +#define DEFINE(sym, val) \ + asm volatile ("\n->" #sym " %0 " #val : : "i" (val)) +#define BLANK() asm volatile ("\n->" : : ) + +void foo (void) +{ + DEFINE (MV_PRE_VM_INIT, offsetof (struct vax_mv, pre_vm_init)); + DEFINE (MV_POST_VM_INIT, offsetof (struct vax_mv, post_vm_init)); + DEFINE (MV_PRE_VM_PUTCHAR, offsetof (struct vax_mv, pre_vm_putchar)); + DEFINE (MV_PRE_VM_GETCHAR, offsetof (struct vax_mv, pre_vm_getchar)); + DEFINE (MV_POST_VM_PUTCHAR, offsetof (struct vax_mv, post_vm_putchar)); + DEFINE (MV_POST_VM_GETCHAR, offsetof (struct vax_mv, post_vm_getchar)); + DEFINE (MV_CONSOLE_INIT, offsetof (struct vax_mv, console_init)); + DEFINE (MV_REBOOT, offsetof (struct vax_mv, reboot)); + DEFINE (MV_HALT, offsetof (struct vax_mv, halt)); + DEFINE (MV_MCHECK, offsetof (struct vax_mv, mcheck)); + DEFINE (MV_INIT_DEVICES, offsetof (struct vax_mv, init_devices)); + DEFINE (MV_CPU_TYPE_STR, offsetof (struct vax_mv, cpu_type_str)); + DEFINE (MV_CLOCK_INIT, offsetof (struct vax_mv, clock_init)); + DEFINE (MV_CLOCK_BASE, offsetof (struct vax_mv, clock_base)); + DEFINE (MV_SIDEX, offsetof (struct vax_mv, sidex)); + BLANK (); + DEFINE (ASM_SBR_OFFSET, sizeof (struct pgd_descriptor) * 2 + offsetof (struct pgd_descriptor, br)); + DEFINE (ASM_SLR_OFFSET, sizeof (struct pgd_descriptor) * 2 + offsetof (struct pgd_descriptor, lr)); + BLANK (); + DEFINE (PAGE_OFFSET, PAGE_OFFSET); + BLANK (); + DEFINE (RPB_SIZE, sizeof (struct rpb_struct)); + DEFINE (RPB_PFNCNT_OFFSET, offsetof (struct rpb_struct, l_pfncnt)); +} + diff -Nru a/arch/vax/kernel/bootcons/Makefile b/arch/vax/kernel/bootcons/Makefile --- a/arch/vax/kernel/bootcons/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/kernel/bootcons/Makefile 2005-10-03 14:28:17 @@ -0,0 +1,7 @@ +# +# This Makefile handles boot console code, with either VM switched +# on or off. 
+# + +obj-$(CONFIG_CONS_PREVM_KA52) += cons_prevm_ka52.o + diff -Nru a/arch/vax/kernel/bootcons/cons_prevm_ka52.c b/arch/vax/kernel/bootcons/cons_prevm_ka52.c --- a/arch/vax/kernel/bootcons/cons_prevm_ka52.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/bootcons/cons_prevm_ka52.c 2005-10-03 14:28:17 @@ -0,0 +1,59 @@ +#include + +static volatile unsigned long *ka52_console = NULL; + +/* + * FIXME: This function gets "unsigned char" instead of "int". All other + * console I/O functions get int, but that looks a bit + * non-intuitive for byte I/O ... + * FIXME: 0x2004aaa8 isn't an official address. I wasn't ad hoc able to + * use the official string-printing function (see + * http://computer-refuge.org/classiccmp/dec94mds/473aamga.pdf, + * pp. B-4 ff.). This address (0x2004aaa8) is internally + * called for the purpose of printing out one byte to the + * console. Also, I was too lazy to properly check register + * usage of the subroutine, so I invalidate them all... + */ +void +ka52_prevm_putchar (unsigned char c) +{ + asm ( + " movzbl %0, %%r0 \n" + " jsb 0x2004aaa8 \n" + : /* nothing */ + : "g"(c) + : "r0", "r1", "r2", "r3", "r4", "r5", + "r6", "r7", "r8", "r9", "r10", "r11" + /* As it seems in theory, R2, R3 and R11 are PUSHRed by this + * subroutine, so (in theory) this shouldn't needed... */ + ); + + return; +} + +void +ka52_console_init (unsigned long address) +{ + ka52_console = ioremap (address, 8 * sizeof (unsigned long)); +} + +void +ka52_postvm_putchar (unsigned char c) +{ + unsigned long temp = c; + +#if 0 + if (ka52_console) + ka52_console[3] = temp; +#endif + + return; +} + +unsigned char +ka52_prevm_getchar (void) +{ + asm ("halt"); + return 0; +} + diff -Nru a/arch/vax/kernel/clock.c b/arch/vax/kernel/clock.c --- a/arch/vax/kernel/clock.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/clock.c 2005-04-25 16:35:02 @@ -0,0 +1,70 @@ +/* arch/vax/kernel/clock.c + * + * Copyright atp 2002. license: GPL + * + * Routines to manipulate the real time clock on VAXen. 
+ * + * There are two sorts of battery backed hardware clocks. There is the + * TODR (time of day register) found on big VAXen, and the familiar + * Dallas CMOS clock on the desktop VAXen. + * + * The init routines are called through the machine vector. See + * cpu_kaxx.c for details of that. The callers are time_init() and + * the rtc clock driver (drivers/char/rtc.c), using macros defined + * in asm/mc146818rtc.h. + * + * Prototypes for some of these functions are in asm/mc146818rtc.h + * and some in asm/clock.h. (The ones that are used in the mv + * initialisation are in clock.h, and the ones used in mc146818rtc.h + * are in that file). + * + */ + +#include +#include /* For ioremap() */ +#include +#include +#include /* for TODR, if anyone feels like implementing it */ +#include +#include /* includes asm/mc146818rtc.h */ + /* - needed for offsets in debug output */ + + +/* this does nothing, and is a placeholder */ +void generic_clock_init(void) +{ + printk (KERN_WARNING "No RTC used\n"); + return; +} + +/* Map the ROM clock page, and put address in mv */ +void ka4x_clock_init(void) +{ + mv->clock_base = ioremap(VSA_CLOCK_BASE, 1); /* 1 page */ + printk("Mapped RTC clock page (v %p p %08x )\n", mv->clock_base, + VSA_CLOCK_BASE); + + printk("RTC date is %2.2d:%2.2d:%4.4d %2.2d:%2.2d:%2.2d\n", + CMOS_READ(RTC_DAY_OF_MONTH), CMOS_READ(RTC_MONTH), + CMOS_READ(RTC_YEAR), CMOS_READ(RTC_HOURS), + CMOS_READ(RTC_MINUTES), CMOS_READ(RTC_SECONDS)); + + return; +} + +unsigned char ka4x_clock_read(unsigned long offset) +{ + if (mv->clock_base) + return mv->clock_base[offset] >> 2; + + return 0; +} + +void ka4x_clock_write(unsigned char val, unsigned long offset) +{ + if (mv->clock_base) + mv->clock_base[offset] = val << 2; + + return; +} + diff -Nru a/arch/vax/kernel/cpu_generic.c b/arch/vax/kernel/cpu_generic.c --- a/arch/vax/kernel/cpu_generic.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_generic.c 2005-05-21 12:00:11 @@ -0,0 +1,216 @@ +/* + * 
linux/arch/vax/kernel/cpu_generic.c + * + * Copyright (C) 2000 Kenn Humborg + * + * This file contains generic machine vector handlers that are + * useful for multiple CPUs. For example, all CPUs that use + * MTPR-based console I/O can use putchar_mtpr and getchar_mtpr + * from here. + * + * Stuff that is specific to a given CPU can be found in cpu_XXX.c + */ + +/* + * Generic reboot and halt functions are in reboot.c + * CPUs that need to do special stuff in their halt and reboot functions + * should point to their own functions in their machine vector, + * otherwise they can leave NULL in the machine vector slots for these + * functions + * + * atp. This holds for machine check functions too. Leave a NULL if you + * just want a halt instruction on receipt of a machine check. + * See VARM Chapter 5 for details on machine check frames. + */ + + +#include +#include /* For ioremap() */ +#include +#include +#include +#include /* For HALT */ +#include + +/* This is the main machine vector pointer */ +struct vax_mv *mv; + +/************************************************************************/ +/* These functions can be used by implementations that do console I/O + via processor registers PR_TXCS, PR_TXDB, PR_RXCS and PR_RXDB */ + +void mtpr_putchar(int c) +{ + unsigned char xc; + int delay = 100; + + xc = (char) (c & 0xff); + while ((Xmfpr(PR_TXCS) & PR_TXCS_READY) == 0) + /* busy wait */; + + Xmtpr(xc, PR_TXDB); + + /* If the char just printed was a \n or \r, wait a short while. 
+ * Otherwise a printk() followed by a HALT can cause the + * console's halt message to overwrite the text just printed */ + if ((c == '\r') || (c == '\n')) + while (delay--) + /* busy wait */; +} + +int mtpr_getchar(void) +{ + /* Not yet implemented */ + asm("halt"); + return 0; +} + +/************************************************************************/ +/* These functions can be used by implementations that do console I/O + via ROM routines at 0x20040058 and 0x20040044 (KA410, KA42 and KA43 + CPUs). These functions can only be used before VM is enabled. */ + +void ka46_48_49_prom_putchar(int c) +{ + asm( + " movzbl %0, %%r2 # zero-extended byte convert. \n" + " jsb 0x20040068 \n" + : /* nothing */ + : "g"(c) + : "r2"); +} + +int ka46_48_49_prom_getchar(void) +{ + /* Not yet implemented */ + asm("halt"); + return 0; +} + +void ka4x_prom_putchar(int c) +{ + asm( + " movzbl %0, %%r2 # zero-extended byte convert. \n" + " jsb 0x20040058 \n" + : /* nothing */ + : "g"(c) + : "r2"); +} + +int ka4x_prom_getchar(void) +{ + /* Not yet implemented */ + asm("halt"); + return 0; +} + +//#ifdef CONFIG_DZ +/************************************************************************/ +/* These functions can be used by implementations that do console I/O + via a DZ11-compatible chip (KA410, KA42 and KA43 CPUs). These functions can + only be used after VM is enabled and the DZ11 registers have been + mapped by map_dz11_regs(). */ + + +volatile struct dz11_regs __iomem *dz11_addr; + +/* This is the serial line on the DZ11 that we should use as the + console. Normally it is line 3 */ +static unsigned int dz11_line; + +/* + * Stuff a char out of a DZ11-compatible serial chip + */ +void dz11_putchar(int c) +{ + u_int txcs, txdb, done; + + /* + * During early startup, there might be a printk() call inside + * ioremap(), which will be executed while ioremap() hasn't + * finished, so the VM addr isn't yet set... 
+ */ + if (!dz11_addr) + return; + + txdb = txcs = done = 0; + txdb = (c & DZ11_TDR_DATA_MASK); + + /* Stop all I/O activity by clearing MSE */ + dz11_addr->csr = 0; + + /* Enable transmit the relevant line */ + dz11_addr->tcr = DZ11_TCR_LINEENAB0 << dz11_line; + + /* Set line to 9600,8N1 and enable reception */ + dz11_addr->rbuf_lpr.lpr = DZ11_LPR_RXENAB | + DZ11_SPEED_9600 | DZ11_CHARLGTH_8 | dz11_line; + + /* Set Master Scan Enable to allow I/O */ + dz11_addr->csr = DZ11_CSR_MSE; + + /* Wait for Transmit Ready, then stuff char into TDR register */ + do { + txcs = (u_short) dz11_addr->csr; + if (txcs & DZ11_CSR_TRDY) { + /* We should really check that this TRDY is for + * the correct line, and not one of the other lines */ + dz11_addr->msr_tdr.tdr = (u_short) txdb; + done = 1; + } + } while (!done); + + /* Wait again for Transmit Ready */ + while (((txcs = dz11_addr->csr) & DZ11_CSR_TRDY) == 0) + /* wait */; +} + +int dz11_getchar(void) +{ + /* Not yet implemented */ + asm("halt"); + return 0; +} + +void init_dz11_console(unsigned long dz11_phys_addr, unsigned int line) +{ + if (dz11_addr != NULL) + return; + + dz11_addr = ioremap(dz11_phys_addr, sizeof(*dz11_addr)); + dz11_line = line; +} +//#endif /* CONFIG_DZ */ + +#ifdef CONFIG_CPU_VXT +volatile int __iomem *vxt2694_addr = NULL; + +void +vxt2694_putchar (int c) +{ + /* wait for TxRDY */ + while ((vxt2694_addr[1] & 4) == 0) + /* spin */; + + /* send the character */ + vxt2694_addr[3] = c & 0xff; +} + +int +vxt2694_getchar (void) +{ + HALT; + return 0; +} + +void +init_vxt2694_console (unsigned long phys_addr) +{ + if (vxt2694_addr) + return; + + vxt2694_addr = ioremap (phys_addr, 256); + return; +} +#endif /* CONFIG_CPU_VXT */ + diff -Nru a/arch/vax/kernel/cpu_ka41.c b/arch/vax/kernel/cpu_ka41.c --- a/arch/vax/kernel/cpu_ka41.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka41.c 2005-10-03 15:43:24 @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2005 by Jan-Benedict Glaw + * + * This file contains generic machine 
vector handlers for the + * KA41 CPU of the MicroVAX 3100. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void ka41_post_vm_init(void) +{ +#ifdef CONFIG_DZ + init_dz11_console(0x200A0000, 3); + dz_serial_console_init(); +#endif +} + +static const char *ka41_cpu_type_str(void) +{ + return "KA41"; +} + +struct vax_mv mv_ka41 = { + .post_vm_init = ka41_post_vm_init, + .pre_vm_putchar = ka4x_prom_putchar, + .pre_vm_getchar = ka4x_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .cpu_type_str = ka41_cpu_type_str, + .clock_init = ka4x_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpu_match_ka41 = { + .mv = &mv_ka41, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_CVAX << VAX_SID_FAMILY_SHIFT, + + .sidex_addr = CVAX_SIDEX_ADDR, + .sidex_mask = CVAX_SIDEX_TYPE_MASK, + .sidex_match = CVAX_SIDEX_TYPE_VS3100 << CVAX_SIDEX_TYPE_SHIFT, +}; + +static struct platform_device ka41_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static struct platform_device ka41_diag_led_device = { + .name = "diag_led" +}; + +static int __init ka41_platform_device_init (void) +{ + int retval; + + if (!is_ka41 ()) + return -ENODEV; + + platform_device_register (&ka41_diag_led_device); + + retval = platform_device_register (&ka41_vsbus_device); + if (!retval) { +#ifdef CONFIG_VSBUS + vsbus_add_fixed_device (&ka41_vsbus_device.dev, "lance", 0x200e0000, 5); + vsbus_add_fixed_device (&ka41_vsbus_device.dev, "dz", 0x200a0000, 6); + + /* Register internal SCSI bus */ + vsbus_add_fixed_device (&ka41_vsbus_device.dev, "vax-5380-int", 0x200c0080, 1); + + /* Register external SCSI bus */ + vsbus_add_fixed_device (&ka41_vsbus_device.dev, "vax-5380-ext", 0x200c0180, 0); +#endif + } + + return retval; +} + +arch_initcall (ka41_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka410.c b/arch/vax/kernel/cpu_ka410.c --- a/arch/vax/kernel/cpu_ka410.c 1970-01-01 01:00:00 +++ 
b/arch/vax/kernel/cpu_ka410.c 2005-04-26 00:25:05 @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * + * This file contains machine vector handlers for the + * KA410 CPU in the MicroVAX 2000 machines + * + */ + +#include +#include /* For printk */ +#include +#include +#include +#include /* For clock_init routines */ + + + +static void ka410_post_vm_init(void) +{ +#ifdef CONFIG_DZ + init_dz11_console(0x200A0000, 3); + dz_serial_console_init(); +#endif +} + +static const char *ka410_cpu_type_str(void) +{ + return "KA410"; +} + +struct vax_mv mv_ka410 = { + .post_vm_init = ka410_post_vm_init, + .pre_vm_putchar = ka4x_prom_putchar, + .pre_vm_getchar = ka4x_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .cpu_type_str = ka410_cpu_type_str, + .clock_init = ka4x_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka410 = { + .mv = &mv_ka410, + .sid_mask = VAX_SID_FAMILY_MASK | UVAX2_SID_SUBTYPE_MASK, + .sid_match = (VAX_UVAX2 << VAX_SID_FAMILY_SHIFT) | + (UVAX2_SID_SUBTYPE_KA410 << UVAX2_SID_SUBTYPE_SHIFT), + .sidex_addr = 0, + .sidex_mask = 0x00000000, + .sidex_match = 0x00000000, +}; + diff -Nru a/arch/vax/kernel/cpu_ka42.c b/arch/vax/kernel/cpu_ka42.c --- a/arch/vax/kernel/cpu_ka42.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka42.c 2005-04-26 00:25:05 @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * + * This file contains generic machine vector handlers for the + * KA42 CPUs in the early CVAX-based VAXstation 3100 + * machines (models 10 to 48) + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include +#include +#include +#include /* For clock_init routines */ +#include + +static void ka42_post_vm_init(void) +{ +#define KA42_CADR_S2E 0x80 +#define KA42_CADR_S1E 0x40 +#define KA42_CADR_ISE 0x20 +#define KA42_CADR_DSE 0x10 + __mtpr(KA42_CADR_S2E|KA42_CADR_S1E|KA42_CADR_ISE|KA42_CADR_DSE, PR_CADR); + +#ifdef CONFIG_DZ + init_dz11_console(0x200A0000, 3); + 
dz_serial_console_init(); +#endif +} + +static const char *ka42_cpu_type_str(void) +{ + return "KA42"; +} + +struct vax_mv mv_ka42 = { + .post_vm_init = ka42_post_vm_init, + .pre_vm_putchar = ka4x_prom_putchar, + .pre_vm_getchar = ka4x_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .cpu_type_str = ka42_cpu_type_str, + .clock_init = ka4x_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpu_match_ka42 = { + .mv = &mv_ka42, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_CVAX << VAX_SID_FAMILY_SHIFT, + + .sidex_addr = CVAX_SIDEX_ADDR, + .sidex_mask = CVAX_SIDEX_TYPE_MASK, + .sidex_match = CVAX_SIDEX_TYPE_VS3100 << CVAX_SIDEX_TYPE_SHIFT, +}; + +static struct platform_device ka42_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static struct platform_device ka42_diag_led_device = { + .name = "diag_led" +}; + +static int __init ka42_platform_device_init(void) +{ + int retval; + + if (!is_ka42()) + return -ENODEV; + + platform_device_register(&ka42_diag_led_device); + + retval = platform_device_register(&ka42_vsbus_device); + if (!retval) { +#ifdef CONFIG_VSBUS + vsbus_add_fixed_device(&ka42_vsbus_device.dev, "lance", 0x200e0000, 5); + vsbus_add_fixed_device(&ka42_vsbus_device.dev, "dz", 0x200a0000, 6); + + /* Register internal SCSI bus */ + vsbus_add_fixed_device(&ka42_vsbus_device.dev, "vax-5380-int", 0x200c0080, 1); + + /* Register external SCSI bus */ + vsbus_add_fixed_device(&ka42_vsbus_device.dev, "vax-5380-ext", 0x200c0180, 0); +#endif + } + + return retval; +} + +arch_initcall(ka42_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka43.c b/arch/vax/kernel/cpu_ka43.c --- a/arch/vax/kernel/cpu_ka43.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka43.c 2005-04-27 00:37:50 @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * + * This file contains generic machine vector handlers for the + * KA43 CPU in the RIGEL-based VAXstation 3100 + * + * 2000/04/01 Mattias Nordlund + * Fixed the cache initializing, 
added the functions + * ka43_cache_disbale/enable/clear and moved some stuff around. + * atp jun 2001 - machine check implementation + * atp Jul 2001 - diagmem remap functions + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include /* For clock_init routines */ +#include + +/* Internal CPU register space */ +static volatile struct ka43_cpu_regs __iomem *cpu_regs; + +/* + * We keep the cache page remaps handy incase we want to reset the cache + * - see the machine check etc.. + * - perhaps we should bung this in the mv too. + * + * atp jun 01 + */ +static volatile unsigned int __iomem *ka43_ctag_addr; +static volatile unsigned int __iomem *ka43_creg_addr; + +#define MC43_MAX 19 + +static char *ka43_mctype[MC43_MAX + 1] = { + "no error (0)", /* Code 0: No error */ + "FPA: protocol error", /* Code 1-5: FPA errors */ + "FPA: illegal opcode", + "FPA: operand parity error", + "FPA: unknown status", + "FPA: result parity error", + "unused (6)", /* Code 6-7: Unused */ + "unused (7)", + "MMU error (TLB miss)", /* Code 8-9: MMU errors */ + "MMU error (TLB hit)", + "HW interrupt at unused IPL", /* Code 10: Interrupt error */ + "MOVCx impossible state", /* Code 11-13: Microcode errors */ + "undefined trap code (i-box)", + "undefined control store address", + "unused (14)", /* Code 14-15: Unused */ + "unused (15)", + "PC tag or data parity error", /* Code 16: Cache error */ + "data bus parity error", /* Code 17: Read error */ + "data bus error (NXM)", /* Code 18: Write error */ + "undefined data bus state", /* Code 19: Bus error */ +}; + +static void ka43_cache_disable(volatile unsigned int *creg_addr) +{ + __mtpr(KA43_PCS_REFRESH, PR_PCSTS); /* Disable primary cache */ + __mtpr(__mfpr(PR_PCSTS), PR_PCSTS); /* Clear error flags */ + + /* Disable secondary cache */ + *creg_addr = *creg_addr & ~KA43_SESR_CENB; + + /* Clear error flags */ + *creg_addr = KA43_SESR_SERR | 
KA43_SESR_LERR | KA43_SESR_CERR; +} + +static void ka43_cache_clear(volatile unsigned int *ctag_addr) +{ + int i; + + for (i = 0; i < 256; i++) { + __mtpr(i * 8, PR_PCIDX); + __mtpr(KA43_PCTAG_PARITY, PR_PCTAG); + } + + __mtpr(KA43_PCS_FLUSH | KA43_PCS_REFRESH, PR_PCSTS); + + for (i = 0; i < KA43_CT2_SIZE / sizeof(*ctag_addr); i++) + ctag_addr[i] = 0xff; +} + +static void ka43_cache_enable(volatile unsigned int *creg_addr) +{ + volatile char *membase = (void *) 0x80000000; /* Physical 0x00000000 */ + int i, val; + + /* Enable primary cache */ + __mtpr(KA43_PCS_FLUSH | KA43_PCS_REFRESH, PR_PCSTS); /* Flush */ + + /* Enable secondary cache */ + *creg_addr = KA43_SESR_CENB; + for (i=0; i < 128 * 1024; i++) + val += membase[i]; + + __mtpr(KA43_PCS_ENABLE | KA43_PCS_REFRESH, PR_PCSTS); /* Enable */ +} + +static void ka43_cache_reset(void) +{ + /* + * Resetting the cache involves disabling it, then clear + * it and enable again. + */ + ka43_cache_disable(ka43_creg_addr); + ka43_cache_clear(ka43_ctag_addr); + ka43_cache_enable(ka43_creg_addr); +} + +/* + * Don't call ka43_cache_reset before this function (unlikely). + */ +static void ka43_post_vm_init(void) +{ +#ifdef CONFIG_DZ + init_dz11_console(0x200A0000, 3); + dz_serial_console_init(); +#endif + cpu_regs = ioremap(KA43_CPU_BASE, KA43_CPU_SIZE); + ka43_creg_addr = ioremap(KA43_CH2_CREG, 1); + ka43_ctag_addr = ioremap(KA43_CT2_BASE, KA43_CT2_SIZE); + + /* + * Disable parity on DMA and CPU memory accesses. Don't know what the + * story is with this, but VMS seems do this, too... + */ + cpu_regs->parctl = 0; + + /* + * Resetting the cache involves disabling it, then clear it and + * enable again. + */ + ka43_cache_reset(); +} + + +static const char *ka43_cpu_type_str(void) +{ + return "KA43"; +} + +/* + * If this seems very similar to the NetBSD implementation, then + * it is. After all how many ways can you check a sequence of flags? 
+ */ +static void ka43_mcheck(void *stkframe) +{ + /* Map the frame to the stack */ + struct ka43_mcframe *ka43frame = (struct ka43_mcframe *)stkframe; + + /* Tell us all about it */ + printk("KA43: machine check code %d (= 0x%x)\n", ka43frame->mc43_code, + ka43frame->mc43_code); + printk("KA43: reason: %s\n", ka43_mctype[ka43frame->mc43_code & 0xff]); + printk("KA43: at addr %x, pc %x, psl %x\n", ka43frame->mc43_addr, + ka43frame->mc43_pc, ka43frame->mc43_psl); + + /* FIXME Check restart and first part done flags */ + if ((ka43frame->mc43_code & KA43_MC_RESTART) || + (ka43frame->mc43_psl & KA43_PSL_FPDONE)) { + printk("KA43: recovering from machine-check.\n"); + ka43_cache_reset(); + return; + } + + /* Unknown error state, panic/halt the machine */ + printk("KA43: Machine Check - unknown error state - halting\n"); + printk("\nStack dump\n"); + hex_dump((void *)(&stkframe), 256); + dump_cur_regs(1); + show_cpu_regs(); + machine_halt(); +} + +/* + * Slap the KA43_DIAGMEM bit on an area of S0 memory - used by drivers. + * size is the size of the region in bytes. + */ +void ka43_diagmem_remap(unsigned long int address, unsigned long int size) +{ + int i; + pte_t *p = GET_SPTE_VIRT(address); + + /* + * The KA43 seems to be nicely fscked up... All physical memory + * is accessible from 0x00000000 up (as normal) and also from + * 0x28000000 (KA43_DIAGMEM) in IO space. In order to reliably + * share memory with the LANCE, we _must_ read and write to this + * shared memory via the DIAGMEM region. Maybe this bypasses + * caches or something... If you don't do this you get evil + * "memory read parity error" machine checks. + */ + + /* + * You MUST remember to clear the DIAGMEM bits in these PTEs + * before giving the pages back to free_pages(). 
+ */ + + printk(KERN_DEBUG "KA43: enabling KA43_DIAGMEM for memory from " + "0x%8lx to 0x%8lx\n", address, address + size); + for (i = 0; i < (size >> PAGE_SHIFT); i++, p++) { + set_pte(p, __pte(pte_val(*p) | (KA43_DIAGMEM >> PAGELET_SHIFT))); + __flush_tlb_one(address + i * PAGE_SIZE); + } +} + +struct vax_mv mv_ka43 = { + .post_vm_init = ka43_post_vm_init, + .pre_vm_putchar = ka4x_prom_putchar, + .pre_vm_getchar = ka4x_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .mcheck = ka43_mcheck, + .cpu_type_str = ka43_cpu_type_str, + .clock_init = ka4x_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka43 = { + .mv = &mv_ka43, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_RIGEL << VAX_SID_FAMILY_SHIFT, + .sidex_addr = RIGEL_SIDEX_ADDR, + .sidex_mask = 0x00000000, /* Don't care */ + .sidex_match = 0x00000000, +}; + +static struct platform_device ka43_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static struct platform_device ka43_diag_led_device = { + .name = "diag_led" +}; + +static int __init ka43_platform_device_init(void) +{ + int retval; + + if (!is_ka43()) + return -ENODEV; + + platform_device_register(&ka43_diag_led_device); + + retval = platform_device_register(&ka43_vsbus_device); + if (!retval) { +#ifdef CONFIG_VSBUS + vsbus_add_fixed_device(&ka43_vsbus_device.dev, "lance", 0x200e0000, 5); + vsbus_add_fixed_device(&ka43_vsbus_device.dev, "dz", 0x200a0000, 6); + + /* Register internal SCSI bus */ + vsbus_add_fixed_device(&ka43_vsbus_device.dev, "vax-5380-int", 0x200c0080, 1); + + /* Register external SCSI bus */ + vsbus_add_fixed_device(&ka43_vsbus_device.dev, "vax-5380-ext", 0x200c0180, 0); +#endif + } + + return retval; +} + +arch_initcall(ka43_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka46.c b/arch/vax/kernel/cpu_ka46.c --- a/arch/vax/kernel/cpu_ka46.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka46.c 2005-04-26 00:25:05 @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2000 Kenn Humborg + 
* + * This file contains generic machine vector handlers for the + * KA46 CPU in the MARIAH-based VAXstation 4000/60 + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include /* for clock_init routines */ +#include + +unsigned long int *ka46_dmamap; + +static void ka46_post_vm_init(void) +{ +#ifdef CONFIG_DZ + init_dz11_console(0x200A0000, 3); + dz_serial_console_init(); +#endif +} + +static const char *ka46_cpu_type_str(void) +{ + return "KA46"; +} + +static void ka46_cache_disable(void) +{ + *(int *)KA46_CCR &= ~KA46_CCR_SPECIO; /* Secondary */ + __mtpr(PCSTS_FLUSH, PR_PCSTS); /* Primary */ + *(int *)KA46_BWF0 &= ~KA46_BWF0_FEN; /* Invalidate filter */ +} + +static void ka46_cache_clear(void) +{ + int *tmp, i; + + /* Clear caches */ + tmp = (void *)KA46_INVFLT; /* Inv filter */ + for (i = 0; i < 32768; i++) + tmp[i] = 0; + + /* Write valid parity to all primary cache entries */ + for (i = 0; i < 256; i++) { + __mtpr(i << 3, PR_PCIDX); + __mtpr(PCTAG_PARITY, PR_PCTAG); + } + + /* Secondary cache */ + tmp = (void *)KA46_TAGST; + for (i = 0; i < KA46_TAGSZ * 2; i += 2) + tmp[i] = 0; +} + +static void ka46_cache_enable(void) +{ + /* Enable cache */ + *(int *)KA46_BWF0 |= KA46_BWF0_FEN; /* Invalidate filter */ + __mtpr(PCSTS_ENABLE, PR_PCSTS); + *(int *)KA46_CCR = KA46_CCR_SPECIO | KA46_CCR_CENA; +} + +static void ka46_pre_vm_init(void) +{ + /* Resetting the cache. 
*/ + ka46_cache_disable(); + ka46_cache_clear(); + ka46_cache_enable(); + + __mtpr(PR_ACCS, 2); /* Enable floating points */ +} + +static void ka46_dma_init(void) +{ + int i; + unsigned int __iomem *base_addr; + + /* + * At present we just map all of the GFP_DMA region + * this is obviously wasteful + */ + + /* Grab a block of 128kb */ + ka46_dmamap = (unsigned long int *)__get_free_pages(GFP_DMA, 5); + if (ka46_dmamap == NULL) { + printk(KERN_ERR "KA46 DMA unable to allocate map\n"); + return; + } + + /* + * Map all 16MB of I/O space to low 16MB of + * memory (the GFP_DMA region) + */ + base_addr = ioremap(KA46_DMAMAP, 0x4); + *base_addr = (unsigned int)ka46_dmamap; + for (i = 0; i < 0x8000; i++) + ka46_dmamap[i] = 0x80000000 | i; + + iounmap(base_addr); + + return; +} + +static void ka46_init_devices(void) +{ + printk("ka46: init_devices\n"); + + /* Initialise the DMA area */ + ka46_dma_init(); +} + +struct vax_mv mv_ka46 = { + .pre_vm_init = ka46_pre_vm_init, + .post_vm_init = ka46_post_vm_init, + .pre_vm_putchar = ka46_48_49_prom_putchar, + .pre_vm_getchar = ka46_48_49_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .init_devices = ka46_init_devices, + .cpu_type_str = ka46_cpu_type_str, + .clock_init = ka4x_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka46 = { + .mv = &mv_ka46, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_MARIAH << VAX_SID_FAMILY_SHIFT, + .sidex_addr = MARIAH_SIDEX_ADDR, + .sidex_mask = 0x00000000, + .sidex_match = 0x00000000, +}; + +static struct platform_device ka46_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static struct platform_device ka46_diag_led_device = { + .name = "diag_led" +}; + +static int __init ka46_platform_device_init(void) +{ + int retval; + + if (!is_ka46()) + return -ENODEV; + + platform_device_register(&ka46_diag_led_device); + + retval = platform_device_register(&ka46_vsbus_device); + if (!retval) { +#ifdef CONFIG_VSBUS + 
vsbus_add_fixed_device(&ka46_vsbus_device.dev, "lance", 0x200e0000, 1); + vsbus_add_fixed_device(&ka46_vsbus_device.dev, "dz", 0x200a0000, 4); +#endif + } + + return retval; +} + +arch_initcall(ka46_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka48.c b/arch/vax/kernel/cpu_ka48.c --- a/arch/vax/kernel/cpu_ka48.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka48.c 2005-04-26 00:25:05 @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2004 B�rczi G�bor (Gabucino) + * Based on cpu_ka46.c Copyright (C) 2000 Kenn Humborg + * + * This file contains generic machine vector handlers for the + * KA48 CPU in the VAXstation 4000/VLC + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include /* For clock_init routines */ +#include + +static void ka48_post_vm_init(void) +{ +#ifdef CONFIG_DZ + init_dz11_console(0x200A0000, 3); + dz_serial_console_init(); +#endif +} + +static const char *ka48_cpu_type_str(void) +{ + return "KA48"; +} + +static void ka48_cache_disable(void) +{ + long *par_ctl = (long *)KA48_PARCTL; + + __mtpr(0, PR_CADR); /* Disable */ + *par_ctl &= ~KA48_PARCTL_INVENA; /* Clear? Invalid enable */ + __mtpr(2, PR_CADR); /* Flush */ +} + +static void ka48_cache_clear(void) +{ + int *tmp, i; + + /* Clear caches */ + tmp = (void *)KA48_INVFLT; /* Inv filter */ + for (i = 0; i < KA48_INVFLTSZ / sizeof(int); i++) + tmp[i] = 0; +} + +static void ka48_cache_enable(void) +{ + /* Enable cache */ + long *par_ctl = (long *)KA48_PARCTL; + + *par_ctl |= KA48_PARCTL_INVENA; /* Enable ???? */ + __mtpr(4, PR_CADR); /* enable cache */ + *par_ctl |= (KA48_PARCTL_AGS | /* AGS? */ + KA48_PARCTL_NPEN | /* N? Parity Enable */ + KA48_PARCTL_CPEN); /* Cpu parity enable */ +} + +static void ka48_pre_vm_init(void) +{ + /* + * Resetting the cache involves disabling it, then clear it and enable + * again. 
+ */ + ka48_cache_disable(); + ka48_cache_clear(); + ka48_cache_enable(); + __mtpr(PR_ACCS, 2); /* Enable floating points */ +} + +static void ka48_dma_init(void) +{ + int i; + unsigned int __iomem *base_addr; + unsigned long int *ka48_dmamap; + + /* + * At present we just map all of the GFP_DMA region + * this is obviously wasteful... + */ + + /* Grab a block of 128kb */ + ka48_dmamap = (unsigned long int *)__get_free_pages(GFP_DMA, 5); + if (ka48_dmamap == NULL) { + printk(KERN_ERR "KA48 DMA unable to allocate map\n"); + return; + } + + /* + * Map all 16MB of I/O space to low 16MB of memory (the GFP_DMA + * region) + */ + base_addr = ioremap(KA48_DMAMAP, 0x4); + *base_addr = (unsigned int)ka48_dmamap; + for (i = 0; i < 0x8000; i++) + ka48_dmamap[i] = 0x80000000 | i; + iounmap(base_addr); + + return; +} + +static void ka48_init_devices(void) +{ + printk("ka48: init_devices\n"); + + /* Initialise the DMA area */ + ka48_dma_init(); +} + +struct vax_mv mv_ka48 = { + .pre_vm_init = ka48_pre_vm_init, + .post_vm_init = ka48_post_vm_init, + .pre_vm_putchar = ka46_48_49_prom_putchar, + .pre_vm_getchar = ka46_48_49_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .init_devices = ka48_init_devices, + .cpu_type_str = ka48_cpu_type_str, + .clock_init = ka4x_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka48 = { + .mv = &mv_ka48, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_SOC << VAX_SID_FAMILY_SHIFT, + .sidex_addr = SOC_SIDEX_ADDR, + .sidex_mask = SOC_SIDEX_TYPE_MASK, + .sidex_match = SOC_SIDEX_TYPE_KA48 << SOC_SIDEX_TYPE_SHIFT, +}; + +static struct platform_device ka48_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static int __init ka48_platform_device_init(void) +{ + int retval; + + if (!is_ka48()) + return -ENODEV; + + retval = platform_device_register(&ka48_vsbus_device); + if (!retval) { + vsbus_add_fixed_device(&ka48_vsbus_device.dev, "lance", 0x200e0000, 1); + } + + return retval; +} + 
+arch_initcall(ka48_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka49.c b/arch/vax/kernel/cpu_ka49.c --- a/arch/vax/kernel/cpu_ka49.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka49.c 2005-04-27 08:51:53 @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2004 by Jan-Benedict Glaw + * + * This file contains a machine vector for the KA49 CPU. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include /* for clock_init routines */ +#include + +static void ka49_post_vm_init(void) +{ + int start, slut; + +#ifdef CONFIG_DZ + init_dz11_console (0x25000000, 3); + dz_serial_console_init (); +#endif + /* + * Enable Caches + */ +#define PR_CCTL 0x0a +#define CCTL_ENABLE 0x00000001 +#define CCTL_SSIZE 0x00000002 +#define CCTL_VSIZE 0x00000004 +#define CCTL_SW_ETM 0x40000000 +#define CCTL_HW_ETM 0x80000000 + +#define PR_BCETSTS 0xa3 +#define PR_BCEDSTS 0xa6 +#define PR_NESTS 0xae + +#define PR_VMAR 0xd0 +#define PR_VTAG 0xd1 +#define PR_ICSR 0xd3 +#define ICSR_ENABLE 0x01 + +#define PR_PCCTL 0xf8 +#define PCCTL_P_EN 0x10 /* Primary Cache Enable */ +#define PCCTL_I_EN 0x02 /* Instruction Cache Enable */ +#define PCCTL_D_EN 0x01 /* Data Cache Enable */ + + /* + * Caches off + */ + __mtpr (0, PR_ICSR); + __mtpr (0, PR_PCCTL); + __mtpr (__mfpr (PR_CCTL) | CCTL_SW_ETM, PR_CCTL); + + /* + * Invalidate Caches + */ + __mtpr (__mfpr (PR_CCTL) | 0x10, PR_CCTL); /* Set Cache Size */ + __mtpr (__mfpr (PR_BCETSTS), PR_BCETSTS); /* Clear Error Bits */ + __mtpr (__mfpr (PR_BCEDSTS), PR_BCEDSTS); /* Clear Error Bits */ + __mtpr (__mfpr (PR_NESTS), PR_NESTS); /* Clear Error Bits */ + + /* + * Flush Cache Lines + */ + start = 0x01400000; + slut = 0x01440000; + for (; start < slut; start += 0x20) + __mtpr (0, start); + __mtpr ((__mfpr (PR_CCTL) & ~(CCTL_SW_ETM | CCTL_ENABLE)) | CCTL_HW_ETM, PR_CCTL); + + /* + * Clear Tag and Valid + */ + start = 0x01000000; + slut = 0x01040000; + for (; start < slut; start += 0x20) + __mtpr (0, start); + 
__mtpr (__mfpr (PR_CCTL) | 0x10 | CCTL_ENABLE, PR_CCTL); /* Enable BCache */ + + /* + * Clear Primary Cache (2nd level, 8KB, on-CPU) + */ + start = 0x01800000; + slut = 0x01802000; + for (; start < slut; start += 0x20) + __mtpr (0, start); + + /* + * Flush Instruction Cache + */ + flush_icache (); + + /* + * Enable Primary Cache + */ + __mtpr (PCCTL_P_EN | PCCTL_I_EN | PCCTL_D_EN, PR_PCCTL); + + /* + * Enable Virtual Instruction Cache (1st level, 2KB, on-CPU) + */ + start = 0x00000000; + slut = 0x00000800; + for (; start < slut; start += 0x20) { + __mtpr (start, PR_VMAR); + __mtpr (0, PR_VTAG); + } + __mtpr (ICSR_ENABLE, PR_ICSR); + + return; +} + +static const char *ka49_cpu_type_str(void) +{ + return "KA49"; +} + +static void ka49_init_devices(void) +{ + printk ("ka49: init_devices\n"); +} + +struct vax_mv mv_ka49 = { + .post_vm_init = ka49_post_vm_init, + .pre_vm_putchar = ka46_48_49_prom_putchar, + .pre_vm_getchar = ka46_48_49_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .init_devices = ka49_init_devices, + .cpu_type_str = ka49_cpu_type_str, + .clock_init = ka4x_clock_init, + .nicr_required = 1, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka49 = { + .mv = &mv_ka49, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_NVAX << VAX_SID_FAMILY_SHIFT, + .sidex_addr = NVAX_SIDEX_ADDR, + .sidex_mask = 0xffffffff, /* Don't yet know how to interpret + SID + SIDEX, so keep it tight */ + .sidex_match = 0x04010002, +}; + +static struct platform_device ka49_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static struct platform_device ka49_diag_led_device = { + .name = "diag_led" +}; + +static int __init ka49_platform_device_init(void) +{ + int retval; + + if (!is_ka49()) + return -ENODEV; + + platform_device_register (&ka49_diag_led_device); + + retval = platform_device_register (&ka49_vsbus_device); + if (!retval) { +#ifdef CONFIG_VSBUS + vsbus_add_fixed_device(&ka49_vsbus_device.dev, "sgec", 0x20008000, 1); + 
vsbus_add_fixed_device(&ka49_vsbus_device.dev, "dz", 0x25000000, 4); +#endif + } + + return retval; +} + +arch_initcall (ka49_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka52.c b/arch/vax/kernel/cpu_ka52.c --- a/arch/vax/kernel/cpu_ka52.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka52.c 2005-10-03 15:43:24 @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2005 by Jan-Benedict Glaw + * + * This file contains generic machine vector handler for the + * KA52 CPU (used in VAXstations 4000 Model 100A) + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include /* for clock_init routines */ +#include + +/* From bootcons/ */ +extern void ka52_prevm_putchar (unsigned char c); +extern void ka52_console_init (unsigned long address); +extern void ka52_postvm_putchar (unsigned char c); + + + +static void ka52_post_vm_init(void) +{ +#if 0 + //ka52_console_init (0x20140080); + ka52_console_init (0x25000000); +#endif +#ifdef CONFIG_DZ + init_dz11_console(0x25000000, 3); + dz_serial_console_init(); +#endif +} + +static const char *ka52_cpu_type_str(void) +{ + return "KA52"; +} + +static void ka52_pre_vm_init(void) +{ + //__mtpr(PR_ACCS, 2); /* Enable floating points */ +} + +static void +ka52_mcheck (void *stkframe) +{ + return; +} + +struct vax_mv mv_ka52 = { + .pre_vm_init = ka52_pre_vm_init, + .post_vm_init = ka52_post_vm_init, + .pre_vm_putchar = ka52_prevm_putchar, + .pre_vm_getchar = ka46_48_49_prom_getchar, + .post_vm_putchar = ka46_48_49_prom_putchar /*ka52_postvm_putchar*/, + .post_vm_getchar = dz11_getchar, + .cpu_type_str = ka52_cpu_type_str, + //.clock_init = ka4x_clock_init, + .mcheck = ka52_mcheck, + .nicr_required = 1, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka52 = { + .mv = &mv_ka52, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_NVAX << VAX_SID_FAMILY_SHIFT, + .sidex_addr = NVAX_SIDEX_ADDR, + .sidex_mask = 0x00000000, + .sidex_match = 0x00000000, +}; 
+ +static struct platform_device ka52_vsbus_device = { + .name = "ka4x-vsbus" +}; + +static struct platform_device ka52_diag_led_device = { + .name = "diag_led" +}; + +static int __init ka52_platform_device_init(void) +{ + int retval; + + if (!is_ka52 ()) + return -ENODEV; + + platform_device_register (&ka52_diag_led_device); + + retval = platform_device_register (&ka52_vsbus_device); + if (!retval) { +#ifdef CONFIG_VSBUS + vsbus_add_fixed_device (&ka52_vsbus_device.dev, "lance", 0x200e0000, 1); + vsbus_add_fixed_device (&ka52_vsbus_device.dev, "dz", 0x200a0000, 4); +#endif + } + + return retval; +} + +arch_initcall (ka52_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka55.c b/arch/vax/kernel/cpu_ka55.c --- a/arch/vax/kernel/cpu_ka55.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka55.c 2005-04-26 00:25:05 @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * + * This file contains generic machine vector handlers for the + * KA5 CPU in the NVAX-based MicroVAX 3100 Model 85 + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include +#include /* For clock_init routines */ + +static void ka55_prom_putchar(int c) +{ + asm ( + " movl $0x2014044b, %%r11 # console page addr \n" + "1: jsb *0x20(%%r11) # ready to TX? 
\n" + " blbc %%r0, 1b \n" + " movl %0, %%r1 \n" + " jsb *0x24(%%r11) # TX char in R11 \n" + : /* no outputs */ + : "g"(c) + : "r0", "r1", "r11"); +} + +static int ka55_prom_getchar(void) +{ + /* Not yet implemented */ + asm("halt"); + return 0; +} + +static void ka55_post_vm_init(void) +{ +#ifdef CONFIG_DZ + init_dz11_console(0x25000000, 3); + dz_serial_console_init(); +#endif +} + +static const char *ka55_cpu_type_str(void) +{ + return "KA55"; +} + +static void ka55_init_devices(void) +{ +} + +struct vax_mv mv_ka55 = { + .post_vm_init = ka55_post_vm_init, + .pre_vm_putchar = ka55_prom_putchar, + .pre_vm_getchar = ka55_prom_getchar, + .post_vm_putchar = dz11_putchar, + .post_vm_getchar = dz11_getchar, + .init_devices = ka55_init_devices, + .cpu_type_str = ka55_cpu_type_str, + .clock_init = generic_clock_init, +}; + +#warning "KA55 needs a struct cpumatch" + +#warning "KA55 needs a platform_device_init function" + diff -Nru a/arch/vax/kernel/cpu_ka62.c b/arch/vax/kernel/cpu_ka62.c --- a/arch/vax/kernel/cpu_ka62.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka62.c 2005-10-03 15:43:24 @@ -0,0 +1,32 @@ +/* + * Experimental CPU vector for my 6000/320 + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include /* For clock_init routines */ + +static const char *ka62_cpu_type_str(void) +{ + return "KA62"; +} + +struct vax_mv mv_ka62 = { + .pre_vm_putchar = mtpr_putchar, + .pre_vm_getchar = mtpr_getchar, + .post_vm_putchar = mtpr_putchar, + .post_vm_getchar = mtpr_getchar, + .cpu_type_str = ka62_cpu_type_str, + .clock_init = generic_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka62 = { + .mv = &mv_ka62, + .sid_mask = 0xffffffff, + .sid_match = 0x0a000005, + .sidex_addr = 0, +}; + diff -Nru a/arch/vax/kernel/cpu_ka630.c b/arch/vax/kernel/cpu_ka630.c --- a/arch/vax/kernel/cpu_ka630.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka630.c 2005-04-26 00:25:05 @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * 
+ * This file contains machine vector handlers for the + * KA630 CPU in the MicroVAX II machines + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include /* For clock_init routines */ + +static const char *ka630_cpu_type_str(void) +{ + return "KA630"; +} + +struct vax_mv mv_ka630 = { + .pre_vm_putchar = mtpr_putchar, + .pre_vm_getchar = mtpr_getchar, + .post_vm_putchar = mtpr_putchar, + .post_vm_getchar = mtpr_getchar, + .cpu_type_str = ka630_cpu_type_str, + .clock_init = generic_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka630 = { + .mv = &mv_ka630, + .sid_mask = VAX_SID_FAMILY_MASK | UVAX2_SID_SUBTYPE_MASK, + .sid_match = (VAX_UVAX2 << VAX_SID_FAMILY_SHIFT) | + (UVAX2_SID_SUBTYPE_KA630 << UVAX2_SID_SUBTYPE_SHIFT), + .sidex_addr = 0, + .sidex_mask = 0x00000000, + .sidex_match = 0x00000000, +}; + +static struct cpu_match __CPU_MATCH cpumatch_charon = { + .mv = &mv_ka630, + .sid_mask = VAX_SID_FAMILY_MASK | UVAX2_SID_SUBTYPE_MASK, + .sid_match = (VAX_UVAX2 << VAX_SID_FAMILY_SHIFT) | + (UVAX2_SID_SUBTYPE_KA630 << UVAX2_SID_SUBTYPE_SHIFT), + .sidex_addr = 0, + .sidex_mask = 0x00000000, + .sidex_match = 0x00000000, +}; + diff -Nru a/arch/vax/kernel/cpu_ka640.c b/arch/vax/kernel/cpu_ka640.c --- a/arch/vax/kernel/cpu_ka640.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka640.c 2005-04-26 00:25:05 @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2000 Mattias Nordlund + * + * This file contains machine vector handlers for the + * KA640 CPU in the MicroVAX 3400 series machines + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include /* For clock_init routines */ + +static unsigned int *ka640_cacr = (unsigned int *)0x20084000; + +static void ka640_pre_vm_init(void) +{ + __mtpr(0, PR_CADR); +} + +static void ka640_post_vm_init(void) +{ +#define KA640_CADR_S2E 0x80 /* Enable set 2 of level 1 cache */ +#define KA640_CADR_S1E 0x40 /* Enable set 1 of level 1 cache */ +#define 
KA640_CADR_ISE 0x20 /* Enable instruction caching in level 1 cache */ +#define KA640_CADR_DSE 0x10 /* Enable data caching in level 1 cache */ + + /* + * Writing to PR_CADR on the CVAX chip implicitly clears + * the level 1 cache + */ + __mtpr(KA640_CADR_S2E|KA640_CADR_S1E|KA640_CADR_ISE|KA640_CADR_DSE, PR_CADR); +} + +static const char *ka640_cpu_type_str(void) +{ + return "KA640"; +} + +struct vax_mv mv_ka640 = { + .pre_vm_init = ka640_pre_vm_init, + .post_vm_init = ka640_post_vm_init, + .pre_vm_putchar = mtpr_putchar, + .pre_vm_getchar = mtpr_getchar, + .post_vm_putchar = mtpr_putchar, + .post_vm_getchar = mtpr_getchar, + .cpu_type_str = ka640_cpu_type_str, + .clock_init = generic_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka640 = { + .mv = &mv_ka640, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_CVAX << VAX_SID_FAMILY_SHIFT, + + .sidex_addr = CVAX_SIDEX_ADDR, + + .sidex_mask = CVAX_SIDEX_TYPE_MASK | CVAX_Q22_SUBTYPE_MASK, + .sidex_match = (CVAX_SIDEX_TYPE_Q22 << CVAX_SIDEX_TYPE_SHIFT) | + (CVAX_Q22_SUBTYPE_KA640 << CVAX_Q22_SUBTYPE_SHIFT), +}; + diff -Nru a/arch/vax/kernel/cpu_ka650.c b/arch/vax/kernel/cpu_ka650.c --- a/arch/vax/kernel/cpu_ka650.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka650.c 2005-04-26 00:25:05 @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * + * This file contains machine vector handlers for the + * KA650 CPU in the MicroVAX III series machines + */ + +#include /* For NULL */ +#include /* For printk */ +#include +#include + +#include +#include +#include +#include /* For clock_init routines */ + +static unsigned int *ka650_cacr = (unsigned int *) 0x20084000; + +static void ka650_pre_vm_init(void) +{ + /* + * Disable the level 1 and level 2 caches. My docs say that the + * caches are disabled automatically at power up and when DCOK + * is negated when the processor is halted. The firmware BOOT + * command might also do this, but I can't find any docs to + * prove this. 
+ */ + __mtpr(0, PR_CADR); + + *ka650_cacr = 0; + + /* + * We need to clear out the second level cache at some point. + * On the KA650, you do this by writing directly to the cache + * diagnostic space at 0x10000000 (physical). The cache enable + * bit is also set here, but the cache won't actually start + * caching until the level 1 cache is enabled in post_vm_init() + */ +#define KA650_CACR_CPE 0x20 /* Level 2 cache parity error (write to clear) */ +#define KA650_CACR_CEN 0x10 /* Level 2 cache enable */ +#define KA650_L2CACHE_DIAG_ADDR 0x10000000 +#define KA650_L2CACHE_DIAG_SIZE 0x00010000 + memset((void *)KA650_L2CACHE_DIAG_ADDR, 0, KA650_L2CACHE_DIAG_SIZE); + *ka650_cacr = KA650_CACR_CPE | KA650_CACR_CEN; +} + +static void ka650_post_vm_init(void) +{ +#define KA650_CADR_S2E 0x80 /* Enable set 2 of level 1 cache */ +#define KA650_CADR_S1E 0x40 /* Enable set 1 of level 1 cache */ +#define KA650_CADR_ISE 0x20 /* Enable instruction caching in level 1 cache */ +#define KA650_CADR_DSE 0x10 /* Enable data caching in level 1 cache */ + + /* + * Writing to PR_CADR on the CVAX chip implicitly clears + * the level 1 cache. 
+ */ + __mtpr(KA650_CADR_S2E|KA650_CADR_S1E|KA650_CADR_ISE|KA650_CADR_DSE, PR_CADR); +} + +static const char *ka650_cpu_type_str(void) +{ + return "KA650"; +} + +struct vax_mv mv_ka650 = { + .pre_vm_init = ka650_pre_vm_init, + .post_vm_init = ka650_post_vm_init, + .pre_vm_putchar = mtpr_putchar, + .pre_vm_getchar = mtpr_getchar, + .post_vm_putchar = mtpr_putchar, + .post_vm_getchar = mtpr_getchar, + .cpu_type_str = ka650_cpu_type_str, + .clock_init = generic_clock_init, +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka650 = { + .mv = &mv_ka650, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_CVAX << VAX_SID_FAMILY_SHIFT, + + .sidex_addr = CVAX_SIDEX_ADDR, + + .sidex_mask = CVAX_SIDEX_TYPE_MASK | CVAX_Q22_SUBTYPE_MASK, + .sidex_match = (CVAX_SIDEX_TYPE_Q22 << CVAX_SIDEX_TYPE_SHIFT) | + (CVAX_Q22_SUBTYPE_KA650 << CVAX_Q22_SUBTYPE_SHIFT), +}; + +static struct cpu_match __CPU_MATCH cpumatch_ka655 = { + .mv = &mv_ka650, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_CVAX << VAX_SID_FAMILY_SHIFT, + + .sidex_addr = CVAX_SIDEX_ADDR, + + .sidex_mask = CVAX_SIDEX_TYPE_MASK | CVAX_Q22_SUBTYPE_MASK, + .sidex_match = (CVAX_SIDEX_TYPE_Q22 << CVAX_SIDEX_TYPE_SHIFT) | + (CVAX_Q22_SUBTYPE_KA655 << CVAX_Q22_SUBTYPE_SHIFT), +}; + +static struct platform_device ka650_cqbic_device = { + .name = "cqbic" +}; + +static struct platform_device ka650_iprcons_device = { + .name = "iprcons" +}; + +static int __init ka650_platform_device_init(void) +{ + if (!is_ka650()) + return -ENODEV; + + platform_device_register(&ka650_cqbic_device); + platform_device_register(&ka650_iprcons_device); + + return 0; +} + +arch_initcall(ka650_platform_device_init); + diff -Nru a/arch/vax/kernel/cpu_ka660.c b/arch/vax/kernel/cpu_ka660.c --- a/arch/vax/kernel/cpu_ka660.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_ka660.c 2005-10-22 18:31:04 @@ -0,0 +1,69 @@ +/* + * This file contains machine vector handlers for the + * KA660 CPU in the VAXserver 4000-200 machines. 
+ *
+ * For the VAXserver machines I have, the SID is 14000006 and
+ * the sidex is 01370502. The sidex seems to have a similar
+ * breakdown to that of a CVAX with a Q22 bus. Bootstrap indicates
+ * a firmware rev 3.7 supporting this assumption. If anyone knows
+ * differently, let me know.
+ *
+ */
+
+#include /* For NULL */
+#include /* For printk */
+#include
+#include
+
+#include
+#include
+#include
+#include /* For clock_init routines */
+
+static const char *ka660_cpu_type_str(void)
+{
+ return "KA660";
+}
+
+struct vax_mv mv_ka660 = {
+ .pre_vm_putchar = mtpr_putchar,
+ .pre_vm_getchar = mtpr_getchar,
+ .post_vm_putchar = mtpr_putchar,
+ .post_vm_getchar = mtpr_getchar,
+ .cpu_type_str = ka660_cpu_type_str,
+ .clock_init = generic_clock_init,
+};
+
+static struct cpu_match __CPU_MATCH cpumatch_ka660 = {
+ .mv = &mv_ka660,
+ .sid_mask = VAX_SID_FAMILY_MASK,
+ .sid_match = VAX_SOC << VAX_SID_FAMILY_SHIFT,
+
+ .sidex_addr = SOC_SIDEX_ADDR,
+
+ .sidex_mask = SOC_SIDEX_TYPE_MASK | SOC_Q22_SUBTYPE_MASK,
+ .sidex_match = (SOC_SIDEX_TYPE_Q22 << SOC_SIDEX_TYPE_SHIFT) |
+ (SOC_Q22_SUBTYPE_KA660 << SOC_Q22_SUBTYPE_SHIFT),
+};
+
+static struct platform_device ka660_cqbic_device = {
+ .name = "cqbic"
+};
+
+static struct platform_device ka660_iprcons_device = {
+ .name = "iprcons"
+};
+
+static int __init ka660_platform_device_init(void)
+{
+ if (!is_ka660())
+ return -ENODEV;
+
+ platform_device_register(&ka660_cqbic_device);
+ platform_device_register(&ka660_iprcons_device);
+
+ return 0;
+}
+
+arch_initcall(ka660_platform_device_init);
+
diff -Nru a/arch/vax/kernel/cpu_vxt.c b/arch/vax/kernel/cpu_vxt.c --- a/arch/vax/kernel/cpu_vxt.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/cpu_vxt.c 2005-04-25 12:39:55 @@ -0,0 +1,78 @@ +/*
+ * This file contains machine vector handlers for the
+ * VXT CPU in the VXT2000 machines.
+ *
+ * From mailing list messages the SID is 14000006 but NetBSD uses 14000008.
+ *
+ * This may work for other ka48 based systems.
+ *
+ * May 2002. 
It looks as if the 20040058 address is right for prom output. + */ + +#warning recent VXT work in 2.4 needs to be pulled over + +#include /* For NULL */ +#include /* For printk */ +#include +#include +#include +#include +#include +#include /* For clock_init routines */ + +static void vxt_pre_vm_init(void) +{ +} + +static void vxt_post_vm_init(void) +{ + init_vxt2694_console (0x200a0000); +} + +static const char *vxt_cpu_type_str(void) +{ + if (mv->sidex == 0x08050002 /* FIXME */) + return "VXT2000+"; + else + return "probably VXT2000"; +} + +struct vax_mv mv_vxt = { + .pre_vm_init = vxt_pre_vm_init, + .post_vm_init = vxt_post_vm_init, + .pre_vm_putchar = ka4x_prom_putchar, + .pre_vm_getchar = ka4x_prom_getchar, + .post_vm_putchar = vxt2694_putchar, + .post_vm_getchar = vxt2694_getchar, + .cpu_type_str = vxt_cpu_type_str, + .clock_init = generic_clock_init, + .keep_early_console = 1, +}; + +static struct cpu_match __CPU_MATCH cpu_vxt = { + .mv = &mv_vxt, + .sid_mask = VAX_SID_FAMILY_MASK, + .sid_match = VAX_SOC << VAX_SID_FAMILY_SHIFT, + + .sidex_addr = SOC_SIDEX_ADDR, + + .sidex_mask = SOC_SIDEX_TYPE_MASK, + .sidex_match = SOC_SIDEX_TYPE_VXT << SOC_SIDEX_TYPE_SHIFT +}; + +static struct platform_device vxt_diag_led_device = { + .name = "diag_led" +}; + +static int __init vxt_platform_device_init (void) +{ + if (!is_vxt()) + return -ENODEV; + + platform_device_register (&vxt_diag_led_device); + + return 0; +} + +arch_initcall (vxt_platform_device_init); + diff -Nru a/arch/vax/kernel/diag_led.c b/arch/vax/kernel/diag_led.c --- a/arch/vax/kernel/diag_led.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/diag_led.c 2005-10-03 14:16:47 @@ -0,0 +1,235 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * This driver is licensed under the terms of the GNU General Public + * License Version 2 (GPLv2) or any later version. 
+ * + * (C) 2004 by Jan-Benedict Glaw + */ + +#define DIAG_LED_DEBUG + + +MODULE_AUTHOR ("Jan-Benedict Glaw "); +MODULE_LICENSE ("GPL"); +MODULE_DESCRIPTION ("Hackish driver for VAXens diagnostic LEDs"); + +static volatile uint8_t __iomem *diag; +static uint8_t state; +static int inverted; + + +/* + * This function tries to find a base address. If you get a message + * that your system isn't yet supported, add the correct address + * right here. + */ +static unsigned long +diag_led_get_base (void) +{ + inverted = 0; + + if (is_ka46 ()) { + inverted = 1; + return DIAG_LED_KA46_BASE; + } else if (is_ka42 ()) { + inverted = 1; + return DIAG_LED_KA42_BASE; + } else if (is_ka48 ()) { + inverted = 1; + return DIAG_LED_KA48_BASE; + } else if (is_ka49 ()) { + inverted = 1; + return DIAG_LED_KA49_BASE; + } else if (is_ka52 ()) { + inverted = 1; + return DIAG_LED_KA52_BASE; + } else if (is_vxt ()) { + inverted = 1; + return DIAG_LED_VXT_BASE; +#if 0 + } else if (is_ka670 ()) { + inverted = 1; + return DIAG_LED_KA670_BASE; +#endif + } else if (is_ka43 ()) { + inverted = 1; + return DIAG_LED_KA43_BASE; + } else { + printk (KERN_ERR "diag_led: No base address known for your machine yet!\n"); + return 0; + } +} + +/* + * A binary "1" for a lit LED, a binary "0" for an off LED + */ +int +diag_led_set_state (uint8_t new_state) +{ + if (!diag) + return -ENODEV; + + if (inverted) { + *diag = new_state ^ 0xff; + state = new_state; + } else { + *diag = new_state; + state = new_state; + } + + return 0; +} + +uint8_t +diag_led_get_state (void) +{ + if (!diag) { + printk (KERN_ERR "Attention, there's no diag LEDs known on " + "your system!!!\n"); + dump_stack (); + return 0; + } + + return state; +} + +/* + * led_num = 0 --> first LED + * led_num = 1 --> second LED + * led_num = 2 --> third LED + * ... 
+ */
+int
+diag_led_on (int led_num)
+{
+ uint8_t new_state;
+
+ if (led_num < 0 || led_num > 7) {
+ printk (KERN_ERR "led_num out of range!\n");
+ dump_stack ();
+ return -EINVAL;
+ }
+
+ new_state = diag_led_get_state () | (1 << led_num);
+
+ return diag_led_set_state (new_state);
+}
+
+/*
+ * led_num = 0 --> first LED
+ * led_num = 1 --> second LED
+ * led_num = 2 --> third LED
+ * ...
+ */
+int
+diag_led_off (int led_num)
+{
+ uint8_t new_state;
+
+ if (led_num < 0 || led_num > 7) {
+ printk (KERN_ERR "led_num out of range!\n");
+ dump_stack ();
+ return -EINVAL;
+ }
+
+ new_state = diag_led_get_state () & ~(1 << led_num);
+
+ return diag_led_set_state (new_state);
+}
+
+#ifdef DIAG_LED_DEBUG
+static void
+diag_led_knight_rider (void)
+{
+ int i, j;
+
+ for (i = 0; i < 10; i++) {
+ for (j = 0; j < 7; j++) {
+ diag_led_set_state (1 << j);
+ mdelay (30);
+ }
+ for (j = 7; j > 1; j--) {
+ diag_led_set_state (1 << j);
+ mdelay (30);
+ }
+ }
+
+ return;
+}
+#endif /* DIAG_LED_DEBUG */
+
+/*
+ * Find memory base and map that address
+ */
+int __init
+diag_led_probe (struct device *busdev)
+{
+ unsigned long base_address = diag_led_get_base ();
+
+ if (!base_address)
+ return -ENODEV;
+
+ diag = ioremap (base_address, 1);
+ if (!diag) {
+ /* FIXME: Register with /proc/iomem */
+ printk (KERN_ERR "Failed to ioremap (0x%08lx, 1)\n", base_address);
+ return -ENOMEM;
+ }
+
+ printk (KERN_INFO "Using diagnostic LEDs at 0x%08lx (virt 0x%p)\n",
+ base_address, diag);
+#ifdef DIAG_LED_DEBUG
+ diag_led_knight_rider ();
+#endif /* DIAG_LED_DEBUG */
+
+ diag_led_set_state (0x00);
+
+ return 0;
+}
+
+/*
+ * unmap the diag LEDs
+ */
+void __exit
+diag_led_exit (void)
+{
+ if (diag) {
+ printk (KERN_INFO "Shutting down diag LEDs at virt 0x%p\n",
+ diag);
+ iounmap ((void *) diag);
+ }
+
+ return;
+}
+
+static struct device_driver diag_led_driver = {
+ .name = "diag_led",
+ .bus = &platform_bus_type,
+ .probe = diag_led_probe,
+};
+
+static int __init
+diag_led_init (void)
+{
+ 
return driver_register (&diag_led_driver); +} + + +EXPORT_SYMBOL (diag_led_set_state); +EXPORT_SYMBOL (diag_led_get_state); +EXPORT_SYMBOL (diag_led_on); +EXPORT_SYMBOL (diag_led_off); + +module_init (diag_led_init); +module_exit (diag_led_exit); + diff -Nru a/arch/vax/kernel/early_printk.c b/arch/vax/kernel/early_printk.c --- a/arch/vax/kernel/early_printk.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/early_printk.c 2005-04-26 00:25:05 @@ -0,0 +1,61 @@ +/* + * Rudimentary console driver for early printk output. + * This depends on the CPU's machine vector having a working + * post_vm_putchar(). + * If a CPU can support early printk, it should call one + * of the init_early_printk_XXX() functions (at the bottom + * of this file) from the mv->post_vm_init() function + */ + +#include +#include +#include + +static int early_console_enabled; + +static void early_console_write(struct console *cons, const char *p, + unsigned int len) +{ + while (len--) { + if (*p == '\n') + mv->post_vm_putchar('\r'); + + mv->post_vm_putchar(*p++); + } +} + +struct console early_console = { + .name = "VAXcons", + .write = early_console_write, + .flags = CON_PRINTBUFFER, +}; + +void __init enable_early_printk(void) +{ + if (!mv->post_vm_putchar) + /* Cannot support early printk */ + return; + + early_console_enabled = 1; + register_console(&early_console); + + printk("Early console enabled\n"); +} + +void __init disable_early_printk(void) +{ + if (early_console_enabled) { + if (mv->keep_early_console) + printk (KERN_WARNING "Not disabling early console " + "because it's still needed!\n"); + else { + printk (KERN_INFO "Disabling early console. 
If this " + "is the last text you see, try to " + "append \"console=ttyS0\" to the " + "kernel command line\n");
+ unregister_console(&early_console);
+ early_console_enabled = 0;
+ }
+ }
+}
+
diff -Nru a/arch/vax/kernel/entry.S b/arch/vax/kernel/entry.S --- a/arch/vax/kernel/entry.S 1970-01-01 01:00:00 +++ b/arch/vax/kernel/entry.S 2005-10-31 14:27:18 @@ -0,0 +1,286 @@ +/*
+ * entry.S for the VAX architecture
+ * Copyright Dec 1998 atp.
+ * Copyright 2000, Kenn Humborg
+ * 2001 atp. Additions for Machine check handling.
+ * Copyright 2004, Jan-Benedict Glaw
+ */
+
+#include
+#include
+#include
+
+/*
+ * irqvec_handler is the generic handler for all interrupts and
+ * exceptions for which a driver (or other code) has registered
+ * a handler. Except machine checks. We are responsible for
+ *
+ * o saving all registers
+ *
+ * o determining the address of the irqvector that called
+ * us (actually we get the address of a field in the middle
+ * of the irqvector structure)
+ *
+ * o passing this and other useful info to do_irq_excep()
+ * in vax/kernel/interrupt.c
+ *
+ * o cleaning up the stack and dismissing the interrupt or
+ * exception
+ *
+ * See Documentation/vax/interrupts.txt for the gory details
+ */
+//.globl irqvec_handler
+//irqvec_handler:
+ENTRY(irqvec_handler)
+ /*
+ * At this point stack looks like:
+ *
+ * SP: handler_PC (inside the irqvector)
+ * (maybe) exception info
+ * saved PC
+ * saved PSL
+ *
+ * Here is where it starts to get a bit twisted (I _love_
+ * the VAX instruction set!).
+ *
+ * We need to duplicate the saved PC and PSL to form the
+ * end of the pt_regs struct. However, there may be some
+ * exception info between SP and the saved PC/PSL. The number
+ * of longwords of this exception info is available in the
+ * irqvector structure (at our "return address")
+ */
+
+ pushl %r0 /* Push R0 to have a free working register */
+
+ /*
+ * Get number of exception info longwords into R0. 
+ * Remember that this value is stored immediately after the + * JSB instruction in the irqvector, so the "return address" + * points to it + */ + movl *4(%sp), %r0 + addl2 $3, %r0 /* R0 now contains number of longwords between + * top of stack and saved PSL */ + + pushl (%sp)[%r0] /* Push saved PSL again */ + pushl (%sp)[%r0] /* Push saved PC again */ + extzv $22, $2, 4(%sp), -(%sp) /* Extract PREVMODE field from saved + * PSL and save on stack (because we + * don't have any spare registers yet) */ + cmpzv $24, $2, 8(%sp), (%sp) /* Is PREVMODE == CURMODE? */ + beql same_mode + mfpr (%sp),(%sp) /* HACK ALERT! The processor modes are 0..3 + * for kernel..user mode. The stack pointer + * internal processor registers are also 0..3 + * for kernel..user mode. So, using the + * PREVMODE value we just saved on the stack, + * we overwrite it with the correct stack + * pointer register. Net result: the saved_sp + * here is the correct stack pointer for the + * processor mode before the exception occurred */ + brb sp_saved +same_mode: + moval 16(%sp)[%r0],(%sp) /* Exception is not changing modes. Therefore + * we calculate how far up the stack the SP + * was pointing when the exception occurred. */ +sp_saved: + pushr $0x3ffe /* Push FP to R1 */ + pushl 64(%sp) /* Duplicate saved R0 */ + + /* + * The stack now looks like: + * + * SP: saved R0 + * SP+4 saved R1 + * ... + * SP+56 saved SP for previous mode + * SP+60 saved PC + * SP+64 saved PSL + * SP+68 saved R0 + * SP+72 handler_PC (inside the irqvector) + * SP+76 (maybe) exception info + * saved PC + * saved PSL + * + * Now build the argument list for do_irq_excep(). We need + * to pass the saved PC within the irqvector, the address of the + * pt_regs and the address of the exception info. Be careful + * when you modify this code, we're counting stack locations + * to get the right offsets here... 
+ */ + pushal 76(%sp) /* address of exception info */ + pushl 76(%sp) /* handler_PC */ + pushal 8(%sp) /* start of pt_regs */ + + calls $3, do_irq_excep + +.globl ret_from_syscall +ret_from_syscall: + /* + * Now we need to restore all registers, clear the stack down to + * the original saved PC/PSL and dismiss the interrupt. The + * stack currently looks like the same as above. + * + * Copy the duplicate saved R0 (part of the struct pt_regs) down to + * the other saved R0 that we'll be restoring from soon. This allows + * exception handlers to modify R0 in the thread of execution that + * triggered the exception. Note that the offset from SP is 64, not + * 68 as you'd expect from the stack layout shown above. This is + * because the SP is incremented by 4 while evaluating the first + * operand. + */ + movl (%sp)+, 64(%sp) + popr $0x3ffe /* Restore registers R1 up to FP */ + + /* + * The stack now looks like: + * + * SP: saved SP for previous mode + * +4 saved PC (maybe modified by exception handler) + * +8 saved PSL (end of struct pt_regs) + * +12 saved R0 (maybe modified by exception handler) + * +16 handler_PC (inside the irqvector) + * +20 (maybe) exception info + * ... + * saved PC (original saved by CPU) + * saved PSL + * + * Copy duplicate saved PC (part of the struct pt_regs) down over + * the original saved PC saved by the CPU. This allows the exception + * handlers to change the PC of the code that triggered the exception. + * This is tricky because of the exception info that may be present + * on the stack. First get the size of the exception info into R0 + */ + movl *16(%sp), %r0 + + /* + * Now move the saved PC down over the original (the 20-byte offset + * takes care of skipping over the top 5 items on the stack, and then + * the R0 index skips over the exception info. + */ + movl 4(%sp), 20(%sp)[%r0] + + /* + * D.A. 
May 2001 - we need to copy the PSL down, to + * get to usermode originally as we make up a new PSL + * in start_thread and we need the CPU to believe it + */ + movl 8(%sp), 24(%sp)[%r0] + + /* + * The stack now looks like: + * + * SP: saved SP for previous mode + * +4 saved PC (maybe modified by exception handler) + * +8 saved PSL (end of struct pt_regs) + * +12 saved R0 (maybe modified by exception handler) + * +16 handler_PC (inside the irqvector) + * +20 (maybe) exception info + * ... + * saved PC (maybe modified by exception handler) + * saved PSL + * + * Now move the saved R0 (r0+1) longwords down the stack, + * leaving it just before the saved PC, overwriting either the + * saved handler_PC or the end of the exception info ... + */ + movl 12(%sp), 16(%sp)[%r0] + /* ... and clear stack down to this point */ + moval 16(%sp)[%r0], %sp + + /* + * Stack now looks like + * + * SP: saved R0 + * +4 saved PC + * +8 saved PSL + */ + + /* Restore R0 and dismiss exception */ + movl (%sp)+, %r0 + mtpr $31, $PR_IPL + rei + + +ENTRY(ret_from_fork) + /* + * A newly-created thread starts here when it is first + * scheduled. R0 will contain the previous task (the one + * that we just scheduled away from on this CPU). + */ + pushl %r0 + calls $1, schedule_tail + brb ret_from_syscall + + +/* + * irqvec_stray is the generic handler for all exceptions and interrupts + * for which there is no registered handler. We just save all registers, + * and call unhandled_exception(), passing it the return address saved + * by the JSB instruction that got us here. 
This JSB instruction should + * be in the struct stray_handler[] array in interrupt.c + */ +ENTRY(irqvec_stray) + /* + * At this point stack looks like: + * + * SP: handler_PC (inside the stray_handler struct) + * (maybe) exception info + * saved PC + * saved PSL + */ + pushr $0x3fff /* Save FP to R0 */ + pushl 56(%sp) /* copy return address (handler_PC) */ + calls $1, unhandled_exception + + /* + * If unhandled_exception() returns, then we must be + * autoprobing interrupt vectors, and this exception + * must have been an interrupt. Therefore, there is + * no exception info on the stack, so it's safe to + * continue + */ + popr $0x3fff /* restore R0 to FP */ + moval 4(%sp), %sp /* Remove handler_PC */ + rei + + +/* + * mcheck_handler is the handler for machine check exceptions. + * This is here, because its easier to special case it, and deal with + * the machine dependent number of longwords than warp the generic + * registration methods to deal with it. + */ +.align 2 /* ensure longword alignment */ +.globl machine_check_handler +machine_check_handler: + /* + * Note this doesn't use the usual exception registration, as we don't + * know in advance how many longwords of exception info have been pushed. + * + * The interrupt stack at this point looks like this: + * + * SP: handler_PC (inside the irqvector) + * longword count of exception info + * exception longwords + * : + * PC + * PSL + */ + addl2 $4, %sp /* Discard the pointer into the irqvector */ + tstl probe_resume_addr /* Are we executing an iospace_probeX() call? 
*/ + bneq probing /* Branch if yes */ + + pushr $0x3f /* push all registers in case we can restart */ + pushab 24(%sp) /* address of stack slot which holds byte count */ + calls $1, machine_check /* in reboot.c */ + popr $0x3f /* spring them off */ + addl2 (%sp)+,%sp /* get rid of the machine check frame */ + rei /* dismiss */ +probing: + addl2 (%sp)+,%sp /* get rid of the machine check frame */ + mtpr $0x0f, $PR_MCESR /* clear the machine check error summary register, + * (taken from NetBSD) */ + movl probe_resume_addr, (%sp) + rei /* Return to the handler address */ + diff -Nru a/arch/vax/kernel/init_task.c b/arch/vax/kernel/init_task.c --- a/arch/vax/kernel/init_task.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/init_task.c 2005-07-31 16:31:26 @@ -0,0 +1,41 @@ +#include +#include +#include +#include +#include + +#include +#include +#include /* INIT_MMAP */ + + +/* This is copied from i386 for now. I don't know what we'll + need to change yet. KPH 2000-04-25 */ + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); + +/* + * Initial thread structure. + * + * We need to make sure that this is 8192-byte aligned due to the + * way process stacks are handled. This is done by having a special + * "init_task" linker map entry.. + * + * Note that once we drop from IPL 31 to IPL 0 during init, we'll + * be using the stack inside this union as the kernel stack. + */ +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. 
+ * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); + diff -Nru a/arch/vax/kernel/interrupt.c b/arch/vax/kernel/interrupt.c --- a/arch/vax/kernel/interrupt.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/interrupt.c 2005-04-25 16:35:02 @@ -0,0 +1,798 @@ +/* + * This file handles the interrupts and exceptions. + * + * It also contains the interrupt stack. Eventually, there will + * need to be a separate interrupt stack per-cpu, within the + * per-cpu data structures. + * + * FIXME: We should use the new interrupt architecture. It looks like + * a closer match to the VAX SCB. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "interrupt.h" /* Local, private definitions */ + +int do_signal(sigset_t *oldset, struct pt_regs *regs); /* signal.c */ + +unsigned char __attribute__((__aligned__(PAGE_SIZE))) interrupt_stack[NR_CPUS][INT_STACK_SIZE]; + +union scb_and_device_vectors __attribute__((__aligned__(PAGE_SIZE))) scb; + +/* + * Statically-defined pool of irqvector structures. This will go once + * we have a working kmalloc()/kfree(). + * + * Actually, it's not that simple... trap_init() is called before the + * slab caches are initialized so we can't call kmalloc() this early + * in initialization. What we could do is statically allocate a small + * number of irqvectors here (enough for trap_init() and friends) and + * then kmalloc() vectors on demand later. + * + * An entry in the list is free if the dest_addr field is zero, and + * is in use if non-zero. 
+ */ +struct irqvector irqvectors[NR_IRQVECTORS]; + +/* Default handlers for each SCB vector */ +static struct stray_handler stray_handlers[NR_IRQS]; + +/* Non-zero when autoprobing interrupt vectors */ +static int autoprobing; + + +void guard_int_stack(void) +{ + void *stack_base; + unsigned long base_pfn; + pte_t *base_pte_addr; + + /* + * Do we need more than a page for the int stack? + * Yes, if we want a guard page. + */ + if (INT_STACK_SIZE <= PAGE_SIZE) { + printk("Interrupt stack too small, must be > PAGE_SIZE\n"); + machine_halt(); + } + + stack_base = interrupt_stack + smp_processor_id(); + base_pfn = MAP_NR(stack_base); + + base_pte_addr = GET_SPTE_VIRT(stack_base); + + /* + * Set first page of interrupt stack area to kernel read, thus + * trapping any writes to this page. This will catch attempts + * to overflow the interrupt stack before they can do any damage. + */ + set_pte(base_pte_addr, pfn_pte(base_pfn, __pgprot(_PAGE_KR|_PAGE_VALID))); + + __flush_tlb_one(stack_base); +} + +static void setup_scb(void) +{ + int i; + extern void irqvec_stray(void); + + for (i = 0; i < NR_IRQS; i++) { + stray_handlers[i].inst_jsb = 0x16; /* JSB opcode */ + stray_handlers[i].inst_addr_mode = 0x9F; /* absolute */ + stray_handlers[i].dest_addr = irqvec_stray; + stray_handlers[i].flags = 0; + + SCB_VECTOR(i) = &stray_handlers[i].inst_jsb; + } + + flush_icache(); +} + +/* Register the machine check handler. 
*/ +void register_mcheck_handler(void) +{ + extern void machine_check_handler(struct pt_regs *regs, void *unused); + struct irqvector *vector; + unsigned char *inside_vec; + + /* First register things properly so that the irq functions don't get upset */ + if (register_excep_handler(SCB_MCHECK, "Machine Check (SCB_MCHECK: machine_check_handler)", machine_check_handler, 2, 1)) { + printk("Panic: unable to register machine check handler\n"); + machine_halt(); + } + + /* + * Install the specific machine check handler in entry.S + * We override the value set up above, in register_excep_handler, as + * its easier than special casing all the exception info sizing. + */ + inside_vec = (unsigned char *) ((unsigned long)(SCB_VECTOR(SCB_MCHECK)) & ~0x3); + vector = (struct irqvector *)(inside_vec - + offsetof(struct irqvector, inst_jsb)); + vector->dest_addr = machine_check_handler; +} + +void trap_init(void) +{ + struct exception_entry { + unsigned int exception_number; + unsigned char *exception_name; + void (*exception_handler)(struct pt_regs *, void *); + unsigned int exception_info_size; + unsigned int use_interrupt_stack; + } exception[] = { + { SCB_BPT, "Breakpoint fault (SCB_BPT: bpt_handler)", bpt_handler, 0, 0, }, + { SCB_XFC, "Reserved instruction (SCB_XFC: reserved_instr_handler)", reserved_instr_handler, 0, 0, }, + { SCB_CHMK, "CHMK trap (SCB_CHMK: syscall_handler)", syscall_handler, 1, 0, }, + { SCB_ARITH, "Arithmetic fault (SCB_ARITH: arith_handler)", arith_handler, 1, 0, }, + { SCB_RESAM, "Reserved addressing mode (SCB_RESAM: resam_handler)", resam_handler, 0, 0, }, + { SCB_RESOP, "Reserved operand (SCB_RESOP: reserved_operand_handler)", reserved_operand_handler, 0, 0, }, + { SCB_TPEND, "Trace Pending (SCB_TPEND: tpend_handler)", tpend_handler, 0, 0, }, + { SCB_ACCVIO, "Access violation (SCB_ACCVIO: page_fault_handler)", page_fault_handler, 2, 0, }, + /* Perhaps this should be done in CPU-specific code? 
*/ + { SCB_MEMCORR, "Memory corrected read (SCB_MEMCORR: corrected_read_handler)", corrected_read_handler, 0, 0, }, + { SCB_RESINSTR, "Reserved instruction (SCB_RESINSTR: reserved_instr_handler)", reserved_instr_handler, 0, 0, }, + { SCB_TRANS_INVAL, "Translation not valid (SCB_TRANS_INVAL: page_fault_handler)", page_fault_handler, 2, 0, }, + }; + int i; + + /* + * Initialize the SCB with the stray interrupt/exception + * handlers. Some of these will be overridden later + * as device drivers hook up to their interrupts. + */ + setup_scb(); + + /* + * And tell the hardware to use this SCB + */ + __mtpr(__pa(&scb), PR_SCBB); + + /* + * Register the machine check handler. This is a special case due to + * the machine specific exception info which is not fixed sized. + */ + register_mcheck_handler(); + + /* + * Now register all exception handlers + */ + for (i = 0; i < ARRAY_SIZE (exception); i++) { + if (register_excep_handler(exception[i].exception_number, + exception[i].exception_name, + exception[i].exception_handler, + exception[i].exception_info_size, + exception[i].use_interrupt_stack)) { + printk("Panic: unable to register \"%s\" handler\n", + exception[i].exception_name); + machine_halt(); + } + } +} + +void init_IRQ(void) +{ + /* Nothing to do... Already done by trap_init */ +} + +/* + * This is the handler for reserved operand faults and aborts. + * Eventually this will have to check if the fault was from user + * mode or kernel mode and either throw a SIGILL or panic. + */ +void reserved_operand_handler(struct pt_regs *regs, void *unused) +{ + printk("\nReserved operand fault at PC=%08lx\n", regs->pc); + + + printk("\nStack dump\n"); + hex_dump((void *)(regs->sp), 256); + + show_regs(regs); + show_cpu_regs(); + + if (user_mode(regs)) { + force_sig(SIGILL,current); + return; + } + machine_halt(); +} + +/* + * This is the handler for reserved instruction exceptions. 
+ * Eventually this will have to check if the fault was from user + * mode or kernel mode and either throw a SIGILL or panic. + */ +void reserved_instr_handler(struct pt_regs *regs, void *unused) +{ + unsigned short instr = *(unsigned short *)(regs->pc); + + if ((instr == 0xfeff) || (instr == 0xfdff)) { + printk("\nKernel bugcheck at PC=%08lx\n", regs->pc); + } else { + printk("\nReserved instruction at PC=%08lx\n", regs->pc); + } + + printk("\nStack dump\n"); + hex_dump((void *)(regs->sp), 256); + dump_stack(); + show_regs(regs); + show_cpu_regs(); + + if (user_mode(regs)) { + force_sig(SIGILL,current); + return; + } + + machine_halt(); +} + +/* This is the handler for break points */ +void bpt_handler(struct pt_regs *regs, void *unused) +{ + siginfo_t info; +#if 0 + printk("\nbp sending SIGTRAP\n"); + + printk("\nBreakpoint at PC=%08lx at %08lX\n", regs->pc, ®s->pc); + + printk("\nStack dump\n"); + hex_dump((void *)(regs->sp), 256); + show_regs(regs); + show_cpu_regs(); +#endif + if (user_mode(regs)) { + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_BRKPT; + info.si_addr = (void *) (regs->pc); + force_sig_info(SIGTRAP, &info,current); + return; + } + + machine_halt(); + force_sig(SIGTRAP, current); +} + +/* This is the handler for break points */ +void tpend_handler(struct pt_regs *regs, void *unused) +{ + siginfo_t info; + + regs->psl.t = 0; + +#if 0 + printk("\ntpend sending SIGTRAP\n"); + printk("\nTrace Pending at PC=%08lx at %08lX\n", regs->pc, ®s->pc); + printk("\nStack dump\n"); + hex_dump((void *)(regs->sp), 256); + show_regs(regs); + show_cpu_regs(); +#endif + + if (user_mode(regs)) { + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_BRKPT; + info.si_addr = (void *) (regs->pc); + force_sig_info(SIGTRAP, &info, current); + return; + } + + machine_halt(); + force_sig(SIGTRAP, current); +} + +/* + * This is the handler for reserved addressing mode exceptions. 
+ * Eventually this will have to check if the fault was from user + * mode or kernel mode and either throw a SIGILL or panic. + */ +void resam_handler(struct pt_regs *regs, void *unused) +{ + unsigned short instr = * (unsigned short *) (regs->pc); + + if ((instr == 0xfeff) || (instr == 0xfdff)) { + printk("\nKernel bugcheck at PC=%08lx\n", regs->pc); + } else { + printk("\nReserved addressing mode fault at PC=%08lx\n", regs->pc); + } + + printk("\nStack dump\n"); + hex_dump((void *)(regs->sp), 256); + dump_stack(); + show_regs(regs); + show_cpu_regs(); + + if (user_mode(regs)) { + force_sig(SIGILL,current); + return; + } + + machine_halt(); +} + +/* This is the handler for corrected memory read errors */ +void corrected_read_handler(struct pt_regs *regs, void *unused) +{ + printk("Corrected memory read error. " + "RAM failing or cache incorrectly initialized?\n"); +} + +/* This is the handler for arithmetic faults */ +static char *arith_faults[] = { + "none", + "Integer Overflow", + "Integer Division by Zero", + "Floating Overflow Trap", + "Floating or Decimal Division by Zero Trap", + "Floating Underflow Trap", + "Decimal Overflow", + "Subscript Range", + "Floating Overflow Fault", + "Floating or Decimal Division by Zero Fault", + "Floating Underflow Fault", +}; + +void arith_handler(struct pt_regs *regs, void *excep_info) +{ + int code = *(unsigned int *)(excep_info); + + printk("Arithmetic Fault at PC=%8lx, %s, (code=%x)\n", regs->pc, + arith_faults[code], code); + /* FIXME: need to code up the info for user handler */ + if (user_mode(regs)) { + force_sig(SIGFPE, current); + return; + } +} + +/* + * This function gets called from irqvec_stray when we get an exception or + * interrupt that doesn't have a handler in the SCB. The argument is the + * saved PC value from the JSB instruction in the stray_handler structure. + * From this value, we can find the address of the struct stray_handler, + * and thus the vector number. 
+ * + * This will also be used to auto-probe interrupt vectors. probe_irq_on() + * will clear the STRAY_EXCEPTION_FIRED flag on each stray handler above 64 + * (adapter and device vectors). Then probe_irq_off() will look for a + * vector with this bit set. + */ +int unhandled_exception(unsigned char *retaddr) +{ + struct stray_handler *handler; + unsigned int vec_num; + + handler = (struct stray_handler *) (retaddr + - offsetof(struct stray_handler, flags)); + + vec_num = handler - stray_handlers; + + if (autoprobing && vec_num >= FIRST_ADAPTER_VECTOR) { + stray_handlers[vec_num].flags |= STRAY_EXCEPTION_FIRED; + return 0; + } + + printk("\nUnhandled interrupt or exception number 0x%04x (SCB offset 0x%04x)\n", + vec_num, vec_num * 4); + + printk("\nStack dump:\n"); + vax_dump_stack(DUMP_STACK_CALLER); + + dump_cur_regs(DUMP_REGS_CALLER); + show_cpu_regs(); + + machine_halt(); +} + + +/* + * This is the equivalent of handle_IRQ_event() on x86. There is no + * need to walk the list of irqactions as in x86 because we don't have + * shared interrupts on the VAX. + */ +static inline void dispatch_irq(struct pt_regs *regs, struct irqvector *vec) +{ + struct irqaction *action; + int vec_num; + + action = &vec->action; + vec_num = vec->vec_num; + + kstat_cpu(smp_processor_id()).irqs[vec_num]++; + action->handler(vec_num, action->dev_id, regs); + + if (action->flags & SA_SAMPLE_RANDOM) + add_interrupt_randomness(vec_num); +} + +/* + * This is called once we know that an interrupt or exception is actually + * an interrupt. + */ +static inline void do_irq(struct pt_regs *regs, struct irqvector *vec) +{ + int flags; + + /* Fake a single-priority-level interrupt system by raising IPL + to 31 for _any_ interrupt. This is such a waste of the VAX's + hardware capabilities... 
*/ + + irq_enter(); + + local_irq_save(flags); + + dispatch_irq(regs, vec); + irq_exit(); + + local_irq_restore(flags); + + if (local_softirq_pending()) + do_softirq(); +} + +static inline void do_exception(struct pt_regs *regs, struct irqvector *vec, void *excep_info) +{ + kstat_cpu(smp_processor_id()).irqs[vec->vec_num]++; + vec->excep_handler(regs, excep_info); +} + +/* + * This is called from irqvec_handler in entry.S. At this point, inside_vec + * points to the excep_info_size field of the relevant struct irqvector. + * Locate the actual struct irqvector and dispatch the interrupt or + * exception. + * + * "Understanding the Linux Kernel" by Bovet & Cesati from O'Reilly + * contains the best explanation I've found for the various exit paths + * from this function. + */ +void do_irq_excep(struct pt_regs *regs, void *inside_vec, void *excep_info) +{ + struct irqvector *vec; + + vec = (struct irqvector *) (inside_vec + - offsetof(struct irqvector, excep_info_size)); + + /* + * If the excep_handler field of the irqvector is NULL, + * then this is an interrupt vector. Dispatch it via the + * irqaction struct. 
+ */ + if (vec->excep_handler != NULL) { +// printk("exception: vec=%p handler %p excep_info=%p(%d)\n",vec,vec->excep_handler,excep_info,*(int *)excep_info); + do_exception(regs, vec, excep_info); + if (vec == scb.scb.chmk) { + goto ret_from_sys_call; + } else { + goto ret_from_exception; + } + } else { + do_irq(regs, vec); + goto ret_from_intr; + } + +ret_from_sys_call: + if (local_softirq_pending()) { + do_softirq(); + goto ret_from_intr; + } + goto ret_with_reschedule; + +ret_from_exception: + if (local_softirq_pending()) + do_softirq(); + +ret_from_intr: + if (__psl.prevmode == 0) { + /* returning to kernel mode */ + goto done; + } + +ret_with_reschedule: +// printk("syscall: pid %d need_resched %d sigpending %d state %d\n",current->pid,current->need_resched,current->sigpending,current->state); + if (need_resched()) { + schedule(); + goto ret_from_sys_call; + } + + /* Check for pending signals */ + if (test_tsk_thread_flag(current, TIF_SIGPENDING)) { + /* FIXME: do we need to check the IPL here (i386 does a sti here) */ + /* FIXME: oldset? */ + do_signal(0, regs); + } + +// printk("syscall: out of c code\n"); +done: + return; +} + +/* + * These two functions, alloc_irqvector() and free_irqvector(), are temporary + * until we have a working kmalloc. We have a statically-allocated array of + * irqvector structures. An entry is free if the dest_addr field is NULL, + * it is in use otherwise. 
+ */
+static struct irqvector *alloc_irqvector(void)
+{
+	int i;
+	int flags;
+	struct irqvector *vec;
+
+	local_irq_save(flags);
+
+	for (i = 0, vec = irqvectors; i < NR_IRQVECTORS; i++, vec++) {
+		if (vec->dest_addr == NULL) {
+			vec->dest_addr = (void *) 0xffffffff;
+			local_irq_restore(flags);
+			return vec;
+		}
+	}
+
+	local_irq_restore(flags);
+	return NULL;
+}
+
+static void free_irqvector(struct irqvector *vec)
+{
+	memset(vec, 0, sizeof(*vec));
+}
+
+static int scb_vec_free(unsigned int vec_num)
+{
+	unsigned char *stray_start;
+	unsigned char *stray_end;
+
+	stray_start = &stray_handlers[0].inst_jsb;
+	stray_end = &stray_handlers[NR_IRQS].inst_jsb;
+
+	if ((SCB_VECTOR(vec_num) >= stray_start) &&
+	    (SCB_VECTOR(vec_num) < stray_end)) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static int hook_scb_vector(unsigned int vec_num, struct irqvector *vec,
+	unsigned int use_interrupt_stack)
+{
+	unsigned char *new_vector;
+	int flags;
+	extern void irqvec_handler(void);
+
+	local_irq_save(flags);
+
+	if (!scb_vec_free(vec_num)) {
+		local_irq_restore(flags);
+		printk("hook_scb_vector: SCB vector %04x (%p) already in use\n",
+			vec_num, SCB_VECTOR(vec_num));
+		return -EBUSY;
+	}
+
+	vec->vec_num = vec_num;
+
+	vec->inst_jsb = 0x16;		/* JSB */
+	vec->inst_addr_mode = 0x9f;	/* absolute addressing mode */
+	vec->dest_addr = irqvec_handler;
+
+	vec->orig_scb_vector = SCB_VECTOR(vec_num);
+
+	new_vector = &vec->inst_jsb;
+
+	if (use_interrupt_stack) {
+		/*
+		 * LSB set in SCB vector tells CPU to service event
+		 * on interrupt stack regardless of current stack.
+ */ + new_vector++; + } + + SCB_VECTOR(vec_num) = new_vector; + + flush_icache(); + + local_irq_restore(flags); + return 0; +} + +int request_irq(unsigned int irq, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char * devname, void *dev_id) +{ + int retval; + struct irqvector *vector; + + if (irq >= NR_IRQS) + return -EINVAL; + if (!handler) + return -EINVAL; + + vector = alloc_irqvector(); + if (!vector) + return -ENOMEM; + + vector->action.handler = handler; + vector->action.flags = irqflags; + vector->action.mask = CPU_MASK_NONE; + vector->action.name = devname; + vector->action.next = NULL; + vector->action.dev_id = dev_id; + + vector->excep_info_size = 0; + vector->excep_handler = NULL; + + retval = hook_scb_vector(irq, vector, 1); + + if (retval) + free_irqvector(vector); + + return retval; +} + +static void unhook_scb_vector(unsigned int vec_num, void *dev_id) +{ + int flags; + struct irqvector *vector; + unsigned char *inside_vec; + + local_irq_save(flags); + + if (scb_vec_free(vec_num)) { + local_irq_restore(flags); + printk("unhook_scb_vector: SCB vector %04x already free\n", vec_num); + return; + } + + inside_vec = SCB_VECTOR(vec_num); + + /* We must mask off the bottom two bits. 
They have meaning to + to the hardware, and are not part of the actual target address */ + + inside_vec = (unsigned char *) ((unsigned long) (inside_vec) & ~0x3); + + vector = (struct irqvector *) (inside_vec + - offsetof(struct irqvector, inst_jsb)); + + if (dev_id != vector->action.dev_id) { + local_irq_restore(flags); + printk("unhook_scb_vector: dev_id mismatch (expected %p, currently %p)\n", + dev_id, vector->action.dev_id); + return; + } + + SCB_VECTOR(vec_num) = vector->orig_scb_vector; + + local_irq_restore(flags); + + free_irqvector(vector); +} + +void free_irq(unsigned int irq, void *dev_id) +{ + if (irq >= NR_IRQS) + return; + + unhook_scb_vector(irq, dev_id); +} + +unsigned long probe_irq_on(void) +{ + int i; + int flags; + + local_irq_save(flags); + + for (i = FIRST_ADAPTER_VECTOR; i < NR_IRQS; i++) + stray_handlers[i].flags &= ~STRAY_EXCEPTION_FIRED; + + autoprobing = 1; + local_irq_restore(flags); + + return 1; +} + +int probe_irq_off(unsigned long mask) +{ + int i; + int vec_found; + int nr_vecs; + int flags; + + nr_vecs = 0; + vec_found = 0; + + local_irq_save(flags); + + for (i = FIRST_ADAPTER_VECTOR; i < NR_IRQS; i++) { + if (stray_handlers[i].flags & STRAY_EXCEPTION_FIRED) { + vec_found = i; + nr_vecs++; + } + } + autoprobing=0; + local_irq_restore(flags); + + if (nr_vecs > 1) { + vec_found = -vec_found; + } + + return vec_found; +} + +int register_excep_handler(unsigned int vec_num, char *exception_name, + void (*handler)(struct pt_regs *, void *), + unsigned int exception_info_size, unsigned int use_interrupt_stack) +{ + int retval; + struct irqvector *vector; + + if (vec_num >= NR_IRQS) + return -EINVAL; + if (!handler) + return -EINVAL; + + vector = alloc_irqvector(); + + if (!vector) + return -ENOMEM; + + vector->excep_info_size = exception_info_size; + vector->excep_handler = handler; + vector->action.name = exception_name; /* Needed to stop get_irq_list dying */ + /* FIXME: This doesn't set dev_id or other members of the irqaction 
structure... */ + + retval = hook_scb_vector(vec_num, vector, use_interrupt_stack); + + if (retval) + free_irqvector(vector); + + return retval; +} + +int show_interrupts(struct seq_file *p, void *v) +{ + int i = * (loff_t *) v; + struct irqvector *vector; + unsigned char *inside_vec; + + if (i < NR_IRQS && !scb_vec_free (i)) { + + inside_vec = SCB_VECTOR (i); + /* + * We must mask off the bottom two bits. They have + * meaning to the hardware, and are not part of + * the actual target address + */ + inside_vec = (unsigned char *) ((unsigned long) (inside_vec) & ~0x3); + vector = (struct irqvector *) (inside_vec + - offsetof (struct irqvector, inst_jsb)); + if (vector->action.name == NULL) + seq_printf (p, "%4d: %8d no interrupt vector name\n", vector->vec_num, 0); + else + seq_printf (p, "%4d: %8d %s\n", vector->vec_num, kstat_irqs(i), vector->action.name); + } + + return 0; +} + +/* Empty for now. See arch/i386/kernel/irq.c for what this should do. */ +void init_irq_proc(void) +{ +} + diff -Nru a/arch/vax/kernel/interrupt.h b/arch/vax/kernel/interrupt.h --- a/arch/vax/kernel/interrupt.h 1970-01-01 01:00:00 +++ b/arch/vax/kernel/interrupt.h 2004-06-14 14:20:54 @@ -0,0 +1,74 @@ +#ifndef _VAX_KERNEL_INTERRUPT_H +#define _VAX_KERNEL_INTERRUPT_H + +/* + * This file contains private definitions for the interrupt and + * exception handling code in interrupt.c. I don't expect that + * anyone else should need these definitions. If so, then we'll + * have to move them to include/asm-vax instead. + */ + +#include + +/* This is the max number of exception and interrupt handlers we can + handle. You can increase this as far as NR_IRQS if you need to. */ +#define NR_IRQVECTORS 64 + +/* Initially, we use one of these to catch each vector in the SCB. 
+ When an exception or interrupt handler is registered, a struct + irqvector is used instead */ + +struct stray_handler { + unsigned char inst_jsb; /* JSB instruction, 0x16 */ + unsigned char inst_addr_mode; /* Absolute addr mode, 0x9F */ + void *dest_addr; /* Address of irqvec_stray */ + unsigned short flags; /* Used for probe_irq() */ +} __attribute__ ((__packed__)); + +/* Bits in stray_handler.flags */ +#define STRAY_EXCEPTION_FIRED 1 + +/* The VAX architecture defines interrupt vectors 64 and higher to be + adapter and device vectors and are implementation dependent. Vectors + in this region can be autoprobed. */ +#define FIRST_ADAPTER_VECTOR 64 + + +/* The irqvector structure is the VAX-specific equivalent of the + Linux irqaction structure. In fact, it has so much in common + that it contains an irqaction... + + It is also used to vector exceptions, for which the excep_handler + field is used. + + See Documentation/vax/interrupts.txt for more info on how this + all works. */ + +struct irqvector { + unsigned char inst_jsb; /* JSB instruction, 0x16 - MUST be long-aligned*/ + unsigned char inst_addr_mode; /* Absolute addressing mode, 0x9F */ + void *dest_addr; /* Address of irqvec_handler in entry.S */ + unsigned long excep_info_size; /* This MUST follow dest_addr, irqvec_handler + depends on it. 
*/ + unsigned short vec_num; /* Offset into SCB (in longwords, not bytes) */ + struct irqaction action; /* Linux's normal interrupt vector structure */ + void (*excep_handler)(struct pt_regs *, void *); + unsigned char *orig_scb_vector; /* Original stray handler from SCB, restored when + vector is un-hooked */ +} __attribute__ ((__packed__)); + + +/* And declarations of some standard interrupt handlers */ + +extern void accvio_handler(struct pt_regs *regs, void *excep_info); +extern void page_fault_handler(struct pt_regs *regs, void *excep_info); +extern void reserved_operand_handler(struct pt_regs *regs, void *excep_info); +extern void reserved_instr_handler(struct pt_regs *regs, void *excep_info); +extern void corrected_read_handler(struct pt_regs *regs, void *excep_info); +extern void syscall_handler(struct pt_regs *regs, void *excep_info); +extern void resam_handler(struct pt_regs *regs, void *unused); +extern void arith_handler(struct pt_regs *regs, void *excep_info); +extern void bpt_handler(struct pt_regs *regs, void *excep_info); +extern void tpend_handler(struct pt_regs *regs, void *excep_info); + +#endif /* _VAX_KERNEL_INTERRUPT_H */ diff -Nru a/arch/vax/kernel/ioprobe.c b/arch/vax/kernel/ioprobe.c --- a/arch/vax/kernel/ioprobe.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/ioprobe.c 2004-05-30 11:36:17 @@ -0,0 +1,92 @@ +/* + * Functions for checking if addresses in IO space exist. Used + * to probe for devices. Non-existent addresses trigger a machine + * check, which is dismissed immediately if probe_resume_addr is + * non-zero + * + * Inspired by NetBSD/vax. 
+ */
+
+#include
+#include
+
+void *probe_resume_addr;
+
+int iospace_probeb(void *virt_addr)
+{
+	int valid = 1;
+	unsigned int flags;
+
+	local_irq_save(flags);
+
+	__asm__ (
+	"	movl %2, %%r1		\n"
+	"	movab probeb_resume, %1	\n"
+	"	tstb (%%r1)		\n"
+	"	brb probeb_good		\n"
+	"probeb_resume:			\n"
+	"	clrl %0			\n"
+	"probeb_good:			\n"
+	: "=g"(valid), "=g"(probe_resume_addr)
+	: "g" (virt_addr), "0"(valid)
+	: "r0", "r1");
+
+	probe_resume_addr = NULL;
+
+	local_irq_restore(flags);
+
+	return valid;
+}
+
+int iospace_probew(void *virt_addr)
+{
+	int valid = 1;
+	unsigned int flags;
+
+	local_irq_save(flags);
+
+	__asm__ (
+	"	movl %2, %%r1		\n"
+	"	movab probew_resume, %1	\n"
+	"	tstw (%%r1)		\n"
+	"	brb probew_good		\n"
+	"probew_resume:			\n"
+	"	clrl %0			\n"
+	"probew_good:			\n"
+	: "=g"(valid), "=g" (probe_resume_addr)
+	: "g"(virt_addr), "0"(valid)
+	: "r0", "r1");
+
+	probe_resume_addr = NULL;
+
+	local_irq_restore(flags);
+
+	return valid;
+}
+
+int iospace_probel(void *virt_addr)
+{
+	int valid = 1;
+	unsigned int flags;
+
+	local_irq_save(flags);
+
+	__asm__ (
+	"	movl %2, %%r1		\n"
+	"	movab probel_resume, %1	\n"
+	"	tstl (%%r1)		\n"
+	"	brb probel_good		\n"
+	"probel_resume:			\n"
+	"	clrl %0			\n"
+	"probel_good:			\n"
+	: "=g"(valid), "=g"(probe_resume_addr)
+	: "g"(virt_addr), "0"(valid)
+	: "r0", "r1");
+
+	probe_resume_addr = NULL;
+
+	local_irq_restore(flags);
+
+	return valid;
+}
+
diff -Nru a/arch/vax/kernel/module.c b/arch/vax/kernel/module.c
--- a/arch/vax/kernel/module.c	1970-01-01 01:00:00
+++ b/arch/vax/kernel/module.c	2005-04-25 15:11:56
@@ -0,0 +1,163 @@
+/*
+ * Kernel module help for VAX.
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2003-2004 Jan-Benedict Glaw
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA + */ + +#include +#include +#include +#include +#include +#include + +#define DEBUG_VAX_MODULE_LOADER +#ifdef DEBUG_VAX_MODULE_LOADER +#define DEBUGP(fmt...) printk(fmt) +#else +#define DEBUGP(fmt...) +#endif + + +/* + * Allocate RAM for a module + */ +void * +module_alloc (unsigned long size) +{ + if (size == 0) + return NULL; + return vmalloc (size); +} + +/* + * Free memory returned from module_alloc + */ +void +module_free (struct module *mod, void *module_region) +{ + vfree (module_region); +} + +/* + * Nothing special + */ +int +module_frob_arch_sections (Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + return 0; +} + +/* + * Do the hard work - relocate + */ +int +apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, + unsigned int relsec, struct module *me) +{ + unsigned int i; + Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + + DEBUGP ("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* + * This is where to make the change + */ + location = (void *) sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* + * This is the symbol it is referring to. Note that all + * undefined symbols have been resolved. 
+ */ + sym = (Elf32_Sym *) sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_VAX_32: + DEBUGP (KERN_ERR "R_VAX_32: loc=%p, v=0x%d\n", + (void *) *location, sym->st_value); + *location += sym->st_value; + break; + + case R_VAX_PC32: + case R_VAX_PLT32: + DEBUGP (KERN_ERR "R_VAX_P%s32: loc=%p, v=0x%d\n", + ELF32_R_TYPE(rel[i].r_info) == R_VAX_PC32? + "C": "LT", + (void *) *location, sym->st_value); + *location += sym->st_value - (uint32_t)location; + break; + + case R_VAX_GOT32: + DEBUGP (KERN_ERR "R_VAX_GOT32: loc=%p, v=0x%d\n", + (void *) *location, sym->st_value); + /* FIXME */ + printk (KERN_ERR "R_VAX_GOT32 not yet implemented\n"); + return -ENOEXEC; + break; + + default: + DEBUGP (KERN_ERR "module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + break; + } + } + + return 0; +} + +int +apply_relocate_add (Elf32_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, struct module *me) +{ + printk (KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name); + return -ENOEXEC; +} + +extern void apply_alternatives(void *start, void *end); + +int +module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *me) +{ + printk (KERN_ERR "Omitted apply_alternatives()...\n"); +#if 0 + const Elf_Shdr *s; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + /* look for .altinstructions to patch */ + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + void *seg; + if (strcmp(".altinstructions", secstrings + s->sh_name)) + continue; + seg = (void *)s->sh_addr; + apply_alternatives(seg, seg + s->sh_size); + } +#endif /* 0 */ + return 0; +} + +void +module_arch_cleanup (struct module *mod) +{ +} + diff -Nru a/arch/vax/kernel/process.c b/arch/vax/kernel/process.c --- a/arch/vax/kernel/process.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/process.c 2005-10-03 14:35:54 @@ -0,0 +1,285 @@ +/* + * This file 
contains the standard functions that the arch-independent + * kernel expects for process handling and scheduling + */ + +#define __KERNEL_SYSCALLS__ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +#undef VAX_PROCESS_DEBUG + +void cpu_idle(void) +{ + /* Endless idle loop with no priority at all */ + + while (1) { + /* Although we are an idle CPU, we do not want to + get into the scheduler unnecessarily. */ + if (need_resched()) { + schedule(); + } + } +} + + +void default_idle(void) +{ + /* nothing */ +} + +struct task_struct *__switch_to(struct task_struct* prev, struct task_struct* next) +{ + unsigned long pcbb; /* physical address of new pcb */ + +#ifdef VAX_PROCESS_DEBUG + printk("vax_switch_to: switching %p, pid %d, state %ld -> %p, pid %d, state %ld, pc %08lx\n", + prev, prev->pid, prev->state, next, next->pid, next->state, next->thread.pcb.pc); +#endif + /* We should check that __pa((prev)->thread.pcb) == PR_PCBB */ + + /* Get phys address of next process pcb */ + pcbb = virt_to_phys(&next->thread.pcb); + + /* + * When 'next' starts running, R0 will hold the task pointer + * for the process we just switched away from. This will end + * up in R0 at ret_from_fork, for new processes and will be + * the return value from this function for existing processes + */ + next->thread.pcb.r0 = (unsigned long) prev; + + /* svpctx should deal with writing the stuff into *prev */ + asm( + " movpsl -(%%sp) \n" + " pushab 1f \n" + " mtpr %2, %3 # Raise IPL to 31 \n" + " svpctx # Causes switch to interrupt stack \n" + " mtpr %0, %1 # Load pcbb into PR_PCCB \n" + " ldpctx # Loads registers and switches back to \n" + " # kernel stack. 
Also leaves PC/PSL of \n" + " # new process on kernel stack for an \n" + " # immediate REI \n" + " rei \n" + "1: ret # return now before anything munges R0 \n" + : /* no outputs */ + : "r"(pcbb), "g"(PR_PCBB), "g"(31), "g"(PR_IPL)); + + /* Never get to here because of the RET instruction above */ + return NULL; +} + +/* + * This _must_ match the stack layout in effect at ret_from_syscall + * in entry.S. + * + * We do a bit of a hack here. The handler_PC (i.e. the saved PC + * value from the JSB in the irqvector structure) normally points + * to the excep_info_size member of the irqvector. When we build + * the fake stack frame for the new thread, we don't have an + * irqvector available. So what we do is pretend we have one longword + * of exception info, we put the value 1 into this longword and we + * point the handler_PC field at this 'exception info'. + */ + +struct new_thread_stack { + struct pt_regs regs; + unsigned long saved_r0; /* Will be overwritten by regs->r0 */ + unsigned long *excep_info_size; /* Must point to excep_info */ + unsigned long excep_info; /* Must contain the value 1 */ + unsigned long saved_pc; /* Will be overwritten by regs->pc */ + struct psl_fields saved_psl; +}; + +/* Defined in entry.S */ +extern void ret_from_fork(void); + +int copy_thread(int unused1, unsigned long clone_flags, unsigned long usp, + unsigned long unused2, + struct task_struct *p, struct pt_regs *regs) +{ + struct new_thread_stack *child_stack; + struct pt_regs *child_regs; + void *stack_top; + + stack_top = (unsigned char *)(p->thread_info) + THREAD_SIZE; + stack_top -= 4; + + child_stack = (struct new_thread_stack *)(stack_top) - 1; + +#ifdef VAX_PROCESS_DEBUG + printk("copy_thread: pid %d, task 0x%08lx, kstack_top %p, " + "usp 0x%08lx, ksp %p\n", p->pid, (unsigned long) p, + stack_top, usp, child_stack); +#endif + + child_regs = &child_stack->regs; + + *child_regs = *regs; + child_regs->r0 = 0; /* fork() returns 0 in child */ + + child_stack->excep_info = 1; + 
child_stack->excep_info_size = &child_stack->excep_info; + child_stack->saved_psl = regs->psl; + + p->thread.pcb.ksp = (unsigned long)child_stack; + p->thread.pcb.usp = usp; + p->thread.pcb.pc = (unsigned long)ret_from_fork; + p->thread.pcb.psl = __psl; + + /* + * New thread must start with IPL 31 to prevent any interrupts + * from occuring between the time it is first scheduled (in __switch_to + * above) and when ret_from_fork calls schedule_tail(). If an + * interrupt comes in during this time, schedule() might get called + * from do_irq_excep() before schedule_tail() has released the + * runqueue lock (in finish_task_switch) + */ + p->thread.pcb.psl.ipl = 31; + + /* + * We could speed this up by loading the register values into + * the PCB and start the new thread just before the REI in + * entry.S, letting the regular context switching load the + * registers from the PCB. However, once signal and bottom-half + * handling go into the ret_from_syscall path, then things might + * change. So I'll stick with this 'obviously correct' method + * for now. KPH 2000-10-30 + */ + + return 0; +} + +void flush_thread(void) +{ + /* + * I don't really know what's supposed to go in here. 
It + * gets called just after exec(), so I guess we reset any + * VAX-specific thread state here + */ +} + +/* + * Create a kernel thread + */ +pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) +{ + asm( + " movl %2,%%r2 \n" + " movl %3,%%r3 \n" + " clrl -(%%sp) \n" + " movl %0, -(%%sp) \n" + " pushl $0x2 \n" + " movl %%sp, %%ap \n" + " chmk %1 \n" + " tstl %%r0 \n" + " beql child \n" + " ret \n" + "child: \n" + " pushl %%r3 \n" + " calls $1, *%%r2 \n" + " pushl %%r0 \n" + " movl %%sp, %%ap \n" + " chmk %4 \n" + : /* no outputs */ + : "g"(flags | CLONE_VM), "g"(__NR_clone), "g"(fn), "g"(arg), "g"(__NR_exit) + : "r0", "r2", "r3"); + + /* + * We never actually get here - there is a RET embedded above which + * returns in the parent, and the child exits with the CHMK __NR_exit + */ + return 0; +} + +int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs) +{ + int retval; + + if (!newsp) + newsp = regs->sp; + +#ifdef VAX_PROCESS_DEBUG + printk("sys_clone: calling do_fork(0x%08lx, 0x%08lx, 0x%p)\n", + clone_flags, newsp, regs); +#endif + retval = do_fork(clone_flags, newsp, regs, 0, NULL, NULL); + +#ifdef VAX_PROCESS_DEBUG + printk("sys_clone: do_fork() returned pid %d\n", retval); +#endif + return retval; +} + +int sys_fork(struct pt_regs *regs) +{ + return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); +} + +int sys_vfork(struct pt_regs *regs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); +} + +/* + * sys_execve() executes a new program. 
+ * + */ +int sys_execve(char *filename, char **argv, char **envp, + struct pt_regs *regs) +{ + int error; + char *tmpname; + + tmpname = getname(filename); + error = PTR_ERR(tmpname); + if (IS_ERR(tmpname)) + goto out; + + error = do_execve(tmpname, argv, envp, regs); + if (error == 0) + current->ptrace &= ~PT_DTRACE; + putname(tmpname); +out: + return error; +} + +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) +{ + /* no FPU support .. YET - D.A. 25 Feb 2001 */ + return 0; +} + +void start_thread(struct pt_regs *regs, unsigned long new_pc, + unsigned long new_sp) +{ +#ifdef VAX_PROCESS_DEBUG + printk("PID %d: starting thread pc=0x%08lx new_sp=0x%08lx regs->sp=" + "0x%08lx\n", current->pid, new_pc, new_sp, regs->sp); +#endif + set_fs(USER_DS); + regs->pc = new_pc + 2; + regs->sp = new_sp; + regs->ap = new_sp; + regs->fp = new_sp; + regs->psl.prevmode = PSL_MODE_USER; + regs->psl.accmode = PSL_MODE_USER; + /* write the sp into the user stack pointer register */ + __mtpr(new_sp, PR_USP); +} + diff -Nru a/arch/vax/kernel/ptrace.c b/arch/vax/kernel/ptrace.c --- a/arch/vax/kernel/ptrace.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/ptrace.c 2005-10-31 15:10:21 @@ -0,0 +1,368 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 Ross Biro + * Copyright (C) Linus Torvalds + * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle + * Copyright (C) 1996 David S. Miller + * Copyright (C) 2001 David Airlie, VAX Porting Project + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "interrupt.h" + +#include +#include +#include +#include + +#define VAX_MAX_NUM_VALS_TO_CHECK 5 + +extern struct irqvector irqvectors[NR_IRQVECTORS]; +/* + * search out does damn regs.. that variable length exception info is evil I + * tell you evil. 
+ */ +static struct pt_regs *ptrace_find_vax_regs(struct task_struct *child) +{ + struct pt_regs *regs_ptr; + unsigned long *chk_excep_addr; + void *stack_top, *excep_addr; + int num_execp_params; + int i; + + // printk("child sp is %8lX\n", child->thread.pcb.ksp); + stack_top = child->thread_info + 1; + stack_top -= 4; /* jump down over PC and PSL which are always there */ + + /* hack attack - apologies to anyone who has just eaten + * this is the worst code I've written since the code this code + * is replacing ... - Dave. + */ + + /* start after the PC/PSL, and look for an exception vector + if we move to malloced vectors this is screwed */ + chk_excep_addr = (unsigned long *) *(unsigned long *) stack_top; + for (i = 0; i (unsigned long *) irqvectors + && chk_excep_addr < (unsigned long *) (irqvectors + NR_IRQVECTORS)) + break; + } + + if (i == VAX_MAX_NUM_VALS_TO_CHECK) { + printk("Cannot find exception handler address\n"); + return NULL; + } + + num_execp_params = *chk_excep_addr; + + regs_ptr = excep_addr - 4 - sizeof(struct pt_regs); + + return regs_ptr; +} + +static int putreg(struct task_struct *child, unsigned long regno, + unsigned long value) +{ + struct pt_regs *regs_ptr; + + regs_ptr = ptrace_find_vax_regs(child); + if (!regs_ptr) + return 0; + + if ((regno >> 2) == PT_SP) { + child->thread.pcb.usp = value; + return 0; + } + + switch (regno >> 2) { + case 0 ... 
16:
+		// retval = *(((unsigned long *)regs_ptr) + (regno>>2));
+		*(((unsigned long *) regs_ptr) + (regno >> 2)) = value;
+		// *(unsigned long *)((&child->thread.pcb)+4+(regno>>2))=value;
+		break;
+	default:
+		printk("putreg for %lu failed\n", regno);
+		break;
+	}
+
+	return 0;
+}
+
+static unsigned long getreg(struct task_struct *child, unsigned long regno)
+{
+	unsigned long retval = ~0UL;
+	struct pt_regs *regs_ptr;
+
+	/* Call helper function to get registers for the VAX */
+	regs_ptr = ptrace_find_vax_regs(child);
+	if (!regs_ptr)
+		return 0;
+
+	if ((regno >> 2) == PT_SP) {
+		retval = child->thread.pcb.usp;
+		return retval;
+	}
+
+	switch (regno >> 2) {
+	case 0 ... 16:
+		retval = *(((unsigned long *) regs_ptr) + (regno >> 2));
+		break;
+	default:
+		printk("getreg for %lu failed\n", regno);
+		retval = 0;
+		break;
+	}
+//	printk("getreg for %ld returned %8lX\n", regno>>2, retval);
+	return retval;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* make sure the single step bit is not set. */
+	/* FIXME: */
+}
+
+asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+{
+	struct task_struct *child;
+	int res;
+	extern void save_fp(void*);
+
+	lock_kernel();
+#if 0
+	printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
+		(int) request, (int) pid, (unsigned long) addr,
+		(unsigned long) data);
+#endif
+	if (request == PTRACE_TRACEME) {
+		/* are we already being traced? */
+		if (current->ptrace & PT_PTRACED) {
+			res = -EPERM;
+			goto out;
+		}
+		res = security_ptrace(current->parent, current);
+		if (res)
+			goto out;
+		/* set the ptrace bit in the process flags.
*/ + current->ptrace |= PT_PTRACED; + res = 0; + goto out; + } + res = -ESRCH; + read_lock(&tasklist_lock); + child = find_task_by_pid(pid); + if (child) + get_task_struct(child); + read_unlock(&tasklist_lock); + if (!child) + goto out; + + res = -EPERM; + if (pid == 1) /* you may not mess with init */ + goto out; + + if (request == PTRACE_ATTACH) { + if (child == current) + goto out_tsk; + if ((!child->mm->dumpable || + (current->uid != child->euid) || + (current->uid != child->suid) || + (current->uid != child->uid) || + (current->gid != child->egid) || + (current->gid != child->sgid) || + (current->gid != child->gid) || + (!cap_issubset(child->cap_permitted, + current->cap_permitted)) || + (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE)) + goto out_tsk; + /* the same process cannot be attached many times */ + if (child->ptrace & PT_PTRACED) + goto out_tsk; + child->ptrace |= PT_PTRACED; + + write_lock_irq(&tasklist_lock); + if (child->parent != current) { + REMOVE_LINKS(child); + child->parent = current; + SET_LINKS(child); + } + write_unlock_irq(&tasklist_lock); + + send_sig(SIGSTOP, child, 1); + res = 0; + goto out_tsk; + } + res = -ESRCH; + if (!(child->ptrace & PT_PTRACED)) + goto out_tsk; + if (child->state != TASK_STOPPED) { + if (request != PTRACE_KILL) + goto out_tsk; + } + if (child->parent != current) + goto out_tsk; + switch (request) { + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: { + unsigned long tmp; + int copied; + + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + res = -EIO; + if (copied != sizeof(tmp)) + break; + res = put_user(tmp,(unsigned long *) data); + + goto out; + } + + /* Read the word at location addr in the USER area. 
*/ + case PTRACE_PEEKUSR: { + unsigned long tmp; + + res = -EIO; + if ((addr & 3) || addr < 0 || addr > sizeof(struct user) - 3) + break; + + tmp=0; + if (addr < 16 * sizeof(unsigned long)) + tmp = getreg(child, addr); + + res = put_user(tmp, (unsigned long *) data); + goto out; + } + + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + res = 0; + if (access_process_vm(child, addr, &data, sizeof(data), 1) + == sizeof(data)) + break; + res = -EIO; + goto out; + + case PTRACE_POKEUSR: { + // struct pt_regs *regs; + int res = 0; + res = -EIO; + if ((addr & 3) || addr < 0 || addr > sizeof(struct user) - 3) + break; + + if (addr < 17 * sizeof(long)) { + res = putreg(child, addr, data); + break; + } + /* We need to be very careful here. We implicitly + want to modify a portion of the task_struct, and we + have to be selective about what portions we allow someone + to modify. */ + break; + } + + case PTRACE_SYSCALL: /* Continue and stop at next (return from) syscall */ + case PTRACE_CONT: { /* Restart after signal. */ + res = -EIO; + if ((unsigned long) data > _NSIG) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + + child->exit_code = data; + wake_up_process(child); + res = 0; + break; + } + + /* + * Make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. 
+ */ + case PTRACE_KILL: + res = 0; + if (child->exit_state == EXIT_ZOMBIE) /* already dead */ + break; + child->exit_code = SIGKILL; + wake_up_process(child); + break; + + case PTRACE_SINGLESTEP: { + unsigned long tmp; + struct psl_fields *psl; + + res = -EIO; + if ((unsigned long) data > _NSIG) + break; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + if ((child->ptrace & PT_DTRACE)==0) + child->ptrace |= PT_DTRACE; + + tmp = getreg(child, PT_PSL<<2); + psl = (struct psl_fields *)&tmp; + psl->t = 1; + putreg(child, PT_PSL << 2, *(unsigned long *) psl); + // printk("tmp is %8lX, psl is now %8lX\n", tmp, *(unsigned long *)psl); + + child->exit_code=data; + wake_up_process(child); + res = 0; + break; + } + + case PTRACE_DETACH: /* detach a process that was attached. */ + res = ptrace_detach(child, data); + break; + + default: + res = -EIO; + goto out; + } +out_tsk: + put_task_struct(child); +out: + unlock_kernel(); + + return res; +} + +asmlinkage void syscall_trace(void) +{ + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + if (!(current->ptrace & PT_PTRACED)) + return; + + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 0x80 : 0)); + + /* + * This isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} + diff -Nru a/arch/vax/kernel/reboot.c b/arch/vax/kernel/reboot.c --- a/arch/vax/kernel/reboot.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/reboot.c 2005-04-26 00:25:05 @@ -0,0 +1,77 @@ +/* + * This file contains the standard functions that the arch-independent + * kernel expects for halting, rebooting and powering off the machine. + * It also contains the machine check dispatcher. + * + * The real work will be done by cpu-specific code via the machine + * vector. Eventually... 
+ */ + +#include +#include + +#include +#include + +extern void show_cpu_regs(void); + +void machine_halt(void) +{ + if (!mv->halt) { + printk("machine_halt: cpu-specific halt not implemented" + " - HALTing\n"); + HALT; + while (1) + /* wait */; + } + + mv->halt(); + while (1) + /* wait */; +} + +void machine_restart(char *cmd) +{ + if (!mv->reboot) { + printk("machine_restart: cpu-specific reboot not implemented" + " - HALTing\n"); + HALT; + } + + mv->reboot(); + while (1) + /* wait */; +} + +void machine_power_off(void) +{ + if (!mv->halt) { + printk("machine_power_off: cpu-specific halt not implemented" + " - HALTing\n"); + HALT; + while (1) + /* wait */; + } + + mv->halt(); + while (1) + /* wait */; +} + +/* + * This is called directly, from entry.S + * It checks for a cpu specific machine check handler and hands over to it. + * Otherwise it will just halt, as there is no way to recover without a + * sensible cpu specific routine + */ +void machine_check(void *stkframe) +{ + if (!mv->mcheck) { + printk("Machine Check - CPU specific handler not implemented - halting\n"); + show_cpu_regs(); + machine_halt(); + } + + mv->mcheck(stkframe); +} + diff -Nru a/arch/vax/kernel/regdump.c b/arch/vax/kernel/regdump.c --- a/arch/vax/kernel/regdump.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/regdump.c 2005-04-26 00:25:05 @@ -0,0 +1,278 @@ +/* + * This file contains functions for dumping register and stack + * contents. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +void show_regs(struct pt_regs *regs) +{ + struct psl_fields *psl; + unsigned int raw_psl; + static char *modes = "KESU"; + + printk("\n r0 %08lx r1 %08lx r2 %08lx r3 %08lx\n", + regs->r0, regs->r1, regs->r2, regs->r3); + + printk(" r4 %08lx r5 %08lx r6 %08lx r7 %08lx\n", + regs->r4, regs->r5, regs->r6, regs->r7); + + printk(" r8 %08lx r9 %08lx r10 %08lx r11 %08lx\n", + regs->r8, regs->r9, regs->r10, regs->r11); + + printk(" ap %08lx fp %08lx sp %08lx pc %08lx\n", + regs->ap, regs->fp, regs->sp, regs->pc); + + raw_psl = RAW_PSL(regs->psl); + psl = ®s->psl; + + printk(" psl %08x ipl %d mode %c (prev %c) %s%s%s%s%s%s%s%s%s%s%s%s\n", + raw_psl, psl->ipl, + modes[psl->accmode], modes[psl->prevmode], + psl->cm ? "CM " : "", + psl->tp ? "TP " : "", + psl->fpd ? "FPD " : "", + psl->is ? "IS " : "", + psl->dv ? "DV " : "", + psl->fu ? "FU " : "", + psl->iv ? "IV " : "", + psl->t ? "T " : "", + psl->n ? "N " : "", + psl->z ? "Z " : "", + psl->v ? "V " : "", + psl->c ? 
"C " : ""); + + if (raw_psl & PSL_MBZ_MASK) { + printk(" *** PSL MBZ fields not zero: %08x ***\n", + raw_psl & PSL_MBZ_MASK); + } +} + +void show_cpu_regs(void) +{ + unsigned int p0br = __mfpr(PR_P0BR); + unsigned int p0lr = __mfpr(PR_P0LR); + unsigned int p1br = __mfpr(PR_P1BR); + unsigned int p1lr = __mfpr(PR_P1LR); + + unsigned int sbr = __mfpr(PR_SBR); + unsigned int slr = __mfpr(PR_SLR); + unsigned int pcbb = __mfpr(PR_PCBB); + unsigned int scbb = __mfpr(PR_SCBB); + + unsigned int astlvl = __mfpr(PR_ASTLVL); + unsigned int sisr = __mfpr(PR_SISR); + unsigned int mapen = __mfpr(PR_MAPEN); + unsigned int sid = __mfpr(PR_SID); + + unsigned int isp = __mfpr(PR_ISP); + unsigned int ksp = __mfpr(PR_KSP); + unsigned int esp = __mfpr(PR_ESP); + unsigned int ssp = __mfpr(PR_SSP); + unsigned int usp = __mfpr(PR_USP); + + printk("\n p0br %08x sbr %08x astlvl %08x\n", + p0br, sbr, astlvl); + + printk(" p0lr %08x slr %08x sisr %08x\n", + p0lr, slr, sisr); + + printk(" p1br %08x pcbb %08x mapen %08x\n", + p1br, pcbb, mapen); + + printk(" p1lr %08x scbb %08x sid %08x\n\n", + p1lr, scbb, sid); + + printk(" isp %08x ksp %08x esp %08x ssp %08x usp %08x\n", + isp, ksp, esp, ssp, usp); +} + +void hex_dump(void *addr, unsigned int bytes) +{ + unsigned int *p = addr; + unsigned int i; + unsigned int x; + + for (i=0; i < bytes / 4; i++) { + if (i % 4 == 0) { + printk(" %08lx ", (unsigned long)(p+i)); + } + if ((unsigned int)(p+i)] ", addr); + print_symbol("%s\n", addr); + } + } + + printk("\n"); + + return; +} + +void dump_stack(void) +{ + unsigned long stack; + + vax_dump_stack (0); + show_stack (current, &stack); +} + +void vax_dump_stack(unsigned int frames) +{ + unsigned int reg_count; + unsigned int save_mask; + + unsigned int *target_sp; + + struct vax_call_frame *fp; + struct vax_call_frame *target_fp; + struct vax_arglist *ap; + struct vax_arglist *target_ap; + + /* Why doesn't asm("fp") on the declaration work + as advertised? 
*/ + asm("movl %%fp, %0" : "=g"(fp) : ); + asm("movl %%ap, %0" : "=g"(ap) : ); + + /* First frame we look at is our own */ + target_fp = fp; + target_ap = ap; + + while (frames--) { + /* Get the saved AP from the current frame */ + target_ap = target_fp->saved_ap; + + /* Then move up to the next frame */ + target_fp = target_fp->saved_fp; + } + + /* We need to know how many registers were saved + in this call frame */ + save_mask = target_fp->save_mask; + + reg_count = 0; + while (save_mask) { + if (save_mask&1) { + reg_count++; + } + save_mask >>= 1; + } + + /* Skip back over the saved registers */ + target_sp = target_fp->saved_reg + reg_count; + + if (target_fp->calls) { + /* Skip over argument list on stack */ + target_sp += (target_ap->argc + 1); + } + + hex_dump(target_sp, 256); +} + +void dump_cur_regs(unsigned int frames) +{ + struct vax_call_frame *fp = NULL; + struct pt_regs regs; + unsigned int num_saved; + unsigned int reg; + unsigned int raw_psl; + + /* Grab the current registers */ + asm ("movq %%r0, %0" : "=g"(regs.r0) : ); + asm ("movq %%r2, %0" : "=g"(regs.r2) : ); + asm ("movq %%r4, %0" : "=g"(regs.r4) : ); + asm ("movq %%r6, %0" : "=g"(regs.r6) : ); + asm ("movq %%r8, %0" : "=g"(regs.r8) : ); + asm ("movq %%r10, %0" : "=g"(regs.r10) : ); + asm ("movq %%ap, %0" : "=g"(regs.ap) : ); + asm ("movq %%sp, %0" : "=g"(regs.sp) : ); + asm ("movpsl %0" : "=g"(regs.psl) : ); + + asm("movl %%fp, %0" : "=g"(fp) : ); + + /* We always pull saved registers from our + own stack frame */ + frames++; + while (frames--) { + + /* Get the saved PSW bits and mergs them into + the PSL */ + raw_psl = RAW_PSL(regs.psl); + + raw_psl &= ~0x7ff0; + raw_psl |= (fp->psw << 4); + + RAW_PSL(regs.psl) = raw_psl; + + regs.ap = (unsigned int)fp->saved_ap; + regs.fp = (unsigned int)fp->saved_fp; + regs.pc = (unsigned int)fp->saved_pc; + + /* Now we need to restore any general registers that + were saved in this frame */ + num_saved = 0; + for (reg=0; reg<12; reg++) { + if 
(fp->save_mask & (1<saved_reg[num_saved]; + num_saved++; + } + } + + /* Then move up to the next frame */ + fp = fp->saved_fp; + } + + show_regs(®s); +} + +/* Little convenience function -- temporary debugging aid - atp */ +void vaxpanic(char *reason) +{ + if (reason) + printk(KERN_CRIT "panic: %s\n", reason); + + printk(KERN_CRIT "\nStack dump\n"); + hex_dump((void *)__mfpr(PR_KSP), 256); + show_stack (current, NULL); + dump_cur_regs(1); /* us and parent */ + show_cpu_regs(); + machine_halt(); +} + diff -Nru a/arch/vax/kernel/semaphore.c b/arch/vax/kernel/semaphore.c --- a/arch/vax/kernel/semaphore.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/semaphore.c 2005-04-26 00:25:05 @@ -0,0 +1,165 @@ +/* + * Updated for new rwsem.h 2.4.4, Mar 2002 atp. + * + * VAX version (based on S390 version) + * Copyright (C) 2001, Kenn Humborg + * + * S390 version + * Copyright (C) 1998 IBM Corporation + * Author(s): Martin Schwidefsky + * + * Derived from "linux/arch/i386/kernel/semaphore.c + * Copyright (C) 1999, Linus Torvalds + * + */ +#include + +#include +#include + +/* + * Semaphores are implemented using a two-way counter: + * The "count" variable is decremented for each process + * that tries to acquire the semaphore, while the "sleeping" + * variable is a count of such acquires. + * + * Notably, the inline "up()" and "down()" functions can + * efficiently test if they need to do any extra work (up + * needs to do something only if count was negative before + * the increment operation. + * + * "sleeping" and the contention routine ordering is + * protected by the semaphore spinlock. + * + * Note that these functions are only called when there is + * contention on the lock, and as such all this is the + * "non-critical" part of the whole semaphore business. The + * critical part is the inline stuff in + * where we want to avoid any extra jumps and calls. + */ + +/* + * Logic: + * - only on a boundary condition do we need to care. 
When we go + * from a negative count to a non-negative, we wake people up. + * - when we go from a non-negative count to a negative do we + * (a) synchronize with the "sleeper" count and (b) make sure + * that we're on the wakeup list before we synchronize so that + * we cannot lose wakeup events. + */ + +void __up(struct semaphore *sem) +{ + wake_up(&sem->wait); +} + +static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED; + +void __down(struct semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + tsk->state = TASK_UNINTERRUPTIBLE; + add_wait_queue_exclusive(&sem->wait, &wait); + + spin_lock_irq(&semaphore_lock); + sem->sleepers++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irq(&semaphore_lock); + + schedule(); + tsk->state = TASK_UNINTERRUPTIBLE; + spin_lock_irq(&semaphore_lock); + } + spin_unlock_irq(&semaphore_lock); + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + wake_up(&sem->wait); +} + +int __down_interruptible(struct semaphore *sem) +{ + int retval = 0; + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + tsk->state = TASK_INTERRUPTIBLE; + add_wait_queue_exclusive(&sem->wait, &wait); + + spin_lock_irq(&semaphore_lock); + sem->sleepers ++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * With signals pending, this turns into + * the trylock failure case - we won't be + * sleeping, and we* can't get the lock as + * it has contention. Just correct the count + * and exit. + */ + if (signal_pending(current)) { + retval = -EINTR; + sem->sleepers = 0; + atomic_add(sleepers, &sem->count); + break; + } + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock. 
The + * "-1" is because we're still hoping to get + * the lock. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irq(&semaphore_lock); + + schedule(); + tsk->state = TASK_INTERRUPTIBLE; + spin_lock_irq(&semaphore_lock); + } + spin_unlock_irq(&semaphore_lock); + tsk->state = TASK_RUNNING; + remove_wait_queue(&sem->wait, &wait); + wake_up(&sem->wait); + return retval; +} + +/* + * Trylock failed - make sure we correct for + * having decremented the count. + */ +int __down_trylock(struct semaphore *sem) +{ + unsigned long flags; + int sleepers; + + spin_lock_irqsave(&semaphore_lock, flags); + sleepers = sem->sleepers + 1; + sem->sleepers = 0; + + /* + * Add "everybody else" and us into it. They aren't + * playing, because we own the spinlock. + */ + if (!atomic_add_negative(sleepers, &sem->count)) + wake_up(&sem->wait); + + spin_unlock_irqrestore(&semaphore_lock, flags); + return 1; +} + diff -Nru a/arch/vax/kernel/setup.c b/arch/vax/kernel/setup.c --- a/arch/vax/kernel/setup.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/setup.c 2005-05-24 09:55:13 @@ -0,0 +1,213 @@ +/* + * Copyright (C) 1995 Linus Torvalds + * VAX port copyright atp 1998. + */ + +/* + * Bootup setup stuff. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +extern char *kernel_cmd_line; /* Kernel command line from head.S */ +char command_line[COMMAND_LINE_SIZE]; + +/* Defined in arch/vax/mm/init.c */ +extern void paging_init(void); + +/* Linker will put this at the end of the kernel image */ +extern char _end; + +struct vaxcpu vax_cpu; + +/* + * Get CPU information for use by the procfs. 
+ */ +static int show_cpuinfo(struct seq_file *m, void *v) +{ + seq_printf(m, "cpu\t\t: VAX\n" + "cpu type\t: %s\n" + "cpu sid\t\t: 0x%08x\n" + "cpu sidex\t: 0x%08x\n" + "page size\t: %ld\n" + "BogoMIPS\t: %lu.%02lu\n", + (char *)mv->cpu_type_str(), + __mfpr(PR_SID), + mv->sidex, + PAGE_SIZE, + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return (void*)(*pos == 0); +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +static int vax_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + machine_halt(); + return NOTIFY_DONE; +} + +static struct notifier_block vax_panic_block = { + .notifier_call = vax_panic_notifier, + .priority = INT_MIN /* may not return; must be done last */ +}; + +void __init setup_arch(char **cmdline_p) +{ + unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 }; + unsigned int max_dma; + unsigned long bootmap_size; + unsigned long region_start; + unsigned long region_len; + + notifier_chain_register(&panic_notifier_list, &vax_panic_block); + + /* + * Save the command line from the boot block, before it gets + * stomped on. + */ + memcpy(command_line, kernel_cmd_line,(COMMAND_LINE_SIZE-1)); + *cmdline_p = command_line; + /* Save unparsed command line copy for /proc/cmdline */ + memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE-1); + saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; + printk("kernel_cmd_line %8p\n%s\n",kernel_cmd_line,kernel_cmd_line); + + /* Get the SID */ + vax_cpu.sid = __mfpr(PR_SID); + + /* We expand the system page table in paging_init, so + * it comes before the bootmem allocator. 
*/ + paging_init(); + + /* Initialize bootmem */ + + /* We don't have any holes in our physical memory layout, + so we throw everything into the bootmem allocator. + Eventually, we will get smarter and use the bad page lists + provided by the console ROM to map out faulty memory. + This also has the side effect of placing the bootmem bitmap + at the start of physical memory. init_bootmem() also + marks every page as reserved. We have to explicitly free + available memory ourselves. (max_pfn comes from RPB.) */ + +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) +#define PAGEALIGNUP(x) (((x) + PAGE_SIZE-1) & ~(PAGE_SIZE-1)) +#define PAGEALIGNDN(x) ((x) & ~(PAGE_SIZE-1)) +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) + + bootmap_size = init_bootmem(0, max_pfn); + + printk("bootmap size = %8.8lx\n", bootmap_size); + + /* + * Available memory is now the region from the end of the + * bootmem bitmap to the start of the kernel and from the + * end of the SPT to the end of memory + */ + region_start = PAGEALIGNUP(bootmap_size); + region_len = PAGEALIGNDN(KERNEL_START_PHYS) - region_start; + + printk("Calling free_bootmem(start=%08lx, len=%08lx)\n", + region_start, region_len); + free_bootmem(region_start, region_len); + + region_start = PAGEALIGNUP(__pa(SPT_BASE + SPT_SIZE)); + region_len = PAGEALIGNDN((max_pfn << PAGE_SHIFT)) - region_start; + + printk("Calling free_bootmem(start=%08lx, len=%08lx)\n", + region_start, region_len); + free_bootmem(region_start, region_len); + + max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; + + /* max_pfn is the number of 4K PTEs */ + if (max_pfn < max_dma) { + zones_size[ZONE_DMA] = max_pfn; + } else { + zones_size[ZONE_DMA] = max_dma; + zones_size[ZONE_NORMAL] = max_pfn - max_dma; + } + + free_area_init(zones_size); + + /* + * Set up the initial PCB. We can refer to current because head.S + * has already set us up on the kernel stack of task 0. 
+ */ + __mtpr(__pa(¤t->thread.pcb), PR_PCBB); + + memset(¤t->thread.pcb, 0, sizeof(current->thread.pcb)); + current->thread.pcb.astlvl = 4; + + /* swapper_pg_dir is a 4 x pgd_t array */ + SET_PAGE_DIR(current, swapper_pg_dir); + + /* No root filesystem yet */ + ROOT_DEV = Root_NFS; + + /* + * Inserted by D.A. - 8 Jun 2001 - THIS IS NECESSARY + * if not correct. + */ + flush_tlb(); + + /* + * Identify the flock of penguins. + */ + +#ifdef __SMP__ + setup_smp(); +#endif + +#ifdef CONFIG_VT +#ifdef CONFIG_DUMMY_CONSOLE + /* + * We need a dummy console up at cons_init time, otherwise there'll + * be no VTs allocated for the real fbdev console to later take over. + */ + conswitchp = &dummy_con; +#endif +#endif +} + diff -Nru a/arch/vax/kernel/signal.c b/arch/vax/kernel/signal.c --- a/arch/vax/kernel/signal.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/signal.c 2005-04-25 16:35:02 @@ -0,0 +1,720 @@ +/* + * linux/arch/vax/kernel/signal.c + * + * From arch/cris/kernel/signal.c + * + * Based on arch/i386/kernel/signal.c by + * Copyright (C) 1991, 1992 Linus Torvalds + * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * + * + * Ideas also taken from arch/arm. + * + * Copyright (C) 2000 Axis Communications AB + * + * Authors: Bjorn Wesen (bjornw@axis.com) + * VAX port atp@pergamentum.com. + * + David Airlie Copyright (C) 2003 + * See syscall.c for details of the call stack layout etc... + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#undef DEBUG_SIG + +/* FIXME: Check this & fixup other regs, like r0 */ +#define RESTART_VAX_SYSCALL(regs) { (regs)->pc -= 4; } + + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +int do_signal(sigset_t *oldset, struct pt_regs *regs); + +/* + * Atomically swap in the new signal mask, and wait for a signal. 
+ */ +int +sys_sigsuspend(struct pt_regs *regs, old_sigset_t mask) +{ + sigset_t saveset; + + mask &= _BLOCKABLE; + spin_lock_irq(¤t->sighand->siglock); + saveset = current->blocked; + siginitset(¤t->blocked, mask); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + regs->r0 = -EINTR; + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (do_signal(&saveset, regs)) + return -EINTR; + } +} + +/* + * atp - it is a little confusing, looking at other ports, as to what the arguments to + * this function are. I'm assuming two args, plus our pushed pt_regs set up by syscall + */ +int +sys_rt_sigsuspend(struct pt_regs *regs,sigset_t *unewset, size_t sigsetsize) +{ + sigset_t saveset, newset; + + /* XXX: Don't preclude handling different sized sigset_t's. */ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user(&newset, unewset, sizeof(newset))) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + + spin_lock_irq(¤t->sighand->siglock); + saveset = current->blocked; + current->blocked = newset; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + regs->r0 = -EINTR; + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (do_signal(&saveset, regs)) + return -EINTR; + } +} + +int +sys_sigaction(int sig, const struct old_sigaction *act, + struct old_sigaction *oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + if (act) { + old_sigset_t mask; + if (!access_ok(VERIFY_READ, act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + return -EFAULT; + __get_user(new_ka.sa.sa_flags, &act->sa_flags); + __get_user(mask, &act->sa_mask); + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + return -EFAULT; + __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + } + + return ret; +} + +int +sys_sigaltstack(const stack_t *uss, stack_t *uoss) +{ + struct pt_regs *regs = (struct pt_regs *) &uss; + + return do_sigaltstack(uss, uoss, regs->sp); +} + + +/* + * Do a signal return; undo the signal stack. + */ +struct sigframe { + int sig; + struct sigcontext sc; + unsigned long extramask[_NSIG_WORDS-1]; + unsigned char retcode[20]; /* trampoline code */ +}; + +struct rt_sigframe { + int sig; + struct siginfo *pinfo; + void *puc; + struct siginfo info; + struct ucontext uc; + unsigned char retcode[20]; /* trampoline code */ +}; + +static int +restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) +{ + unsigned int err = 0; + + /* + * Restore the regs from &sc->regs (same as sc, since regs is first) + * (sc is already checked for VERIFY_READ since the sigframe was + * checked in sys_sigreturn previously). + */ + + if (__copy_from_user(regs, sc, sizeof(struct pt_regs))) + goto badframe; + + /* FIXME: check user mode flag in restored regs PSW */ + + /* + * Restore the old USP as it was before we stacked the sc etc. + * (we cannot just pop the sigcontext since we aligned the sp and + * stuff after pushing it). + */ + + /* FIXME: check process stack */ + + /* + * TODO: the other ports use regs->orig_XX to disable syscall checks + * after this completes, but we don't use that mechanism. maybe we can + * use it now ? + */ + + return err; + +badframe: + return 1; +} + + +asmlinkage int sys_sigreturn(struct pt_regs *regs) +{ + struct sigframe *frame = (struct sigframe *) (regs->sp); + sigset_t set; + + /* + * Since we stacked the signal on a dword boundary, + * then frame should be dword aligned here. 
If it's + * not, then the user is trying to mess with us. + */ + if (((long) frame) & 3) + goto badframe; + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__get_user(set.sig[0], &frame->sc.oldmask) + || (_NSIG_WORDS > 1 + && __copy_from_user(&set.sig[1], &frame->extramask, + sizeof(frame->extramask)))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (restore_sigcontext(regs, &frame->sc)) + goto badframe; + + /* TODO: SIGTRAP when single-stepping as in arm ? */ + + return regs->r0; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + + +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe *frame = (struct rt_sigframe *) (regs->sp-8); + sigset_t set; + stack_t st; + + /* + * Since we stacked the signal on a dword boundary, + * then frame should be dword aligned here. If it's + * not, then the user is trying to mess with us. + */ + if (((long) frame) & 3) + goto badframe; + + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) + goto badframe; + /* + * It is more difficult to avoid calling this function than to + * call it and ignore errors. + */ + do_sigaltstack(&st, NULL, (regs->sp)); + + return regs->r0; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +/* + * Set up a signal frame. + */ +static int +setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask) +{ + int err = 0; + + /* copy the regs. 
they are first in sc so we can use sc directly */ + + err |= __copy_to_user(sc, regs, sizeof(struct pt_regs)); + + /* then some other stuff */ + + err |= __put_user(mask, &sc->oldmask); + + return err; +} + +/* figure out where we want to put the new signal frame - usually on the stack */ + +static inline void * +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) +{ + unsigned long sp = regs->sp; + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (! on_sig_stack(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + } + + /* make sure the frame is dword-aligned */ + + sp &= ~3; + + return (void *)(sp - frame_size); +} + +/* Grab and setup a signal frame. + * + * Basically we stack a lot of state info, and arrange for the + * user-mode program to return to the kernel using either a + * trampoline which performs the syscall sigreturn, or a provided + * user-mode trampoline. + */ +static void setup_frame(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs *regs) +{ + struct sigframe *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + +#ifdef DEBUG_SIG + printk("setup_frame: pid %d, sig %d, regs %p, regs->sp %p, frame %p, sigaction %p\n",current->pid,sig,regs,regs->sp,frame,ka); + show_regs(regs); +#endif + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + /* write the signal onto the stack */ + err |= __put_user(sig, (unsigned int *) &frame->sig); + err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); + if (err) + goto give_sigsegv; + + if (_NSIG_WORDS > 1) { + err |= __copy_to_user(frame->extramask, &set->sig[1], + sizeof(frame->extramask)); + } + if (err) + goto give_sigsegv; + + /* + * Set up to return from userspace. If provided, use a stub + * already in userspace. + * + * We do this differently to other ports. Each function has a two + * byte RSM (due to the calling convention). 
Each sighandler will + * expect to be CALLS'd and will RET from that. So we cant just muck + * about with PC's on the stack like the i386. So we use the + * trampoline code on the stack a bit more. The easiest way to skip + * around all this is to calls the signal handler, and then either + * calls the restorer, or chmk to sys_sigreturn. + */ + + /* CALLS $1, */ + err |= __put_user(0xfb, (char *) (frame->retcode + 0)); + err |= __put_user(0x01, (char *) (frame->retcode + 1)); + /* (absolute address)*/ + err |= __put_user(0x9f, (char *) (frame->retcode + 2)); + /* sighandler */ + err |= __put_user(((unsigned long) ka->sa.sa_handler), + (unsigned long *) (frame->retcode + 3)); + + if (ka->sa.sa_flags & SA_RESTORER) { + /* CALLS $0,*/ + err |= __put_user(0xfb, (char *) (frame->retcode + 7)); + err |= __put_user(0x00, (char *) (frame->retcode + 8)); + /* (absolute address)*/ + err |= __put_user(0x9f, (char *) (frame->retcode + 9)); + /* restorer */ + err |= __put_user(((unsigned long) ka->sa.sa_restorer), + (unsigned long *) (frame->retcode + 10)); + /* plus a halt */ + err |= __put_user(0x00, (char *) (frame->retcode + 14)); + } else { + /* + * Perform a syscall to sys_sigreturn. First set up the + * argument list to avoid confusing it. 
+ */ + + /* pushl $0x0 */ + err |= __put_user(0xdd, (char *) (frame->retcode + 7)); + err |= __put_user(0x00, (char *) (frame->retcode + 8)); + /* movl sp, ap */ + err |= __put_user(0xd0, (char *) (frame->retcode + 9)); + err |= __put_user(0x5e, (char *) (frame->retcode + 10)); + err |= __put_user(0x5c, (char *) (frame->retcode + 11)); + /* chmk __NR_sigreturn; */ + err |= __put_user(0xbc, (char *) (frame->retcode + 12)); + err |= __put_user(0x8f, (char *) (frame->retcode + 13)); + err |= __put_user(__NR_sigreturn, (short *) (frame->retcode + 14)); + /* plus a halt */ + err |= __put_user(0x00, (char *)(frame->retcode+16)); + } + + if (err) + goto give_sigsegv; + +#ifdef DEBUG_SIG + printk("setup_frame: pid %d, frame->retcode %p, sa_handler %p\n", + current->pid, + frame->retcode, + ka->sa.sa_handler); +#endif + /* Set up registers for signal handler. */ + regs->pc = (unsigned long) frame->retcode; /* What we enter NOW. */ + regs->fp = regs->sp; + regs->sp = (unsigned int) frame; + __mtpr(frame,PR_USP); /* and into to the register, ready for REI */ + +#ifdef DEBUG_SIG + printk("setup_frame: pid %d, regs->pc %8lx, regs->sp %8lx, regs->ap %8lx\n", + current->pid, + regs->pc, + regs->sp, + regs->ap); + { + unsigned char c[4]; + __get_user(c[0], (char *) &frame->sig + 0); + __get_user(c[1], (char *) &frame->sig + 1); + __get_user(c[2], (char *) &frame->sig + 2); + __get_user(c[3], (char *) &frame->sig + 3); + printk("setup_frame: %p %1x %p %1x %p %1x %p %1x\n", + &frame->sig + 0, c[0], + &frame->sig + 1, c[1], + &frame->sig + 2, c[2], + &frame->sig + 3, c[3]); + } + { + unsigned char c[4]; + __get_user(c[0], (char *) frame->retcode + 0); + __get_user(c[1], (char *) frame->retcode + 1); + __get_user(c[2], (char *) frame->retcode + 2); + __get_user(c[3], (char *) frame->retcode + 3); + printk("setup_frame: %p %1x %p %1x %p %1x %p %1x\n", + frame->retcode + 0, c[0], + frame->retcode + 1, c[1], + frame->retcode + 2, c[2], + frame->retcode + 3, c[3]); + } + { + unsigned char 
c[4]; + __get_user(c[0], (char *) frame->retcode + 4); + __get_user(c[1], (char *) frame->retcode + 5); + __get_user(c[2], (char *) frame->retcode + 6); + __get_user(c[3], (char *) frame->retcode + 7); + printk("setup_frame: %p %1x %p %1x %p %1x %p %1x\n", + frame->retcode + 4, c[0], + frame->retcode + 5, c[1], + frame->retcode + 6, c[2], + frame->retcode + 7, c[3]); + } + { + unsigned char c[4]; + __get_user(c[0], (char *) frame->retcode + 8); + __get_user(c[1], (char *) frame->retcode + 9); + __get_user(c[2], (char *) frame->retcode + 10); + __get_user(c[3], (char *) frame->retcode + 11); + printk("setup_frame: %p %1x %p %1x %p %1x %p %1x\n", + frame->retcode + 8, c[0], + frame->retcode + 9, c[1], + frame->retcode + 10, c[2], + frame->retcode + 11, c[3]); + } + { + unsigned char c[4]; + __get_user(c[0], (char *) frame->retcode + 12); + __get_user(c[1], (char *) frame->retcode + 13); + __get_user(c[2], (char *) frame->retcode + 14); + __get_user(c[3], (char *) frame->retcode + 15); + printk("setup_frame: %p %1x %p %1x %p %1x %p %1x\n", + frame->retcode + 12, c[0], + frame->retcode + 13, c[1], + frame->retcode + 14, c[2], + frame->retcode + 15, c[3]); + } +#endif + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs * regs) +{ + struct rt_sigframe *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + +#ifdef DEBUG_SIG + printk("setup_rt_frame: pid %d, sig %d, regs %p, regs->sp %p, frame %p, sigaction %p\n",current->pid,sig,regs,regs->sp,frame,ka); + show_regs(regs); +#endif + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + err |= __put_user(sig, (unsigned int *)&frame->sig); + err |= __put_user(&frame->info, &frame->pinfo); + err |= __put_user(&frame->uc, &frame->puc); + + err |= copy_siginfo_to_user(&frame->info, info); + if (err) + goto 
give_sigsegv; + + /* Clear all the bits of the ucontext we don't use. */ + err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + goto give_sigsegv; + + /* + * Set up to return from userspace. If provided, use a stub + * already in userspace. + */ + + /* + * We do this differently to other ports. Each function has a two byte RSM. + * (due to the calling convention). Each sighandler will expect to be + * CALLS'd and will RET from that. So we cant just muck about with PC's on the + * stack like the i386. So we use the trampoline code on the stack a bit more. + * The easiest way to skip around all this is to calls the signal + * handler, and then either calls the restorer, or chmk to sys_sigreturn. + */ + + /* CALLS $3, */ + err |= __put_user(0xfb, (char *) (frame->retcode + 0)); + err |= __put_user(0x03, (char *) (frame->retcode + 1)); + /* (absolute address)*/ + err |= __put_user(0x9f, (char *) (frame->retcode + 2)); + /* sighandler */ + err |= __put_user(((unsigned long) ka->sa.sa_handler), + (unsigned long *) (frame->retcode + 3)); + + if (ka->sa.sa_flags & SA_RESTORER) { + /* CALLS $0,*/ + err |= __put_user(0xfb, (char *) (frame->retcode + 7)); + err |= __put_user(0x00, (char *) (frame->retcode + 8)); + /* (absolute address)*/ + err |= __put_user(0x9f, (char *) (frame->retcode + 9)); + /* restorer */ + err |= __put_user(((unsigned long) ka->sa.sa_restorer), + (unsigned long *) (frame->retcode + 10)); + /* plus a halt */ + err |= __put_user(0x00, (char *) (frame->retcode + 14)); + } else { + /* + * Perform a syscall to sys_sigreturn. First set up the + * argument list to avoid confusing it. 
+ */ + + /* pushl $0x0 */ + err |= __put_user(0xdd, (char *) (frame->retcode + 7)); + err |= __put_user(0x00, (char *) (frame->retcode + 8)); + /* movl sp, ap */ + err |= __put_user(0xd0, (char *) (frame->retcode + 9)); + err |= __put_user(0x5e, (char *) (frame->retcode + 10)); + err |= __put_user(0x5c, (char *) (frame->retcode + 11)); + /* chmk __NR_sigreturn; */ + err |= __put_user(0xbc, (char *) (frame->retcode + 12)); + err |= __put_user(0x8f, (char *) (frame->retcode + 13)); + err |= __put_user(__NR_rt_sigreturn, (short *) (frame->retcode + 14)); + /* plus a halt */ + err |= __put_user(0x00, (char *) (frame->retcode + 16)); + } + + if (err) + goto give_sigsegv; + + /* TODO what is the current->exec_domain stuff and invmap ? */ + +#ifdef DEBUG_SIG + printk("setup_rt_frame: pid %d, frame->retcode %p, sa_handler %p usp %8lX\n", + current->pid, + frame->retcode, + ka->sa.sa_handler, + __mfpr(PR_USP)); +#endif /* Set up registers for signal handler */ + + regs->pc = (unsigned long) frame->retcode; /* what we enter NOW */ + regs->fp = regs->sp; + regs->sp = (unsigned int)frame; /* what we enter LATER */ + __mtpr(frame, PR_USP); + + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +/* + * OK, we're invoking a handler. + */ +static inline void +handle_signal(int canrestart, unsigned long sig, struct k_sigaction *ka, + siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) +{ + /* Are we from a system call? */ + if (canrestart) { + /* If so, check system call restarting.. 
*/ + switch (regs->r0) { + case -ERESTART_RESTARTBLOCK: + current_thread_info()->restart_block.fn = do_no_restart_syscall; + /* fallthrough */ + + case -ERESTARTNOHAND: + /* ERESTARTNOHAND means that the syscall should only be + restarted if there was no handler for the signal, and since + we only get here if there is a handler, we dont restart */ + regs->r0 = -EINTR; + break; + + case -ERESTARTSYS: + /* ERESTARTSYS means to restart the syscall if there is no + handler or the handler was registered with SA_RESTART */ + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->r0 = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + /* ERESTARTNOINTR means that the syscall should be called again + after the signal handler returns. */ + RESTART_VAX_SYSCALL(regs); + } + } + + /* Set up the stack frame */ +#ifdef DEBUG_SIG + printk("handle_signal: setup_frame(sig=%d,flags=%d,ka=%p,oldset=%d,regs=%p)\n",sig,ka->sa.sa_flags,ka,oldset,regs); +#endif + if (ka->sa.sa_flags & SA_SIGINFO) + setup_rt_frame(sig, ka, info, oldset, regs); + else + setup_frame(sig, ka, oldset, regs); + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + if (!(ka->sa.sa_flags & SA_NODEFER)) { + spin_lock_irq(&current->sighand->siglock); + sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); + sigaddset(&current->blocked,sig); + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + } +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + */ +int do_signal(sigset_t *oldset, struct pt_regs *regs) +{ + siginfo_t info; + int signr; + int canrestart; + struct k_sigaction ka; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. Just return without doing anything + * if so. 
+ */ + if (!user_mode(regs)) + return 1; + + /* FIXME: */ + canrestart=regs->r0; +#ifdef DEBUG_SIG + printk("do_signal: pid %d,canrestart %d, current->sigpending %d,current->blocked %d ", current->pid,canrestart,current->sigpending,current->blocked); +#endif + if (!oldset) + oldset = &current->blocked; + + + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + if (signr > 0) { + /* Whee! Actually deliver the signal. */ + handle_signal(canrestart, signr, &ka, &info, oldset, regs); + return 1; + } + + /* Did we come from a system call? */ + if (canrestart) { + /* Restart the system call - no handlers present */ + if (regs->r0 == -ERESTARTNOHAND + || regs->r0 == -ERESTARTSYS + || regs->r0 == -ERESTARTNOINTR) { + RESTART_VAX_SYSCALL(regs); + } + } + + return 0; +} + diff -Nru a/arch/vax/kernel/syscall.c b/arch/vax/kernel/syscall.c --- a/arch/vax/kernel/syscall.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/syscall.c 2005-10-31 15:10:21 @@ -0,0 +1,529 @@ +/* + * This file handles syscalls. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "interrupt.h" /* Local, private definitions */ + +/* ./arch/vax/kernel/syscall.c */ +extern int sys_pipe(unsigned long *fildes); +extern unsigned long sys_mmap(unsigned long addr, size_t len, int prot, + int flags, int fd, off_t offset); +extern int sys_ipc (uint call, int first, int second, int third, void *ptr, + long fifth); +extern asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, unsigned long fd, + unsigned long pgoff); +/* ./arch/vax/kernel/process.c */ +extern int sys_fork(struct pt_regs regs); +extern int sys_vfork(struct pt_regs *regs); +extern int sys_execve(char *filename, char **argv, char **envp, + struct pt_regs *regs); +extern int sys_clone(unsigned long clone_flags, unsigned long newsp, + struct pt_regs 
*regs); +/* ./arch/vax/kernel/ptrace.c */ +extern asmlinkage long sys_ptrace(long request, long pid, long addr, long data); +/* ./arch/vax/kernel/signal.c */ +extern int sys_sigaction(int sig, const struct old_sigaction *act, + struct old_sigaction *oact); +extern int sys_sigsuspend(struct pt_regs *regs, old_sigset_t mask); +extern asmlinkage int sys_sigreturn(struct pt_regs *regs); +extern asmlinkage int sys_rt_sigreturn(struct pt_regs *regs); +extern int sys_rt_sigsuspend(struct pt_regs *regs,sigset_t *unewset, + size_t sigsetsize); +extern int sys_sigaltstack(const stack_t *uss, stack_t *uoss); +/* ./kernel/signal.c */ +extern asmlinkage long sys_rt_sigaction(int sig, + const struct sigaction __user *act, + struct sigaction __user *oact, size_t sigsetsize); + + +static struct { + unsigned long *sc_func; + unsigned int nr_args; +} syscall[] = { +#define SC(num, func, args) \ + [num] = { \ + .sc_func = (unsigned long *) &func, \ + .nr_args = args, \ + } + SC (__NR_exit, sys_exit, 1), + SC (__NR_fork, sys_fork, 0), + SC (__NR_read, sys_read, 3), + SC (__NR_write, sys_write, 3), + SC (__NR_open, sys_open, 3), + SC (__NR_close, sys_close, 1), + SC (__NR_waitpid, sys_waitpid, 3), + SC (__NR_creat, sys_creat, 2), + SC (__NR_link, sys_link, 2), + SC (__NR_unlink, sys_unlink, 1), + SC (__NR_execve, sys_execve, 3), + SC (__NR_chdir, sys_chdir, 1), + SC (__NR_time, sys_time, 1), + SC (__NR_mknod, sys_mknod, 3), + SC (__NR_chmod, sys_chmod, 2), + SC (__NR_lchown, sys_lchown16, 3), + SC (__NR_lseek, sys_lseek, 3), + SC (__NR_getpid, sys_getpid, 0), + SC (__NR_mount, sys_mount, 5), + SC (__NR_umount, sys_oldumount, 2), + SC (__NR_setuid, sys_setuid16, 1), + SC (__NR_getuid, sys_getuid16, 0), + SC (__NR_stime, sys_stime, 1), + SC (__NR_ptrace, sys_ptrace, 4), + SC (__NR_alarm, sys_alarm, 1), + SC (__NR_pause, sys_pause, 0), + SC (__NR_utime, sys_utime, 2), + SC (__NR_access, sys_access, 2), + SC (__NR_nice, sys_nice, 1), + SC (__NR_sync, sys_sync, 0), + SC (__NR_kill, 
sys_kill, 2), + SC (__NR_rename, sys_rename, 2), + SC (__NR_mkdir, sys_mkdir, 2), + SC (__NR_mkdir, sys_mkdir, 2), + SC (__NR_rmdir, sys_rmdir, 1), + SC (__NR_dup, sys_dup, 1), + SC (__NR_pipe, sys_pipe, 1), + SC (__NR_times, sys_times, 1), + SC (__NR_brk, sys_brk, 1), + SC (__NR_setgid, sys_setgid16, 1), + SC (__NR_getgid, sys_getgid16, 0), + SC (__NR_signal, sys_signal, 2), + SC (__NR_geteuid, sys_geteuid16, 0), + SC (__NR_getegid, sys_getegid16, 0), + SC (__NR_acct, sys_acct, 1), + SC (__NR_umount2, sys_oldumount, 2), + SC (__NR_ioctl, sys_ioctl, 3), + SC (__NR_fcntl, sys_fcntl, 3), + SC (__NR_setpgid, sys_setpgid, 2), + SC (__NR_umask, sys_umask, 1), + SC (__NR_chroot, sys_chroot, 1), + SC (__NR_ustat, sys_ustat, 2), + SC (__NR_dup2, sys_dup2, 2), + SC (__NR_getppid, sys_getppid, 0), + SC (__NR_getpgrp, sys_getpgrp, 0), + SC (__NR_setsid, sys_setsid, 0), + SC (__NR_sigaction, sys_sigaction, 3), + SC (__NR_sgetmask, sys_sgetmask, 0), + SC (__NR_ssetmask, sys_ssetmask, 1), + SC (__NR_setreuid, sys_setreuid16, 2), + SC (__NR_setregid, sys_setregid16, 2), + SC (__NR_sigsuspend, sys_sigsuspend, 1), + SC (__NR_sigpending, sys_sigpending, 1), + SC (__NR_sethostname, sys_sethostname, 2), + SC (__NR_setrlimit, sys_setrlimit, 2), + SC (__NR_old_getrlimit, sys_old_getrlimit, 2), + SC (__NR_getrusage, sys_getrusage, 2), + SC (__NR_gettimeofday, sys_gettimeofday, 2), + SC (__NR_settimeofday, sys_settimeofday, 2), + SC (__NR_getgroups, sys_getgroups16, 2), + SC (__NR_setgroups, sys_setgroups16, 2), + SC (__NR_symlink, sys_symlink, 2), + SC (__NR_readlink, sys_readlink, 3), + SC (__NR_uselib, sys_uselib, 1), + SC (__NR_swapon, sys_swapon, 2), + SC (__NR_reboot, sys_reboot, 4), + SC (__NR_mmap, sys_mmap, 6), + SC (__NR_munmap, sys_munmap, 2), + SC (__NR_truncate, sys_truncate, 2), + SC (__NR_ftruncate, sys_ftruncate, 2), + SC (__NR_fchmod, sys_fchmod, 2), + SC (__NR_fchown, sys_fchown16, 3), + SC (__NR_getpriority, sys_getpriority, 2), + SC (__NR_setpriority, sys_setpriority, 
3), + SC (__NR_statfs, sys_statfs, 2), + SC (__NR_fstatfs, sys_fstatfs, 2), + SC (__NR_socketcall, sys_socketcall, 2), + SC (__NR_syslog, sys_syslog, 3), + SC (__NR_setitimer, sys_setitimer, 3), + SC (__NR_getitimer, sys_getitimer, 2), + SC (__NR_stat, sys_newstat, 2), + SC (__NR_lstat, sys_newlstat, 2), + SC (__NR_fstat, sys_newfstat, 2), + SC (__NR_vhangup, sys_vhangup, 0), + SC (__NR_wait4, sys_wait4, 4), + SC (__NR_swapoff, sys_swapoff, 2), + SC (__NR_sysinfo, sys_sysinfo, 1), + SC (__NR_ipc, sys_ipc, 6), + SC (__NR_fsync, sys_fsync, 1), + SC (__NR_sigreturn, sys_sigreturn, 0), + SC (__NR_clone, sys_clone, 2), + SC (__NR_setdomainname, sys_setdomainname, 2), + SC (__NR_uname, sys_newuname, 1), + SC (__NR_adjtimex, sys_adjtimex, 1), + SC (__NR_mprotect, sys_mprotect, 3), + SC (__NR_sigprocmask, sys_sigprocmask, 3), + SC (__NR_init_module, sys_init_module, 5), + SC (__NR_delete_module, sys_delete_module, 3), + SC (__NR_quotactl, sys_quotactl, 4), + SC (__NR_getpgid, sys_getpgid, 1), + SC (__NR_fchdir, sys_fchdir, 1), + SC (__NR_bdflush, sys_bdflush, 2), + SC (__NR_sysfs, sys_sysfs, 3), + SC (__NR_personality, sys_personality, 1), + SC (__NR_setfsuid, sys_setfsuid16, 1), + SC (__NR_setfsgid, sys_setfsgid16, 1), + SC (__NR__llseek, sys_llseek, 5), + SC (__NR_getdents, sys_getdents, 3), + SC (__NR__newselect, sys_select, 5), + SC (__NR_flock, sys_flock, 2), + SC (__NR_msync, sys_msync, 3), + SC (__NR_readv, sys_readv, 3), + SC (__NR_writev, sys_writev, 3), + SC (__NR_getsid, sys_getsid, 1), + SC (__NR_fdatasync, sys_fdatasync, 1), + SC (__NR__sysctl, sys_sysctl, 1), + SC (__NR_mlock, sys_mlock, 2), + SC (__NR_munlock, sys_munlock, 2), + SC (__NR_mlockall, sys_mlockall, 1), + SC (__NR_munlockall, sys_munlockall, 0), + SC (__NR_nanosleep, sys_nanosleep, 2), + SC (__NR_mremap, sys_mremap, 4), + SC (__NR_setresuid, sys_setresuid16, 3), + SC (__NR_getresuid, sys_getresuid16, 3), + SC (__NR_poll, sys_poll, 3), + SC (__NR_nfsservctl, sys_nfsservctl, 3), + SC 
(__NR_setresgid, sys_setresgid16, 3), + SC (__NR_getresgid, sys_getresgid16, 3), + SC (__NR_prctl, sys_prctl, 5), + SC (__NR_sched_setparam, sys_sched_setparam, 2), + SC (__NR_sched_getparam, sys_sched_getparam, 2), + SC (__NR_sched_setscheduler, sys_sched_setscheduler, 3), + SC (__NR_sched_getscheduler, sys_sched_getscheduler, 1), + SC (__NR_sched_yield, sys_sched_yield, 0), + SC (__NR_sched_get_priority_max,sys_sched_get_priority_max, 1), + SC (__NR_sched_get_priority_min,sys_sched_get_priority_min, 1), + SC (__NR_sched_rr_get_interval, sys_sched_rr_get_interval, 2), + SC (__NR_rt_sigreturn, sys_rt_sigreturn, 0), + SC (__NR_rt_sigaction, sys_rt_sigaction, 4), + SC (__NR_rt_sigprocmask, sys_rt_sigprocmask, 4), + SC (__NR_rt_sigpending, sys_rt_sigpending, 2), + SC (__NR_rt_sigtimedwait, sys_rt_sigtimedwait, 4), + SC (__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, 3), + SC (__NR_rt_sigsuspend, sys_rt_sigsuspend, 2), + SC (__NR_pread64, sys_pread64, 4), + SC (__NR_pwrite64, sys_pwrite64, 4), + SC (__NR_chown, sys_chown16, 3), + SC (__NR_getcwd, sys_getcwd, 2), + SC (__NR_capget, sys_capget, 2), + SC (__NR_capset, sys_capset, 2), + SC (__NR_sigaltstack, sys_sigaltstack, 2), + SC (__NR_sendfile, sys_sendfile, 4), + SC (__NR_vfork, sys_vfork, 0), + SC (__NR_getrlimit, sys_getrlimit, 2), + SC (__NR_mmap2, sys_mmap2, 6), + SC (__NR_truncate64, sys_truncate64, 2), + SC (__NR_ftruncate64, sys_ftruncate64, 2), + SC (__NR_stat64, sys_stat64, 2), + SC (__NR_lstat64, sys_lstat64, 2), + SC (__NR_fstat64, sys_fstat64, 2), + SC (__NR_lchown32, sys_lchown, 3), + SC (__NR_getuid32, sys_getuid, 0), + SC (__NR_getgid32, sys_getgid, 0), + SC (__NR_geteuid32, sys_geteuid, 0), + SC (__NR_getegid32, sys_getegid, 0), + SC (__NR_setreuid32, sys_setreuid, 2), + SC (__NR_setregid32, sys_setregid, 2), + SC (__NR_getgroups32, sys_getgroups, 2), + SC (__NR_setgroups32, sys_setgroups, 2), + SC (__NR_fchown32, sys_fchown, 3), + SC (__NR_setresuid32, sys_setresuid, 3), + SC (__NR_getresuid32, 
sys_getresuid, 3), + SC (__NR_setresgid32, sys_setresgid, 3), + SC (__NR_getresgid32, sys_getresgid, 3), + SC (__NR_chown32, sys_chown, 3), + SC (__NR_setuid32, sys_setuid, 1), + SC (__NR_setgid32, sys_setgid, 1), + SC (__NR_setfsuid32, sys_setfsuid, 1), + SC (__NR_setfsgid32, sys_setfsgid, 1), + SC (__NR_pivot_root, sys_pivot_root, 2), + SC (__NR_mincore, sys_mincore, 3), + SC (__NR_madvise, sys_madvise, 3), + SC (__NR_getdents64, sys_getdents64, 3), + SC (__NR_fcntl64, sys_fcntl64, 3), + SC (__NR_tkill, sys_tkill, 3), + SC (__NR_statfs64, sys_statfs64, 2), + SC (__NR_fstatfs64, sys_fstatfs64, 2), +#undef SC +}; + +void syscall_handler(struct pt_regs *regs, void *excep_info) +{ + unsigned int sc_number; + unsigned int *user_ap; + unsigned int nr_args; + + sc_number = *(unsigned int *)(excep_info); + + /* + * Check if the called syscall is known at all and that it isn't + * a no-longer supported legacy syscall. + */ + if (unlikely (sc_number >= ARRAY_SIZE (syscall) || + !syscall[sc_number].sc_func)) { + printk (KERN_DEBUG "%s(%d): syscall %d out of range or not " + "implemented.\n", current->comm, current->pid, + sc_number); + printk (KERN_DEBUG "Please report to " + ".\n"); + regs->r0 = -ENOSYS; + return; + } + + /* Syscall arguments */ + user_ap = (unsigned int *)(regs->ap); + + if (likely (regs->psl.prevmode == PSL_MODE_USER)) { + /* + * User Mode Syscall Handling - check access to arguments. + */ + + if (user_ap >= (unsigned int *)0x80000000) { + regs->r0 = -EFAULT; + return; + } + + /* + * We don't need to deal with the case where AP + nr_args*4 + * reaches up into S0 space because we've got a guard page + * at 0x80000000 that will cause an exception in the movc3 + * below that copies the argument list. 
+ */ + if (get_user(nr_args, user_ap)) { + regs->r0 = -EFAULT; + return; + } + + /* + * The SP value in the pt_regs structure should really + * be the user stack pointer, not the kernel stack pointer + */ + regs->sp = __mfpr(PR_USP); + } else { + /* + * Kernel Mode Syscall Handling - no need to check access to arguments. + */ + nr_args = *user_ap; + } + +#ifdef CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS + /* + * Check number of syscall arguments + */ + if (unlikely (syscall[sc_number].nr_args != nr_args)) { + printk (KERN_DEBUG "%s(%d): stack mismatch (should=%d, caller=%d) on syscall %d\n", + current->comm, current->pid, + syscall[sc_number].nr_args, nr_args, sc_number); + printk (KERN_DEBUG "Please report to " + ".\n"); +#ifdef CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS_ABORT + regs->r0 = -EFAULT; + return; +#endif /* CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS_ABORT */ + } +#endif /* CONFIG_DEBUG_VAX_CHECK_CHMx_ARGS */ + + /* + * We pass all the user-supplied args plus the pointer to the + * regs to the syscall function. If the syscall is implemented + * in the core kernel, then it will ignore the additional + * argument. 
+ */ + __asm__( + " pushl %1 \n" + " subl2 %2,%%sp \n" + "1: movc3 %2,4(%4),(%%sp) \n" + " calls %3, %5 \n" + " brb 3f \n" + "2: movl %6, %%r0 \n" + "3: movl %%r0, %0 \n" + ".section ex_table,\"a\" \n" + ".align 2 \n" + ".long 1b, 2b \n" + ".text \n" + : "=g"(regs->r0) /* 0 - syscall return value */ + : "g"(regs), /* 1 - regs ptr */ + "g"(nr_args * 4), /* 2 - number of syscall argument bytes to copy */ + "g"(nr_args + 1), /* 3 - number of syscall arguments + explicit "regs" ptr*/ + "r"(user_ap), /* 4 - source for syscall arguments */ + "g"(*syscall[sc_number].sc_func), /* 5 - syscall function ptr */ + "g"(-EFAULT) /* 6 - Return -EFAULT if calling the syscall failed */ + : "r0", "r1", "r2", "r3", "r4", "r5"); + + return; +} + +int sys_pipe(unsigned long *fildes) +{ + int fd[2]; + int error; + + lock_kernel(); + error = do_pipe(fd); + unlock_kernel(); + if (!error) { + if (copy_to_user(fildes, fd, 2*sizeof(int))) + error = -EFAULT; + } + return error; +} + +/* + * sys_ipc() is the de-multiplexer for the SysV IPC calls.. + * + * This is really horribly ugly. 
+ */ +int sys_ipc (uint call, int first, int second, int third, void *ptr, + long fifth) +{ +#ifdef CONFIG_SYSVIPC + int ret; + + switch (call) { + case SEMOP: + return sys_semop (first, (struct sembuf *)ptr, second); + + case SEMGET: + return sys_semget (first, second, third); + + case SEMCTL: { + union semun fourth; + if (!ptr) + return -EINVAL; + if (get_user(fourth.__pad, (void **) ptr)) + return -EFAULT; + return sys_semctl (first, second, third, fourth); + } + + case MSGSND: + return sys_msgsnd (first, (struct msgbuf *) ptr, second, third); + break; + + case MSGRCV: + return sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, + third); + case MSGGET: + return sys_msgget ((key_t) first, second); + + case MSGCTL: + return sys_msgctl (first, second, (struct msqid_ds *) ptr); + + case SHMAT: { + ulong raddr; + ret = do_shmat (first, (char *) ptr, second, &raddr); + if (ret) + return ret; + return put_user (raddr, (ulong *) third); + } + + case SHMDT: + return sys_shmdt ((char *)ptr); + + case SHMGET: + return sys_shmget (first, second, third); + + case SHMCTL: + return sys_shmctl (first, second, (struct shmid_ds *) ptr); + + default: + return -EINVAL; + + } + + return -EINVAL; +#else /* CONFIG_SYSVIPC */ + return -ENOSYS; +#endif /* CONFIG_SYSVIPC */ +} + +int sys_uname(struct old_utsname * name) +{ + int err; + + if (!name) + return -EFAULT; + + down_read(&uts_sem); + err = copy_to_user(name, &system_utsname, sizeof (*name)); + up_read(&uts_sem); + + return err? 
-EFAULT: 0; +} + +unsigned long sys_mmap(unsigned long addr, size_t len, int prot, + int flags, int fd, off_t offset) +{ + struct file * file = NULL; + unsigned long error = -EFAULT; + + lock_kernel(); + if (!(flags & MAP_ANONYMOUS)) { + error = -EBADF; + file = fget(fd); + if (!file) + goto out; + } + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + error = do_mmap(file, addr, len, prot, flags, offset); + if (file) + fput(file); + +out: + unlock_kernel(); + return error; +} + +/* common code for old and new mmaps */ +static inline long do_mmap2( + unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + int error = -EBADF; + struct file * file = NULL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + goto out; + } + + down_write(&current->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + up_write(&current->mm->mmap_sem); + + if (file) + fput(file); + +out: + return error; +} + +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + return do_mmap2(addr, len, prot, flags, fd, pgoff); +} + diff -Nru a/arch/vax/kernel/time.c b/arch/vax/kernel/time.c --- a/arch/vax/kernel/time.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/time.c 2005-10-03 14:41:51 @@ -0,0 +1,341 @@ +/* + * Copyright (C) 1995 Linus Torvalds + * VAX port copyright atp 1998. + * (C) 2000 Erik Mouw + * + * 22-oct-2000: Erik Mouw + * Added some simple do_gettimeofday() and do_settimeofday() + * functions. Not tested due to lack of disk space. + * + * 24 Apr 2002: atp. Finally got round to doing this properly. + * We now use the CMOS clock. 
+ */ + +/* + * Time handling on VAXen + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +u64 jiffies_64; + +spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; +extern unsigned long wall_jiffies; /* kernel/timer.c */ + +#define TICK_SIZE (tick_nsec / 1000) + +/* last time the cmos clock got updated */ +static long last_rtc_update; + +/* protos */ +static int set_rtc_mmss(unsigned long nowtime); +static irqreturn_t do_timer_interrupt(int vec_num, void *dev_id, struct pt_regs *regs); +static unsigned long do_gettimeoffset(void); +void time_init(void); +unsigned long get_cmos_time(void); + +void __init time_init(void) +{ + /* Initialise the hardware clock */ + if (mv->clock_init) { + printk (KERN_DEBUG "Calling mv->clock_init()\n"); + mv->clock_init(); + } else + printk (KERN_DEBUG "No mv->clock_init(), so not calling it...\n"); + + /* Read CMOS time */ + xtime.tv_nsec = 0; + xtime.tv_sec = get_cmos_time(); + wall_to_monotonic.tv_sec = -xtime.tv_sec; + wall_to_monotonic.tv_nsec = -xtime.tv_nsec; + + if (request_irq(0x30, do_timer_interrupt, 0, "timer", NULL)) { + printk("Panic: unable to register timer interrupt handler\n"); + HALT; + } + + /* + * Some VAX CPUs are hardwired to trigger interrupts at 100Hz, + * so we need to pay attention to HZ always being 100 for + * compatibility reasons. For all other machines, we need to + * supply a value (initial counter--an interrupt is triggered upon + * overflow while this value is incremented at a 1µs interval) + * to get more than one interrupt per hour:-) + */ + if (mv->nicr_required) + __mtpr(0xffffffff - 1000000/HZ, PR_NICR); + + /* Set the clock ticking and enable clock interrupts */ + __mtpr(ICCS_ERROR | ICCS_INTERRUPT | /* clear error and interrupt bits */ + ICCS_TRANSFER | /* Load ICR from NICR */ + ICCS_INTENABLE | /* enable interrupts... 
*/ + ICCS_RUN, PR_ICCS); /* ...and go */ +} + + +/* + * In order to set the CMOS clock precisely, set_rtc_mmss has to be + * called 500 ms after the second nowtime has started, because when + * nowtime is written into the registers of the CMOS clock, it will + * jump to the next second precisely 500 ms later. Check the Motorola + * MC146818A or Dallas DS12887 data sheet for details. + * + * BUG: This routine does not handle hour overflow properly; it just + * sets the minutes. Usually you'll only notice that after reboot! + */ +static int set_rtc_mmss(unsigned long nowtime) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + unsigned char save_control, save_freq_select; + + /* gets recalled with irq locally disabled */ + spin_lock(&rtc_lock); + save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + cmos_minutes = CMOS_READ(RTC_MINUTES); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + BCD_TO_BIN(cmos_minutes); + + /* + * since we're only adjusting minutes and seconds, + * don't interfere with hour overflow. 
This avoids + * messing with unknown time zones but requires your + * RTC not to be off by more than 15 minutes + */ + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) + real_minutes += 30; /* correct for half hour time zone */ + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BIN_TO_BCD(real_seconds); + BIN_TO_BCD(real_minutes); + } + CMOS_WRITE(real_seconds,RTC_SECONDS); + CMOS_WRITE(real_minutes,RTC_MINUTES); + } else { + printk(KERN_WARNING + "set_rtc_mmss: can't update from %d to %d\n", + cmos_minutes, real_minutes); + retval = -1; + } + + /* The following flags have to be released exactly in this order, + * otherwise the DS12887 (popular MC146818A clone with integrated + * battery and quartz) will not reset the oscillator and will not + * update precisely 500 ms later. You won't find this mentioned in + * the Dallas Semiconductor data sheets, but who believes data + * sheets anyway ... -- Markus Kuhn + */ + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + spin_unlock(&rtc_lock); + + return retval; +} + + +/* This is the interrupt service routine for the timer interrupt */ +static irqreturn_t do_timer_interrupt(int vec_num, void *dev_id, struct pt_regs *regs) +{ + unsigned int iccs; + + /* + * Here we are in the timer irq handler. We just have irqs locally + * disabled but we don't know if the timer_bh is running on the other + * CPU. We need to avoid to SMP race with it. NOTE: we don' t need + * the irq version of write_lock because as just said we have irq + * locally disabled. 
-arca + */ + write_seqlock(&xtime_lock); + + iccs = __mfpr(PR_ICCS); + if (iccs & ICCS_ERROR) { + printk("Clock overrun\n"); + } + + do_timer(regs); + +#ifndef CONFIG_SMP + update_process_times(user_mode(regs)); +#endif + + /* + * If we have an externally synchronized Linux clock, then update + * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be + * called as close as possible to 500 ms before the new second starts. + */ + if ((time_status & STA_UNSYNC) == 0 + && xtime.tv_sec > last_rtc_update + 660 + && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 + && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { + if (set_rtc_mmss(xtime.tv_sec) == 0) + last_rtc_update = xtime.tv_sec; + else + last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */ + } + + /* + * The VARM says we should do this in the clock ISR. It isn't + * actually required on the KA650, as the ICCS register is + * not fully implemented. But I don't know about the other + * CPUs yet + */ + __mtpr(ICCS_INTERRUPT | /* Clear interrupt bit */ + ICCS_ERROR | /* Clear error bit */ + ICCS_TRANSFER | /* Reload ICR from NICR */ + ICCS_RUN, /* ... and go again */ + PR_ICCS); + + write_sequnlock(&xtime_lock); + + return IRQ_HANDLED; +} + +/* + * Function to compensate the time offset caused by calling this + * function (I think so, yes). This function definatively needs a real + * implementation, but it works for now. -- Erik + */ +static unsigned long do_gettimeoffset(void) +{ + /* FIXME: do something useful over here */ + return 0; +} + +/* + * do_gettimeofday() and do_settimeofday() + * + * Looking at the ARM and i386 implementations, it is very well + * possible that these functions are not correct, but without hardware + * documentation I can't think of a way to make the proper + * corrections -- Erik. 
+ */ +void do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + unsigned long seq; + unsigned long usec, sec; + + do { + seq = read_seqbegin_irqsave(&xtime_lock, flags); + + usec = do_gettimeoffset(); + { + unsigned long lost = jiffies - wall_jiffies; + if (lost) + usec += lost * (1000000 / HZ); + } + sec = xtime.tv_sec; + usec += (xtime.tv_nsec / 1000); + } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + + while (usec >= 1000000) { + usec -= 1000000; + sec++; + } + + tv->tv_sec = sec; + tv->tv_usec = usec; +} + +int do_settimeofday(struct timespec *tv) +{ + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) { + return -EINVAL; + } + + write_seqlock_irq(&xtime_lock); + /* + * This is revolting. We need to set "xtime" correctly. However, the + * value in this location is the value at the most recent update of + * wall time. Discover what correction gettimeofday() would have + * made, and then undo it! + */ + tv->tv_nsec -= do_gettimeoffset() * NSEC_PER_USEC; + tv->tv_nsec -= (jiffies - wall_jiffies) * TICK_NSEC; + + while (tv->tv_nsec < 0) { + tv->tv_nsec += NSEC_PER_SEC; + tv->tv_sec--; + } + + xtime.tv_sec = tv->tv_sec; + xtime.tv_nsec = tv->tv_nsec; + time_adjust = 0; /* stop active adjtime() */ + time_status |= STA_UNSYNC; + time_maxerror = NTP_PHASE_LIMIT; + time_esterror = NTP_PHASE_LIMIT; + write_sequnlock_irq(&xtime_lock); + + return 0; +} + +/* nicked from the i386 port, but we use the same chip, hee hee */ +unsigned long get_cmos_time(void) +{ + unsigned int year, mon, day, hour, min, sec; + int i; + + spin_lock(&rtc_lock); + /* The Linux interpretation of the CMOS clock register contents: + * When the Update-In-Progress (UIP) flag goes from 1 to 0, the + * RTC registers show the second which has precisely just started. + * Let's hope other operating systems interpret the RTC the same way. + */ + /* read RTC exactly on falling edge of update flag */ + for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... 
*/ + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) + break; + for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ + if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) + break; + do { /* Isn't this overkill ? UIP above should guarantee consistency */ + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); + } while (sec != CMOS_READ(RTC_SECONDS)); + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); + } + spin_unlock(&rtc_lock); + if ((year += 1900) < 1970) + year += 100; + return mktime(year, mon, day, hour, min, sec); +} + +/* + * Scheduler clock - returns current time in nanosec units. + */ +unsigned long long sched_clock(void) +{ + return (unsigned long long)jiffies * (1000000000 / HZ); +} + diff -Nru a/arch/vax/kernel/vax_dev_init.c b/arch/vax/kernel/vax_dev_init.c --- a/arch/vax/kernel/vax_dev_init.c 1970-01-01 01:00:00 +++ b/arch/vax/kernel/vax_dev_init.c 2005-04-25 14:10:58 @@ -0,0 +1,27 @@ +/* vax_dev_init.c + * atp Feb 2001 + * + * Called from initial do_basic_setup in linux/init/main.c + * Initialise devices according to mv. + * + * Add any other vax device specific initialisation stuff here. 
+ */ +#include /* For NULL */ +#include /* For printk */ +#include + +#include +#include +#include + +static int __init vax_dev_init(void) +{ + if (mv->init_devices) { + mv->init_devices(); + } + + return 1; +} + +subsys_initcall(vax_dev_init); + diff -Nru a/arch/vax/kernel/vmlinux.lds.S b/arch/vax/kernel/vmlinux.lds.S --- a/arch/vax/kernel/vmlinux.lds.S 1970-01-01 01:00:00 +++ b/arch/vax/kernel/vmlinux.lds.S 2004-10-02 13:42:47 @@ -0,0 +1,141 @@ +/* ld script to make bootable VAX image + * Written for i386 by Martin Mares + * Modified by Kenn Humborg + */ + +#include + +/* As of 2.5.16, the jiffy counter is 64-bit and is called jiffies_64. + For backward-compatibility, a symbol 'jiffies' needs to be aliased + to the least-significant 32-bits of jiffies_64 */ + +jiffies = jiffies_64; + +SECTIONS +{ + + /* kernel is based in S0 space at 0x80100000 */ + . = 0x80000000 + 0x100000; + + /* read-only */ + _text = .; /* Text and read-only data */ + __kernel_start = .; + .text : { + /* boot code comes first. The fact that it is linked + at 0x80100000 doesn't matter because it's all + position-independent code */ + *(.boot.text) + *(.boot.data) + + /* Kernel C code starts here */ + *(.text) + SCHED_TEXT; + *(.fixup) + *(.gnu.warning) + } = 0x9090 + .text.lock : { *(.text.lock) } /* out-of-line lock text */ + + _etext = .; /* End of text section */ + + .rodata : { *(.rodata) } + .kstrtab : { *(.kstrtab) } + + . 
= ALIGN(16); /* Exception table */ + __start___ex_table = .; + __ex_table : { *(__ex_table) } + __stop___ex_table = .; + + __start___ksymtab = .; /* Kernel symbol table */ + __ksymtab : { *(__ksymtab) } + __stop___ksymtab = .; + + __start___gpl_ksymtab = .; /* Kernel symbol table: GPL-only symbols */ + __gpl_ksymtab : { *(__gpl_ksymtab) } + __stop___gpl_ksymtab = .; + + __start___kallsyms = .; /* All kernel symbols */ + __kallsyms : { *(__kallsyms) } + __stop___kallsyms = .; + + + /* writeable */ + .data : { /* Data */ + *(.data) + CONSTRUCTORS + } + + _edata = .; /* End of data section */ + + . = ALIGN(8192); /* init_task */ + .data.init_task : { *(.data.init_task) } + + /* will be freed after init */ + . = ALIGN(4096); /* Init code and data */ + __init_begin = .; + .init.text : { + _sinittext = .; + *(.init.text) + _einittext = .; + } + .init.data : { *(.init.data) } + . = ALIGN(16); + __setup_start = .; + .init.setup : { *(.init.setup) } + __setup_end = .; + __start___param = .; + __param : { *(__param) } + __stop___param = .; + __initcall_start = .; + .initcall.init : { + *(.initcall1.init) + *(.initcall2.init) + *(.initcall3.init) + *(.initcall4.init) + *(.initcall5.init) + *(.initcall6.init) + *(.initcall7.init) + } + __initcall_end = .; + __init_cpumatch_start = .; + .init.cpumatch : { *(.init.cpumatch) } + __init_cpumatch_end = .; + . = ALIGN(4096); + .init.ramfs : { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + } + + . = ALIGN(8); + .con_initcall.init : { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + + . = ALIGN(4096); + __init_end = .; /* This is end of loadable image */ + + __bss_start = .; /* BSS */ + .bss : { + *(.bss) + } + _end = .; /* Boot code uses this to determine how much + code and data to relocate */ + + /* Sections to be discarded */ + /DISCARD/ : { + *(.exit.text) + *(.exit.data) + *(.exitcall.exit) + } + + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } +} diff -Nru a/arch/vax/lib/Makefile b/arch/vax/lib/Makefile --- a/arch/vax/lib/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/lib/Makefile 2005-05-24 04:14:36 @@ -0,0 +1,8 @@ +# +# Makefile for the linux kernel. +# + +lib-y := string.o negdi.o checksum.o lshrdi3.o strncpy_user.o \ + copy_tofrom_user.o strnlen_user.o clear_user.o \ + udiv.o urem.o + diff -Nru a/arch/vax/lib/README b/arch/vax/lib/README --- a/arch/vax/lib/README 1970-01-01 01:00:00 +++ b/arch/vax/lib/README 2002-05-20 02:33:33 @@ -0,0 +1,11 @@ +This is pretty much all temporary. Stuff that isnt will need to be +rewritten. + +# items lifted from linux tree +# linux/lib/*.c +# - headers hacked mercilessly until they compiled. +# - asm-string.h is a stub (from linux/include/asm-alpha/string.h) +# - printk is a chunk copied from linux/arch/alpha/boot/main.c +# - stdarg.h is from gcc 2.7.2.3 +# - console.c is from the test vilo program. + diff -Nru a/arch/vax/lib/checksum.c b/arch/vax/lib/checksum.c --- a/arch/vax/lib/checksum.c 1970-01-01 01:00:00 +++ b/arch/vax/lib/checksum.c 2005-04-26 00:25:05 @@ -0,0 +1,159 @@ +/* + * Dave Airlie wrote the original IP checksum code for Linux/VAX + * in assembler (transliterating from the i386 version). + * + * In 2.5.69, the NFS client code was changed to use zero-copy + * which leads to this function being called with odd-byte-aligned + * buffers, which broke Dave's code. + * + * While fixing this, I re-wrote it in C, only using assembler for + * the carry-handling that is impossible to do in C. Some inspiration + * came from NetBSD :-) The generated looks as good as Dave's. 
+ * - Kenn Humborg, 2003-10-01 + */ + +#include + +unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) +{ + int odd = 0; + + /* First we want to get aligned on a longword boundary, + so that our 32-bit operations later are as fast as + possible. So deal with any non-aligned bytes first */ + + /* But NetBSD doesn't try to align on 32-bits for very small + buffers (less than 16 bytes), so we won't either */ + if (len < 16) { + goto short_buffer; + } + + if (((unsigned int)buff) & 1) { + /* Starts on odd boundary - pull in first byte. + And make a note that we need to byte swap at the end */ + + int byte = *buff << 8; + + /* BTW, the funny positioning of the quotes here is + so that the assembly listing comes out aligned nicely */ + + __asm__ __volatile ( + "addl2 %2, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (byte) + ); + + odd = 1; + buff++; + len--; + } + + if (((unsigned int)buff) & 2) { + /* Still not on 32-bit boundary + And make a note that we need to byte swap at the end */ + + int word = *(unsigned short *)buff; + + __asm__ __volatile ( + "addl2 %2, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (word) + ); + + buff += 2; + len -= 2; + } + + /* Now we MUST be aligned on 32-bits */ + + while (len >= 32) { + __asm__ __volatile ( + "addl2 (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (buff) + ); + len -= 32; + } + + if (len >= 16) { + __asm__ __volatile ( + "addl2 (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (buff) + ); + len -= 16; + } + +short_buffer: + if (len >= 8) { + __asm__ __volatile ( + "addl2 (%2)+, %0 \n" + " adwc (%2)+, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (buff) + ); + len -= 8; + } + + if (len >= 4) { + 
__asm__ __volatile ( + "addl2 (%2)+, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (buff) + ); + len -= 4; + } + + if (len >= 2) { + int word = *(unsigned short *)buff; + __asm__ __volatile ( + "addl2 %2, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (word) + ); + buff += 2; + len -= 2; + } + + if (len > 0) { + int byte = *buff; + __asm__ __volatile ( + "addl2 %2, %0 \n" + " adwc $0, %0 " + : "=r" (sum) + : "0" (sum), "r" (byte) + ); + } + + if (odd) { + /* + * Need to byte-swap - just roll everything around + *through 8 bits. + */ + __asm__ __volatile ( + "rotl $8, %0, %0 " + : "=r" (sum) + : "0" (sum) + ); + } + + return sum; +} + diff -Nru a/arch/vax/lib/clear_user.S b/arch/vax/lib/clear_user.S --- a/arch/vax/lib/clear_user.S 1970-01-01 01:00:00 +++ b/arch/vax/lib/clear_user.S 2005-04-26 00:25:05 @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2001, Dave Airlie + * + * VAX Assembly implementation of clear_user + */ + +#include +#include + +/* unsigned long __clear_user(void *addr, unsigned long size); + * number of bytes not cleared is returned + */ + +#define EX(insn, addr, reg, handler) \ +9: insn addr, reg; \ + .section __ex_table, "a"; \ + .align 2 ; \ + .long 9b, handler; \ + .previous + + .text +ENTRY(__clear_user) + .word 0x3e + movl 4(%ap), %r1 /* r1 now has addr */ + movl 8(%ap), %r0 /* r0 has size */ + beql 2f +1: EX(movb, $0, (%r1)+, fault) + sobgtr %r0, 1b +2: ret + + .section .fixup, "ax" +fault: ret + .previous + diff -Nru a/arch/vax/lib/copy_tofrom_user.S b/arch/vax/lib/copy_tofrom_user.S --- a/arch/vax/lib/copy_tofrom_user.S 1970-01-01 01:00:00 +++ b/arch/vax/lib/copy_tofrom_user.S 2005-04-26 00:25:05 @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2001, Dave Airlie + * + * VAX Assembly implementation of copy_tofrom_user + */ + +#include +#include + +/* + * int __copy_tofrom_user(void *to, const void *from, unsigned long size); + */ +#define EX(insn, arg0, arg1, handler) \ +9: insn arg0, arg1; \ + .section __ex_table, "a"; \ + .align 2 ; 
\ + .long 9b, handler; \ + .previous + + .text +ENTRY(__copy_tofrom_user) + .word 0x3e + movl 4(%ap), %r2 /* to in r2 */ + movl 8(%ap), %r3 /* from in r3 */ + movl 12(%ap), %r0 /* size in r0 */ + +1: EX(movb, (%r3)+, %r4, l_fixup) + EX(movb, %r4, (%r2)+, s_fixup) + sobgtr %r0, 1b + ret + .section .fixup,"ax" + .align 4 +l_fixup: +s_fixup: ret + .previous diff -Nru a/arch/vax/lib/lshrdi3.c b/arch/vax/lib/lshrdi3.c --- a/arch/vax/lib/lshrdi3.c 1970-01-01 01:00:00 +++ b/arch/vax/lib/lshrdi3.c 2003-05-11 21:51:40 @@ -0,0 +1,63 @@ +/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */ +/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. 
*/ + +#define BITS_PER_UNIT 8 + +typedef int SItype __attribute__ ((mode (SI))); +typedef unsigned int USItype __attribute__ ((mode (SI))); +typedef int DItype __attribute__ ((mode (DI))); +typedef int word_type __attribute__ ((mode (__word__))); + +struct DIstruct {SItype low, high;}; + +typedef union +{ + struct DIstruct s; + DItype ll; +} DIunion; + + +DItype +__lshrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + w.s.high = 0; + w.s.low = (USItype)uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = (USItype)uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} diff -Nru a/arch/vax/lib/negdi.c b/arch/vax/lib/negdi.c --- a/arch/vax/lib/negdi.c 1970-01-01 01:00:00 +++ b/arch/vax/lib/negdi.c 2005-04-26 00:25:05 @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2001, Kenn Humborg + * + * This is a temporary implementation of GCCs negdi2 primitive. + * Once we get native support in the compiler, this will be + * removed from here + * + */ + +long long __negdi2(long long x) +{ + __asm__ volatile ( + " xorl2 $-1, 4(%0) \n" /* complement high longword */ + " mnegl (%0), (%0) \n" /* negate low longword */ + " bneq 1f \n" /* no overflow */ + " incl 4(%0) \n" /* inc high longword */ + "1: " + : : "r"(&x) : "r0"); + + return x; +} + diff -Nru a/arch/vax/lib/string.c b/arch/vax/lib/string.c --- a/arch/vax/lib/string.c 1970-01-01 01:00:00 +++ b/arch/vax/lib/string.c 2005-04-26 00:25:05 @@ -0,0 +1,33 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* + * Small optimized versions should generally be found as inline code + * in . However, if size matters (inlined way too + * often) or if speed doesn't matter (overhead of a function call), + * just drop them here. 
+ */ +#include + +void *memset(void *s, int c , __kernel_size_t count) +{ + asm ( + " movl %2, %%r6 \n" /* R6 holds bytes left */ + " movl %0, %%r3 \n" /* dest in R3 */ + " movl $0xffff, %%r7 \n" /* R7 always holds 65535 */ + "next_chunk: \n" + " cmpl %%r6, %%r7 \n" + " blequ last_chunk \n" /* < 65535 bytes left */ + " movc5 $0, 0, %1, %%r7, (%%r3) \n" /* MOVC5 updates R3 for us */ + " subl2 %%r7, %%r6 \n" + " brb next_chunk \n" + "last_chunk: \n" + " movc5 $0, 0, %1, %%r6, (%%r3) " + : /* no outputs */ + : "g"(s), "g"(c), "g"(count) + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7"); + + return s; +} + diff -Nru a/arch/vax/lib/string_user.c b/arch/vax/lib/string_user.c --- a/arch/vax/lib/string_user.c 1970-01-01 01:00:00 +++ b/arch/vax/lib/string_user.c 2005-04-26 00:25:05 @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2001, Kenn Humborg + * + * These functions are used to do string operations on user memory + */ + +#include +#include /* for panic() */ + +unsigned long __clear_user(void *addr, unsigned long size) +{ + panic("__clear_user: not implemented"); +} + diff -Nru a/arch/vax/lib/strncpy_user.S b/arch/vax/lib/strncpy_user.S --- a/arch/vax/lib/strncpy_user.S 1970-01-01 01:00:00 +++ b/arch/vax/lib/strncpy_user.S 2005-04-26 00:25:05 @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2001, Dave Airlie + * + * VAX Assembly implementation of strncpy_from_user + */ + +#include +#include + +/* int __strncpy_from_user(char *dst, const char *src, long count) + * Returns number of bytes copied + */ + +#define EX(insn, addr, reg, handler) \ +9: insn addr, reg; \ + .section __ex_table, "a"; \ + .align 2 ; \ + .long 9b, handler; \ + .previous + + .text +ENTRY(__strncpy_from_user) + .word 0x3e + movl 4(%ap), %r2 /* r2 now has dst */ + movl 8(%ap), %r3 /* r3 now has src */ + movl 12(%ap), %r0 /* r0 has count */ + movl %r0, %r1 /* keep count in r1 */ + beql 2f +1: EX(movb, (%r3)+, %r4, fault) + movb %r4, (%r2)+ + cmpb $0, %r4 + beql 2f + sobgtr %r1, 1b +2: subl2 %r1, %r0 + ret + .section 
.fixup, "ax" +fault: movl $-EFAULT, %r0 + ret + .previous + diff -Nru a/arch/vax/lib/strnlen_user.S b/arch/vax/lib/strnlen_user.S --- a/arch/vax/lib/strnlen_user.S 1970-01-01 01:00:00 +++ b/arch/vax/lib/strnlen_user.S 2005-04-26 00:25:05 @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2001, Dave Airlie + * + * VAX Assembly implementation of strnlen + */ + +#include +#include + +/* long __strnlen_user(const char *s, long n) + * Returns either strlen s or n + */ +#define EX(insn, arg0, arg1, handler) \ +9: insn arg0, arg1; \ + .section __ex_table, "a"; \ + .align 2 ; \ + .long 9b, handler; \ + .previous + + .text +ENTRY(__strnlen_user) + .word 0x3e + movl 4(%ap), %r0 + movl 8(%ap), %r1 + movl %r0, %r2 + +1: EX(movb, (%r0)+, %r3, fault) + cmpb $0, %r3 + beql 2f + sobgtr %r1, 1b + incl %r0 +2: subl2 %r2, %r0 + ret + + .section .fixup,"ax" +fault: movl $0, %r0 + ret + diff -Nru a/arch/vax/lib/udiv.S b/arch/vax/lib/udiv.S --- a/arch/vax/lib/udiv.S 1970-01-01 01:00:00 +++ b/arch/vax/lib/udiv.S 2005-10-11 09:46:17 @@ -0,0 +1,72 @@ +/* $NetBSD: udiv.S,v 1.3 2003/08/07 16:32:24 agc Exp $ */ + +/*- + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Donn Seeley at UUNET Technologies, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)udiv.s 5.6 (Berkeley) 4/15/91 + */ + +/* + * Modified for Linux/VAX - Kenn Humborg 2005-05-26 + */ + +#include + +/* + * Unsigned division, PCC flavor. 
+ * udiv() takes an ordinary dividend/divisor pair; + */ + + +#define DIVIDEND 4(%ap) +#define DIVISOR 8(%ap) + +ENTRY(__udiv) +ENTRY(__udivsi3) + .word 0x0004 # save R2 + movl DIVISOR,%r2 + jlss Leasy # big divisor: settle by comparison + movl DIVIDEND,%r0 + jlss Lhard # big dividend: extended division + divl2 %r2,%r0 # small divisor and dividend: signed division + ret +Lhard: + clrl %r1 + ediv %r2,%r0,%r0,%r1 + ret +Leasy: + cmpl DIVIDEND,%r2 + jgequ Lone # if dividend is as big or bigger, return 1 + clrl %r0 # else return 0 + ret +Lone: + movl $1,%r0 + ret diff -Nru a/arch/vax/lib/urem.S b/arch/vax/lib/urem.S --- a/arch/vax/lib/urem.S 1970-01-01 01:00:00 +++ b/arch/vax/lib/urem.S 2005-10-11 09:46:17 @@ -0,0 +1,71 @@ +/* $NetBSD: urem.S,v 1.3 2003/08/07 16:32:24 agc Exp $ */ + +/*- + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Donn Seeley at UUNET Technologies, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)urem.s 5.6 (Berkeley) 4/15/91 + */ + +/* + * Modified for Linux/VAX - Kenn Humborg 2005-05-26 + */ + +#include + +/* + * Unsigned modulus, PCC flavor. + * urem() takes an ordinary dividend/divisor pair; + */ + +#define DIVIDEND 4(%ap) +#define DIVISOR 8(%ap) + +ENTRY(__urem) +ENTRY(__umodsi3) + .word 0x0004 # save R2 + movl DIVISOR,%r2 + jlss Leasy # big divisor: settle by comparison + movl DIVIDEND,%r0 + jlss Lhard # big dividend: need extended division + divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus + mull2 %r2,%r1 + subl2 %r1,%r0 + ret +Lhard: + clrl %r1 + ediv %r2,%r0,%r1,%r0 + ret +Leasy: + subl3 %r2,DIVIDEND,%r0 + jcc Ldifference # if divisor goes in once, return difference + movl DIVIDEND,%r0 # if divisor is bigger, return dividend +Ldifference: + ret diff -Nru a/arch/vax/mm/Makefile b/arch/vax/mm/Makefile --- a/arch/vax/mm/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/mm/Makefile 2004-02-23 01:03:36 @@ -0,0 +1,9 @@ +# +# Makefile for the arch/vax/mm +# +# Note! Dependencies are done automagically +# DON'T put your own dependencies here +# unless it's something special (ie not a .c file). 
+ +obj-y := init.o pgtable.o pgalloc.o ioremap.o fault.o + diff -Nru a/arch/vax/mm/fault.c b/arch/vax/mm/fault.c --- a/arch/vax/mm/fault.c 1970-01-01 01:00:00 +++ b/arch/vax/mm/fault.c 2005-04-25 17:40:32 @@ -0,0 +1,276 @@ +/* + * linux/arch/alpha/mm/fault.c + * + * Copyright (C) 1995 Linus Torvalds + * + * Copyright (C) 2001 Kenn Humborg, Andy Phillips, David Airlie + * (VAX Porting Team) + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +/* + * This routine handles page faults and access violations. It + * determines the address, and the problem, and then passes + * it off to handle_mm_fault(). + * + * reason: + * reason == 0 means kernel translation not valid fault in SPT. + * bit 0 = length violation + * bit 1 = fault during PPTE reference + * bit 2 = fault-on-read if 0, fault-on-write if 1 + * + */ + +#define REASON_LENGTH (1<<0) +#define REASON_PPTEREF (1<<1) +#define REASON_WRITE (1<<2) + +#undef VAX_MM_DEBUG +#define VAX_MM_DEBUG_USER_FAULTS + +static void +do_page_fault(struct accvio_info *info, struct pt_regs *regs) +{ + unsigned long address = info->addr; + unsigned int reason = info->reason; + struct vm_area_struct * vma; + struct task_struct *tsk = current; + struct mm_struct *mm = NULL; + const struct exception_table_entry *fixup; + +#ifdef VAX_MM_DEBUG + printk("mmfault: pid %d fault at %8x, pc %8x, psl %8x, reason %8x\n", + current->pid, info->addr, info->pc, info->psl, info->reason); + printk("mmfault:p0br %8lx, p0lr %8lx, p1br %8lx, p1lr %8lx\n", + Xmfpr(PR_P0BR), Xmfpr(PR_P0LR), Xmfpr(PR_P1BR), Xmfpr(PR_P1LR)); +#endif + /* + * This check, and the mm != NULL checks later, will be removed + * later, once we actually have a 'current' properly defined. + */ + if (tsk != NULL) + mm = tsk->mm; + + /* + * If we're in an interrupt context, or have no user context, + * we must not take the fault. 
+ */ + if (in_interrupt() || !mm) + goto no_context; + + down_read (&mm->mmap_sem); + + vma = find_vma(mm, address); + + if (!vma) + goto bad_area; + + if (vma->vm_start <= address) + goto good_area; + + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + + if (expand_stack(vma, address)) + goto bad_area; + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + + if (reason & REASON_WRITE) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) + goto bad_area; + } +survive: + switch (handle_mm_fault(mm, vma, address, reason & REASON_WRITE)) { + case VM_FAULT_MINOR: + current->min_flt++; + break; + case VM_FAULT_MAJOR: + current->maj_flt++; + break; + case VM_FAULT_SIGBUS: + goto do_sigbus; + case VM_FAULT_OOM: + goto out_of_memory; + default: + BUG(); + } + + up_read(&mm->mmap_sem); + return; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { +#ifdef VAX_MM_DEBUG_USER_FAULTS + printk(KERN_ALERT "Unable to do USER paging request: " + "pid %d, virtual address %08lx, " + "reason mask %08x, PC %08x, PSL %08x\n", + current->pid, address, reason, info->pc, + info->psl); + show_regs(regs); + show_cpu_regs(); + printk("\nStack dump\n"); + hex_dump((void *) (regs->fp & ~0xf), 512); + printk(KERN_ALERT "do_page_fault: sending SIGSEGV\n"); +#endif + force_sig(SIGSEGV, current); + return; + } + +no_context: + /* Are we prepared to handle this fault as an exception? */ + if ((fixup = search_exception_tables(regs->pc)) != NULL) { + regs->pc = fixup->fixup; + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. 
+ */ + printk(KERN_ALERT "Unable to handle kernel paging request at " + "virtual address %08lx, reason mask %08x, " + "PC %08x, PSL %08x\n", + address, reason, info->pc, info->psl); + printk("\nStack dump\n"); + hex_dump((void *) regs->sp, 256); + show_stack(current, NULL); + show_regs(regs); + show_cpu_regs(); + + machine_halt(); + + /* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +out_of_memory: + if (current->pid == 1) { + yield(); + goto survive; + } + up_read(&mm->mmap_sem); + if (user_mode(regs)) { + printk("VM: killing process %s\n", current->comm); + do_exit(SIGKILL); + } + goto no_context; + +do_sigbus: + up_read(&mm->mmap_sem); + + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + force_sig(SIGBUS, current); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; +} + +/* + * This is the access violation handler. + */ +void accvio_handler(struct pt_regs *regs, void *excep_info) +{ + struct accvio_info *info = (struct accvio_info *) excep_info; + static int active; + + /* + * This active flag is just a temporary hack to help catch + * accvios in the page fault handler. It will have to + * go eventually as it's not SMP safe. + */ + if (!active) { + active = 1; + do_page_fault(info, regs); + active = 0; + } else { + printk("\nNested access violation: reason mask %02x, " + "addr %08x, PC %08x, PSL %08x\n", + info->reason, info->addr, info->pc, info->psl); + + printk("\nStack dump\n"); + hex_dump((void *) regs->sp, 256); + show_stack(current, NULL); + show_regs(regs); + show_cpu_regs(); + + machine_halt(); + } +} + +/* + * This is the page fault handler. 
+ */ +void page_fault_handler(struct pt_regs *regs, void *excep_info) +{ + struct accvio_info *info = (struct accvio_info *)excep_info; + static int active; + + /* + * This active flag is just a temporary hack to help catch + * accvios in the page fault handler. It will have to + * go eventually as it's not SMP safe. + */ + if (!active) { + /* FIXME: Why is this commented out? */ + /* active = 1;*/ + do_page_fault(info, regs); +#ifdef VAX_MM_DEBUG + printk("finished fault\n"); +#endif + active = 0; + } else { + printk("\nNested page fault: reason mask %02x, " + "addr %08x, PC %08x, PSL %08x\n", + info->reason, info->addr, info->pc, info->psl); + + printk("\nStack dump\n"); + hex_dump((void *) regs->sp, 256); + show_stack(current, NULL); + show_regs(regs); + show_cpu_regs(); + + machine_halt(); + } +} + diff -Nru a/arch/vax/mm/init.c b/arch/vax/mm/init.c --- a/arch/vax/mm/init.c 1970-01-01 01:00:00 +++ b/arch/vax/mm/init.c 2005-05-09 22:37:20 @@ -0,0 +1,202 @@ +/* + * Initialise the VM system. + * Copyright atp Nov 1998 + * GNU GPL + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define VAX_INIT_DEBUG + +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] + __attribute__ ((__aligned__(PAGE_SIZE))); + +pte_t *pg0; + +struct pgd_cache pgd_free_list; + +/* + * We don't use the TLB shootdown stuff yet, but we need this to keep + * the generic TLB shootdown code happy. + */ +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +/* + * This is task 0's PGD structure. Entries 4 and 5 will be filled with + * the system page table base and size by head.S. The remaining + * entries (0 to 3) will be left at zero as there is no valid user + * context in task 0. + */ +pgd_t swapper_pg_dir[PTRS_PER_PGD]; +pmd_t swapper_pm_dir[2048] __attribute__ ((__aligned__(8192))); /* two pages for the kernel S0 pmd */ + +/* + * In other architectures, paging_init sets up the kernel's page tables. 
+ * In Linux/VAX, this is already done by the early boot code. For the + * physical RAM. In this routine we initialise the remaining areas of + * the memory, and system page table. + */ +void __init paging_init(void) +{ + hwpte_t *pte, *lastpte; + unsigned int ii; + + /* Sort out page table. */ + pg0 = (pte_t *) SPT_BASE; + + /* Set up pmd */ + swapper_pg_dir[2].pmd = swapper_pm_dir; + + /* FIXME: This is where the VMALLOC stuff from head.S should go */ + printk("VAXMM: Initialising mm layer for %d tasks of size %dMB\n", + TASK_MAXUPRC, TASK_WSMAX >> 20); + + /* + * Size the process page table slots. See asm/mm/task.h for details + * The _START and _END macros are from pgtable.h + * This is all in PAGELETS and HWPTES, hence no set_pte + */ + pte = (hwpte_t *) GET_SPTE_VIRT(VMALLOC_END); + lastpte = (hwpte_t *) GET_SPTE_VIRT(TASKPTE_START); + ii = 0; + + /* Clear this area */ + while (pte < lastpte) { + *pte++ = __hwpte(0x00000000); + ii++; + } + /* This is stored in hwptes */ + SPT_LEN += ii; + + pte = (hwpte_t *) GET_SPTE_VIRT(TASKPTE_START); + lastpte = pte + SPT_HWPTES_TASKPTE; + /* Clear this area */ + while (pte < lastpte) + *pte++ = __hwpte(0x00000000); + + /* This is stored in hwptes */ + SPT_LEN += SPT_HWPTES_TASKPTE; + __mtpr(SPT_LEN, PR_SLR); + flush_tlb(); + + printk("VAXMM: system page table base %8lx, length (bytes) %8lx length (PTEs) %8lx\n", + SPT_BASE, SPT_SIZE, SPT_LEN); +} + +#if DEBUG_POISON +static void kill_page(unsigned long pg) +{ + unsigned long *p = (unsigned long *) pg; + unsigned long i = PAGE_SIZE, v = 0xdeadbeefdeadbeef; + + do { + p[0] = v; + p[1] = v; + p[2] = v; + p[3] = v; + p[4] = v; + p[5] = v; + p[6] = v; + p[7] = v; + i -= 64; + p += 8; + } while (i != 0); +} +#else +#define kill_page(pg) +#endif + +void mem_init(void) +{ + max_mapnr = num_physpages = max_low_pfn; + + /* Clear the zero-page */ + memset(empty_zero_page, 0, PAGE_SIZE); + + /* This will put all low memory onto the freelists */ + totalram_pages += 
free_all_bootmem(); + high_memory = (void *) __va((max_low_pfn) * PAGE_SIZE); + + printk("Memory: %luk/%luk available\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10), + max_mapnr << (PAGE_SHIFT - 10)); + + return; +} + +static void free_reserved_mem(void *start, void *end) +{ + void *__start = start; + + for (; __start < end; __start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(__start)); + set_page_count(virt_to_page(__start), 1); + free_page((long) __start); + totalram_pages++; + } +} + +void free_initmem(void) +{ + extern char __init_begin, __init_end; + + free_reserved_mem(&__init_begin, &__init_end); + + printk("Freeing unused kernel memory: %Zdk freed\n", + (&__init_end - &__init_begin) >> 10); +} + +void +show_mem(void) +{ + long i, free = 0, total = 0, reserved = 0; + long shared = 0, cached = 0; + + printk("\nMem-info:\n"); + show_free_areas(); + printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10)); + i = max_mapnr; + while (i-- > 0) { + total++; + if (PageReserved(mem_map + i)) + reserved++; + else if (PageSwapCache(mem_map + i)) + cached++; + else if (!page_count(mem_map + i)) + free++; + else + shared += page_count(mem_map + i) - 1; + } + printk("%ld pages of RAM\n", total); + printk("%ld free pages\n", free); + printk("%ld reserved pages\n", reserved); + printk("%ld pages shared\n", shared); + printk("%ld pages swap cached\n", cached); + printk("%ld pages in PGD cache\n", pgd_free_list.size); +} + + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + if (start < end) + printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + for (; start < end; start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(start)); + set_page_count(virt_to_page(start), 1); + free_page(start); + totalram_pages++; + } +} +#endif + diff -Nru a/arch/vax/mm/ioremap.c b/arch/vax/mm/ioremap.c --- a/arch/vax/mm/ioremap.c 1970-01-01 01:00:00 +++ b/arch/vax/mm/ioremap.c 2005-05-09 22:37:20 @@ -0,0 
+1,167 @@ +/* + * Copyright (C) 2000 Kenn Humborg + * + * This file contains the functions for mapping specific physical + * addresses into virtual memory, normally used to memory map + * device registers in IO space. + */ + +/* The i386 code maps a physical range by allocating a new vm area + (which seems to have a full 3-level page table structure) and + then fixing the PTEs to point to the specified physical region. + + We can't do this right now in VAXland because we haven't got the + mm layer implemented far enough yet. In fact, I don't know if + we will ever be able to do it that way because we don't have + sparse enough page tables. I think that we'll have to statically + allocate a system page table that is big enough to map all physical + RAM plus some "spare" page table entries for IO mapping. + + Dynamically expanding the system page table _may_ be possible, + but would require enough contiguous physical memory to hold the + complete, larger table while we copy the current PTEs. I + suspect that it might not work in the general case, because I + have a feeling that we won't be able to notify everything that + needs to know when the SPT base addr changes. (Scatter/gather + hardware might be one example.) + + So, here's what we do right now: + + 1. When creating the initial system page table, we allocate + a certain number of spare PTEs at the end of the table to + be used for mapping IO space. + + 2. Each of these PTEs is marked INVALID. A PTE in this + range which is INVALID is available for use for IO space + mapping, one which is VALID is already in use. + + 3. When a driver wants to map a range of IO space, we work + out how many PTEs we need and try to find a contiguous chunk + of free (i.e. INVALID) PTEs. We make these PTEs valid and + point them to the specified physical area. + +*/ + +#include +#include + +/* Defined in head.S */ +extern pte_t *iomap_base; + +/* This array will store the sizes of areas re-mapped by ioremap(). 
+ We need this because iounmap() doesn't take a size arg. + We store the size as a PTE count. */ + +static unsigned int iomap_sizes[SPT_PTES_IOMAP]; + +void *ioremap(unsigned long phys_addr, unsigned long size) +{ + unsigned long phys_start; + unsigned int offset; + unsigned long phys_end; + unsigned int num_ptes; + void *virt_start; + unsigned int i; + pte_t *start_pte; + pte_t *p; + unsigned long pfn; + + /* Page align the physical addresses */ + phys_start = PAGE_ALIGN_PREV(phys_addr); + offset = phys_addr - phys_start; + + phys_end = PAGE_ALIGN(phys_addr + size); + + num_ptes = (phys_end - phys_start) >> PAGE_SHIFT; + + start_pte = NULL; + p = iomap_base; + while (p < iomap_base+SPT_PTES_IOMAP) { + + if (pte_val(*p) & _PAGE_VALID) { + /* PTE in use, start over */ + start_pte = NULL; + } else { + /* PTE is available */ + if (start_pte == NULL) { + start_pte = p; + } + } + + p++; + + /* Have we found enough PTEs? */ + if (start_pte != NULL) { + if ((p - start_pte) == num_ptes) { + break; + } + } + } + + if ((p - start_pte) != num_ptes) { + /* Unable to find contiguous chunk of IOMAP PTEs */ + printk("ioremap: cannot find 0x%04x available PTEs\n", num_ptes); + return NULL; + } + + /* Stash the size of this IO space mapping */ + iomap_sizes[start_pte - iomap_base] = num_ptes; + + virt_start = SPTE_TO_VIRT(start_pte); + + for (i = 0; i < num_ptes; i++) { + pfn = (phys_start >> PAGE_SHIFT) + i; + set_pte(start_pte + i, pfn_pte(pfn, __pgprot(_PAGE_VALID | _PAGE_KW)) ); + + /* fixme: tlb flushes for other pagelets */ + __flush_tlb_one(virt_start + (i<= (iomap_base + SPT_PTES_IOMAP))) { + printk("iounmap: virtual addr 0x%08lx not in IOMAP region\n", + (unsigned long) addr); + return; + } + + num_ptes = iomap_sizes[p - iomap_base]; + + if (num_ptes == 0) { + printk("iounmap: virtual addr 0x%08lx not currently IO mapped\n", + (unsigned long) addr); + return; + } + + iomap_sizes[p - iomap_base] = 0; + + printk("IO unmapping 0x%04x pages at PTE index 0x%04Zx\n", + num_ptes, 
p - iomap_base); + + while (num_ptes--) { + pte_val(*p) = 0; + p++; + + __flush_tlb_one(addr); + addr += PAGELET_SIZE; + } + +} + diff -Nru a/arch/vax/mm/pgalloc.c b/arch/vax/mm/pgalloc.c --- a/arch/vax/mm/pgalloc.c 1970-01-01 01:00:00 +++ b/arch/vax/mm/pgalloc.c 2005-04-26 00:25:05 @@ -0,0 +1,404 @@ +/* + * pgalloc.c Routines from include/asm-vax/mm/pgalloc.h + * Allocation of page table entries and so forth. + * + * This is the main part of the VAX specific memory layer. + * + * Copyright atp Jun 2001 - complete rewrite. + * atp aug 2001 - add in stuff for vmalloc to work (pmd_alloc_kernel) + * fix mistake in pte_alloc_kernel. + * atp 21 aug 01 - make TASK_WSMAX what was intended, add in segv stuff. + * + * License: GNU GPL + */ + +#include +#include +#include +#include +#include +#include + +extern void vaxpanic(char *reason); + +#undef VAX_MM_PGALLOC_DEBUG + +/* + * Allocate a pgd. We don't - at present - need to worry about + * maintaining a bitmap as we put pgds that we are finished with + * on our quicklists pool. + */ +static inline pgd_t *get_pgd_fast(void) +{ + pgd_t *pgd; + + if ((pgd = pgd_free_list.head) != NULL) { + pgd_free_list.head = pgd->next; + pgd->next = NULL; + pgd_free_list.size--; + } + return pgd; +} + +/* Allocate a pgd */ +pgd_t *pgd_alloc(struct mm_struct *mm) +{ + /* + * This is rather wasteful, as only a few longwords are + * used in the entire 4kb page. Perhaps we can do something + * smarter here by using the quicklists to pack the pgds into + * a single page. 
+ */ + pgd_t *pgd; + unsigned long taskslot; + + /* Grab a pgd off the cache */ + pgd = get_pgd_fast(); + + if (!pgd) { + /* Check if we have run out of balance slots */ + if (pgd_free_list.slots_used >= TASK_MAXUPRC) + return NULL; + + pgd = kmalloc(sizeof(pgd_t) * PTRS_PER_PGD, GFP_KERNEL); + if (!pgd) + return NULL; + + memset(pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); + + taskslot = GET_TASKSLOT(pgd_free_list.slots_used); + /* one more slot used */ + pgd_free_list.slots_used++; + + pgd[0].pmd = 0; /* These are blank */ + pgd[1].pmd = 0; + } else { + /* pgd_clear keeps this */ + taskslot = pgd->slot; + } + + if (pgd) { + + /* Set the values of the base + length registers */ + pgd[0].br = taskslot + P0PTE_OFFSET; /* skip the PMD */ + pgd[0].lr = 0x0; + /* This comes in handy later */ + pgd[0].slot = taskslot; + /* p1br points at what would be page mapping 0x40000000 (i.e. the _end_ of the slot)*/ + pgd[1].br = taskslot+ (P1PTE_OFFSET) - 0x800000 ; + /* This is the unmapped number of PTEs */ + pgd[1].lr = 0x40000; + pgd[1].slot = taskslot; + + pgd[0].segment = 0; + pgd[1].segment = 1; + +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM:pgd_alloc: p0: %8lX, %8lX, p1: %8lX, %8lx, slot %ld, taskslot %8lx\n", pgd[0].br, pgd[0].lr, pgd[1].br, pgd[1].lr, pgd_free_list.slots_used-1, pgd[0].slot); +#endif + /* Set the s0 region, from the master copy in swapper_pg_dir */ + memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return pgd; +} + +void pgd_clear(pgd_t * pgdp) +{ + /* Wipe a pgd structure carefully -- this is probably overkill */ + pgdp->pmd = 0; + + if (pgdp->segment) { + /* p1br points at what would be page mapping 0x40000000 */ + pgdp->br = pgdp->slot+ (P1PTE_OFFSET) - 0x800000; + /* This is the unmapped number of PTEs */ + pgdp->lr = 0x40000; + } else { + pgdp->br = pgdp->slot+ (P0PTE_OFFSET); /* skip the PMD */ + pgdp->lr = 0x0; + } +} + +/* + * Remap a given page to be part of a 
contiguous page table for p0/1 space. + * + * This is like remap_pte_range in memory.c but VAX specific. It's called + * when we're creating part of a process page table. A new, blank page + * has just been allocated and we want to use this page to back part of + * the process page table. This will result in this new page being + * double-mapped. One mapping will be its 'identity' mapping where + * VIRT = (PHYS + PAGE_OFFSET). The other mapping will be in the middle + * of the process page table. + * + * s0addr is the address in S0 space that we need to remap the page + * pointed at by pte_page to. + * + * This is also called to remap the two pages in our page middle directory. + */ +static void remap_pgtable_page(void *s0addr, struct page *page) +{ + pte_t *s0pte; + + /* sanity checks */ + if (!s0addr) { + vaxpanic("VAXMM: null S0 address in remap_pgtable_page!\n"); + return; + } + if (!page) { + vaxpanic("VAXMM: null pte_page in remap_pgtable_page!\n"); + return; + } + + /* Locate the S0 pte that describes the page pointed to by s0addr */ + s0pte = GET_SPTE_VIRT(s0addr); + +#ifdef VAX_MM_PGALLOC_DEBUG + /* Is it already pointing somewhere? */ + if (pte_present(*s0pte)) + printk(KERN_DEBUG "VAXMM: S0 pte %8p already valid in " + "remap_pgtable_page??\n", s0pte); + printk(KERN_DEBUG "VAXMM: mapping PTE page %p at %p\n", page, s0addr); +#endif + /* zap the map */ + set_pte(s0pte,mk_pte(page, __pgprot(_PAGE_VALID|_PAGE_KW))); + + flush_tlb_all(); +} + +/* + * Invalidate the S0 pte that was remapped to point at this page in the + * process page table or the page middle directory. 
+ */ +static void unmap_pgtable_page(void *page) +{ + pte_t *s0pte; + + /* Sanity checks */ + if (!page) { + vaxpanic(KERN_ERR "VAXMM: null S0 address in unmap_pgtable_page!\n"); + return; + } + + /* Locate the S0 pte that describes the page pointed to by pte_page */ + s0pte = GET_SPTE_VIRT(page); +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "unmap_pgtable_page: s0addr %p, s0pte %p\n", page, s0pte); +#endif + + set_pte(s0pte, pte_mkinvalid(*s0pte)); + /* FIXME: these flush_tlb_alls need replacing with flush_tlb_8 */ + flush_tlb_all(); + // __flush_tlb_one(s0addr); +} + +/* + * We used to call this routine pmd_alloc. At v2.4.3 pmd_alloc got removed + * from include/linux/mm.h, and we have now pgd_populate and pmd_populate. + * This is pgd_populate + */ +void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) +{ + /* + * We have a two page block of memory, allocated via pmd_alloc by + * pmd_alloc_one. This needs to be remapped into the appropriate pmd + * section in the taskslot in S0 space. + * recap: The taskslot holds all the ptes in a contiguous section + * of S0 address space. The amounts of virtual address + * space are mapped out at boot time, from the constants + * in asm-vax/mm/task.h. The first four pages of this + * region are "pmd" pages, used as the bookkeeping + * information, which is normally done by the pgd page on + * 32bit processors. But we have hijacked the pgds to + * represent the four VAX memory segments, and to hold all + * the base/length register information and other related + * stuff. + * Updated atp Mar 2002. pgd_populate, remove PGD_SPECIAL botch. 
+ */ + unsigned int is_p1 = pgd->segment; + pmd_t *s0addr; + +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: Calling pgd_populate with (mm=%8p, pgd=%8p, " + "pmd=%8p\n",mm,pgd,pgd->pmd); +#endif + /* Sanity check */ + if (pgd->pmd) { +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: Calling pmd_alloc on already " + "allocated page (pgd=%8p,pmd=%8p)\n",pgd,pgd->pmd); +#endif + return; + } + + /* Calculate which bit of the page table area this page fits into. */ + s0addr = (pmd_t *)pgd->slot; /* base of the slot */ + s0addr += is_p1? (P1PMD_OFFSET/sizeof(pmd_t)): (P0PMD_OFFSET/sizeof(pmd_t)); + + /* Remap and clear the first page */ + clear_page(pmd); + remap_pgtable_page(s0addr, virt_to_page(pmd)); + + /* This is the pointer to our pmd table. */ + pgd->pmd=s0addr; + + /* This is a two page block of memory */ + s0addr += (PAGE_SIZE/sizeof(pmd_t)); + pmd += (PAGE_SIZE/sizeof(pmd_t)); + + clear_page(pmd); + remap_pgtable_page(s0addr, virt_to_page(pmd)); + +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: pmd_alloc: pgd %8p, pgd->br %8lx, pgd->lr " + "%8lx, \n\tpgd->pmd %8p\n", + pgd,pgd->br, pgd->lr, pgd->pmd); +#endif + return; +} + +/* + * pmd_populate is called when the MM core wants to make a page in + * a process page table valid. The core has already allocated a + * page for this, and it now wants for us to use this page to + * hold PTEs for the range corresponding to the PMD entry pointed + * to by the pmd parameter. + * + * It's made a bit trickier by the fact that we need to work out if + * it's a P0 or P1 page table being populated. And then we also + * need to watch for this new page of PTEs being beyond the current + * P0LR or P1LR and extending P0/1LR as necessary. + * + * We used to check against WSMAX and STKMAX here, but we now do this + * check in pte_alloc_one(), where it's easier to check (since pte_alloc_one() + * is handed the user address). 
+ * + * We make use of the knowledge that the pmd is a single block, to work back + * to the pgd, which is where the base and length register values are held. + * + * FIXMES: page table locking. + */ +/* This function could be simpler if we used system page table + entries as PMD entries. */ +void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte_page) +{ + pmd_t *pmd_base; + unsigned long pmd_index; + unsigned int pspace; + pte_t *pte_addr; + unsigned long page_index; + pgd_t *pgd_entry; + + /* Find the start of the page middle directory containing this PMD entry */ + pmd_base = (pmd_t *) ((unsigned long)pmd & PTE_TASK_MASK); /* base of the pmd */ + + /* The process page table page that we want to remap is at offset + pmd_index into the relevant page middle directory */ + pmd_index = pmd - pmd_base; + + /* But, is it a P0 or a P1 PMD? Assume P0 until proven otherwise */ + pspace = 0; + + if (pmd_base == mm->pgd[0].pmd) + pspace = 0; + else if (pmd_base == mm->pgd[1].pmd) + pspace = 1; + else + BUG(); + + pgd_entry = mm->pgd + pspace; + + /* Now we can work out the system virtual address of the relevant + page in the process page table */ + pte_addr = (pte_t *)(pgd_entry->br + (pmd_index << PAGE_SHIFT)); + +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: pmd_populate: mm %p br %08lx lr %04lx " + "pmd %p page %p pte_addr %p reg %d index %04lx\n", + mm, pgd_entry->br, pgd_entry->lr, pmd, pte_page, + pte_addr, pspace, pmd_index); +#endif + /* Double-map the newly-allocated page to this S0 address */ + remap_pgtable_page(pte_addr, pte_page); + + /* And point the PMD entry to this new mapping */ + pmd->pte_page = pte_addr; + + /* + * Now adjust the P0LR or P1LR if we've mapped a new + * page at the end of the region + */ + + /* Calculate how far into the region the newly-added page lives */ + if (pspace == 0) { + /* + * For P0 space, we want to consider the top end of the new + * page of PTEs + */ + page_index = (pte_addr + PTRS_PER_PTE) - 
(pte_t *)pgd_entry->br; + + if (pgd_entry->lr < page_index) + pgd_entry->lr = page_index; + } else { + /* + * For P1 space, we want to consider the bottom end of the new + * page of PTEs + */ + page_index = pte_addr - (pte_t *)pgd_entry->br; + + if (pgd_entry->lr > page_index) + pgd_entry->lr = page_index; + } + +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: pmd_populate: new lr %04lx\n", pgd_entry->lr); +#endif + + /* + * If all this work is for the current process, then we need to + * update the hardware registers + */ + if (pspace == 0) { + if (current->thread.pcb.p0br == pgd_entry->br) { +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: pmd_populate: updating hardware regs\n"); +#endif + current->thread.pcb.p0lr = pgd_entry->lr * 8; + set_vaxmm_regs_p0(pgd_entry); + } + + } else { + if (current->thread.pcb.p1br == pgd_entry->br) { +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM: pmd_populate: updating hardware regs\n"); +#endif + current->thread.pcb.p1lr = pgd_entry->lr * 8; + set_vaxmm_regs_p1(pgd_entry); + } + } +} + + +/* + * The pmd argument points to a single PMD entry (which corresponds to + * a single page in a process page table). We should invalidate the + * mapping of this page in the process page table and then clear out + * the PMD entry itself + */ +void pmd_clear(pmd_t *pmd) +{ + unmap_pgtable_page(pmd->pte_page); + pmd->pte_page = NULL; +} + +/* Find an entry in the third-level page table. 
*/ +pte_t * pte_offset(pmd_t *pmd, unsigned long address) +{ + pte_t *pte; + pte = pmd->pte_page + ((address>>PAGE_SHIFT) & (PTRS_PER_PTE-1)); +#ifdef VAX_MM_PGALLOC_DEBUG + printk(KERN_DEBUG "VAXMM:pte_offset: pmd %p, address %8lx, " + "pte_pte %p\n", pmd, address, pte); +#endif + return pte; +} + diff -Nru a/arch/vax/mm/pgtable.c b/arch/vax/mm/pgtable.c --- a/arch/vax/mm/pgtable.c 1970-01-01 01:00:00 +++ b/arch/vax/mm/pgtable.c 2005-04-26 00:25:05 @@ -0,0 +1,38 @@ +/* + * Handle bits of VAX memory management + * atp 2000 + */ + +#include +#include +#include +#include + +/* Note the factor of 8 in the length registers */ +void set_page_dir(struct task_struct * tsk, pgd_t * pgdir) +{ + /* P0BR and P1BR are virtual addresses */ + tsk->thread.pcb.p0br = (pgdir[0]).br; + tsk->thread.pcb.p0lr = (pgdir[0]).lr * 8; + tsk->thread.pcb.p1br = (pgdir[1]).br; + tsk->thread.pcb.p1lr = (pgdir[1]).lr * 8; + + /* + * Now if this is the currently running task, update the registers. + * This doesn't sound like a great idea... perhaps setipl(31) would + * be a good idea here... 
+ */ + if (tsk == current) { + set_vaxmm_regs(pgdir); + flush_tlb_all(); + } +} + +/* Note no factor of 8 in the length registers */ +void set_page_dir_kernel(pgd_t * pgdir) +{ + __mtpr((pgdir[2]).br, PR_SBR); + __mtpr((pgdir[2]).lr, PR_SLR); + flush_tlb_all(); +} + diff -Nru a/arch/vax/tools/Makefile b/arch/vax/tools/Makefile --- a/arch/vax/tools/Makefile 1970-01-01 01:00:00 +++ b/arch/vax/tools/Makefile 2004-11-18 10:30:00 @@ -0,0 +1,11 @@ + + +hostprogs-y := mkbootblk setcmdline +extra-y := mkbootblk setcmdline showcmdline + +quiet_cmd_showcmdline = LN $@ -> $< +cmd_showcmdline = ln -f $< $@ + +$(obj)/showcmdline: $(obj)/setcmdline + $(call cmd,showcmdline) + diff -Nru a/arch/vax/tools/mkbootblk.c b/arch/vax/tools/mkbootblk.c --- a/arch/vax/tools/mkbootblk.c 1970-01-01 01:00:00 +++ b/arch/vax/tools/mkbootblk.c 2005-04-25 17:29:43 @@ -0,0 +1,74 @@ +#include +#include +#include +#include +#include +#include +#include + +struct vax_boot_imgdesc { + unsigned short arch_type; /* 0x18 for VAX */ + unsigned char check1; + unsigned char check2; /* = 0xff ^ (0x18+check1) */ + unsigned long ignored; + unsigned long blk_count; + unsigned long load_offset; + unsigned long start_offset; + unsigned long checksum; /* blk_count + load_offset + start_offset */ +}; + +struct vax_bootblock_header { + unsigned short ignored1; + unsigned char imgdesc_offset; /* offset (in words) to imgdesc */ + unsigned char must_be_1; + unsigned short lbn_hi; + unsigned short lbn_lo; + struct vax_boot_imgdesc imgdesc; +}; + + +int main(int argc, char **argv) +{ + union bootblock { + struct vax_bootblock_header hdr; + unsigned char data[512]; + } block; + + struct stat kernelstat; + int retval; + + if (argc != 2) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return 1; + } + + retval = stat(argv[1], &kernelstat); + if (retval != 0) { + fprintf(stderr, "Cannot stat %s: %s\n", argv[1], strerror(errno)); + return 1; + } + + memset(&block, 0, sizeof(block)); + + block.hdr.imgdesc_offset = 
offsetof(struct vax_bootblock_header, imgdesc) / 2; + block.hdr.must_be_1 = 1; + block.hdr.lbn_hi = 0; + block.hdr.lbn_lo = 1; + + block.hdr.imgdesc.arch_type = 0x18; + block.hdr.imgdesc.check1 = 0x02; + block.hdr.imgdesc.check2 = 0xff ^ (0x18 + 0x02); + + /* Round up kernel size to multiple of sector size */ + block.hdr.imgdesc.blk_count = (kernelstat.st_size + 511) / 512; + block.hdr.imgdesc.load_offset = 0; + block.hdr.imgdesc.start_offset = 0; + block.hdr.imgdesc.checksum = block.hdr.imgdesc.blk_count + + block.hdr.imgdesc.load_offset + + block.hdr.imgdesc.start_offset; + + fwrite(&block, sizeof(block), 1, stdout); + + return 0; +} + diff -Nru a/arch/vax/tools/setcmdline.c b/arch/vax/tools/setcmdline.c --- a/arch/vax/tools/setcmdline.c 1970-01-01 01:00:00 +++ b/arch/vax/tools/setcmdline.c 2005-04-25 17:27:49 @@ -0,0 +1,73 @@ +/* write a command line into a header block */ +/* atp Sept 2001 */ +#include +#include +#include +#include +#include +#include +#include +#define COMMAND_LINE_SIZE 256 +#define COMMAND_LINE_OFFSET 0x204 + +static int called_as(char *str1, char *str2) { + char *tmp; + + tmp = strrchr(str1,'/'); + if (!tmp) + tmp = str1; + else + tmp++; + + return !strncmp(tmp, str2, strlen(str2)); +} + +int main (int argc, char *argv[]) { + int kern_fd; + char buffer[COMMAND_LINE_SIZE]; + + if (called_as(argv[0], "showcmdline")) { + if (argc < 2) { + printf ("usage: showcmdline kernel_image\n"); + exit(EXIT_FAILURE); + } + } else { + if (argc < 3) { + printf ("usage: setcmdline kernel_image \"command line\"\n"); + exit(EXIT_FAILURE); + } + } + + kern_fd = open(argv[1], O_RDWR); + if (kern_fd < 0) { + perror(argv[1]); + exit(EXIT_FAILURE); + } + + memset(buffer, 0, COMMAND_LINE_SIZE); + + if (called_as(argv[0], "setcmdline")) { + /* + * setcmdline + */ + if (strlen(argv[2]) >= COMMAND_LINE_SIZE) { + printf("Warning: Command Line truncated to %d bytes!\n", + COMMAND_LINE_SIZE - 1); + } + strncpy(buffer, argv[2], COMMAND_LINE_SIZE - 1); + lseek(kern_fd, 
COMMAND_LINE_OFFSET, SEEK_SET); + write(kern_fd,buffer,strlen(buffer)); + write(kern_fd,"\0",1); + } else { + /* + * showcmdline + */ + lseek(kern_fd, COMMAND_LINE_OFFSET, SEEK_SET); + read(kern_fd,buffer, COMMAND_LINE_SIZE - 1); + printf("\nKernel command line is:\n\t%s\n", buffer); + } + + close(kern_fd); + return 0; +} + diff -Nru a/drivers/Makefile b/drivers/Makefile --- a/drivers/Makefile 2005-06-17 21:48:29 +++ b/drivers/Makefile 2005-07-24 23:32:43 @@ -58,6 +58,7 @@ obj-$(CONFIG_ISDN) += isdn/ obj-$(CONFIG_MCA) += mca/ obj-$(CONFIG_EISA) += eisa/ +obj-$(CONFIG_VAX) += vax/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_INFINIBAND) += infiniband/ diff -Nru a/drivers/scsi/Makefile b/drivers/scsi/Makefile --- a/drivers/scsi/Makefile 2005-06-17 21:48:29 +++ b/drivers/scsi/Makefile 2005-07-24 23:36:46 @@ -115,6 +115,11 @@ obj-$(CONFIG_SCSI_IMM) += imm.o obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o + +# VAX stuff +obj-$(CONFIG_SCSI_VAX_53C94) += NCR53C9x.o vax_esp.o +# end VAX stuff + obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o obj-$(CONFIG_SCSI_FCAL) += fcal.o obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o diff -Nru a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c --- a/drivers/scsi/NCR5380.c 2005-06-17 21:48:29 +++ b/drivers/scsi/NCR5380.c 2005-07-24 23:43:50 @@ -28,6 +28,12 @@ /* * $Log: NCR5380.c,v $ + * Revision 1.9 2005/07/24 21:43:50 kenn + * Merge with 2.6.12 + * + * Revision 1.1.1.23 2005/07/24 20:16:12 kenn + * Import of pristine Linus 2.6.12 tree + * * Revision 1.10 1998/9/2 Alan Cox * (alan@redhat.com) @@ -346,6 +352,7 @@ return -ETIMEDOUT; } +#ifdef NDEBUG static struct { unsigned char value; const char *name; @@ -359,7 +366,6 @@ {PHASE_UNKNOWN, "UNKNOWN"} }; -#ifdef NDEBUG static struct { unsigned char mask; const char *name; @@ -538,6 +544,7 @@ } +#ifdef AUTOPROBE_IRQ static int probe_irq __initdata = 0; /** @@ -613,6 +620,7 @@ return probe_irq; } +#endif /* AUTOPROBE_IRQ */ /** * 
NCR58380_print_options - show options diff -Nru a/drivers/vax/Makefile b/drivers/vax/Makefile --- a/drivers/vax/Makefile 1970-01-01 01:00:00 +++ b/drivers/vax/Makefile 2004-11-19 13:38:39 @@ -0,0 +1,10 @@ +# +# Makefile for the Linux/VAX kernel device drivers. +# + +obj-y += bus/ +obj-y += char/ +obj-y += net/ +obj-y += scsi/ +obj-y += serial/ + diff -Nru a/drivers/vax/bus/Makefile b/drivers/vax/bus/Makefile --- a/drivers/vax/bus/Makefile 1970-01-01 01:00:00 +++ b/drivers/vax/bus/Makefile 2004-05-11 01:20:32 @@ -0,0 +1,7 @@ +# +# Makefile for the Linux/VAX bus device drivers. +# + +obj-$(CONFIG_QBUS) += qbus.o cqbic.o +obj-$(CONFIG_VSBUS) += vsbus.o vsbus-ka4x.o + diff -Nru a/drivers/vax/bus/cqbic.c b/drivers/vax/bus/cqbic.c --- a/drivers/vax/bus/cqbic.c 1970-01-01 01:00:00 +++ b/drivers/vax/bus/cqbic.c 2005-04-26 00:25:06 @@ -0,0 +1,330 @@ +/* + * Support for the CQBIC (which I guess stands for something like + * CVAX QBus Interface Chip). + * + * This is the QBUS bus adapter used in the KA640/650/655. + * Documentation is in the KA655 Technical manual + * (EK-KA655-TM-001, available online via http://vt100.net/manx). + * + * The CQBIC maps the 8K QBUS I/O space into physical memory at + * physical address 0x20000000. It provides 8192 mapping registers + * that can each map one 512-byte page between VAX physical + * memory and QBUS memory space for DMA transfers. + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +#define CQBIC_DEBUG 0 + +#define CQBIC_MAPREGPHYS 0x20088000 +#define CQBIC_NUMMAPREGS 8192 + +#define CQBIC_IOSPACE_BASE 0x20000000 +#define CQBIC_IOSPACE_SIZE 0x00002000 + +static struct device_driver cqbic_driver; + +struct cqbic_private { + struct qbus_ops * bus_ops; + + DECLARE_BITMAP(vector_bitmap, QBUS_NUM_VECTORS); + + unsigned int __iomem * mapregbase; + unsigned long iospace_phys_base; + unsigned int scb_offset; +}; + +/* Given a (start, len), how many pagelets does this span? 
*/ + +static unsigned int num_pagelets(void *start, unsigned int len) +{ + unsigned int start_pagelet; + unsigned int end_pagelet; + + start_pagelet = (unsigned int)start >> PAGELET_SHIFT; + end_pagelet = ((unsigned int)start + len - 1) >> PAGELET_SHIFT; + + return end_pagelet - start_pagelet + 1; +} + +static int find_n_free(struct cqbic_private *cqbic, unsigned int n) +{ + int i; + int j; + + i = 0; + while (i < (8192 - n)) { + for (j=0; jmapregbase[i+j]) { + /* This reg in use */ + break; + } + } + if (j == n) { + /* Found N contiguous free entries at offset I */ + return i; + } + i += j+1; + } + return -1; +} + +/* Allocate a bunch of map registers sufficient to map 'len' bytes + at address 'start'. + + This is a very dumb allocator - does linear searches for available + registers. Need a better way to do this. My first thought was to + use bits 30:0 in invalid map registers to contain forward and + backward links to maintain a list of free registers. However, bits + 30:20 are reserved (read as zero and should be written as zero), + so that only leaves us with 20 bits for links. This would be + OK if we allow the allocation granularity to be 8 registers. 
- KPH +*/ + +static struct vax_dmamap *cqbic_alloc_mapregs(struct device *busdev, void *start, unsigned int len) +{ + struct vax_dmamap *map; + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + + map = kmalloc(sizeof(struct vax_dmamap), GFP_ATOMIC); + if (map != NULL) { + int reg; + unsigned int pagelets; + + pagelets = num_pagelets(start, len); + + reg = find_n_free(cqbic, pagelets); + if (reg != -1) { + unsigned int pfn; + unsigned int reg_value; + int i; + + pfn = virt_to_phys(start) >> PAGELET_SHIFT; + reg_value = (pfn & 0xfffff) | 0x80000000; + + for (i = reg; i < reg + pagelets; i++, reg_value++) { + cqbic->mapregbase[i] = reg_value; + } + + map->reg = reg; + map->pagelets = pagelets; + map->virtaddr = start; + map->busaddr = (reg * PAGELET_SIZE) + ((unsigned int)start & ~PAGELET_MASK); +#if CQBIC_DEBUG + printk("Using map registers 0x%04x to 0x%04x to map virt %p to %p (bus %08x)\n", + reg, reg + pagelets - 1, + start, (char *)start + len - 1, map->busaddr); +#endif + + } else { + kfree(map); + map = NULL; + } + } + return map; +} + +static void cqbic_unmap(struct device *busdev, struct vax_dmamap *map) +{ + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + +#if CQBIC_DEBUG + printk("Zapping map registers 0x%04x to 0x%04x\n", map->reg, map->reg + map->pagelets - 1); +#endif + while (map->pagelets--) { + cqbic->mapregbase[map->reg] = 0; + map->reg++; + } + kfree(map); +} + +static void cqbic_dumpmap(struct device *busdev) +{ + int i; + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + + for (i=0; imapregbase[i] != 0) { + printk("CQBIC map reg %04x = %08x (-> %08x)\n", i, + cqbic->mapregbase[i], + (cqbic->mapregbase[i] & 0xfffff) << PAGELET_SHIFT); + } + } +} + + +/* Traditionally, QBUS interrupt vectors are multiples of 4. 
*/ + +static int cqbic_vector_to_irq(struct device *busdev, unsigned int vector) +{ + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + + return (vector / 4) + cqbic->scb_offset; +} + +static int cqbic_request_irq(struct device *busdev, unsigned int vector, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char * devname, + void *dev_id) +{ + return request_irq(cqbic_vector_to_irq(busdev, vector), + handler, irqflags, devname, dev_id); +} + +/* Mark a specific QBUS vector as unavailable for dynamic allocation. + Returns 0 if was previously available, 1 if previously reserved */ + +static unsigned int cqbic_reserve_vector(struct device *busdev, unsigned int vector) +{ + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + + return test_and_set_bit(vector / 4, cqbic->vector_bitmap); +} + +/* Locate an available interrupt vector and mark it reserved. Return 0 + if none available. */ + +static unsigned int cqbic_alloc_vector(struct device *busdev) +{ + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + unsigned int vector; + + do { + vector = 4 * find_first_zero_bit(cqbic->vector_bitmap, QBUS_NUM_VECTORS); + if (!vector) { + return 0; + } + } while (cqbic_reserve_vector(busdev, vector)); + + return vector; +} + +/* Mark an interrupt vector as available again */ + +static void cqbic_free_vector(struct device *busdev, unsigned int vector) +{ + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + + vector = vector / 4; + if (vector) { + clear_bit(vector, cqbic->vector_bitmap); + } +} + +static void *cqbic_ioremap(struct device *busdev, unsigned int bus_addr, unsigned int size) +{ + struct cqbic_private *cqbic = (struct cqbic_private *)busdev->driver_data; + + return ioremap(cqbic->iospace_phys_base + bus_addr, size); +} + +static void __init cqbic_device_detected(struct device *parent, unsigned int csr_offset) +{ + struct 
qbus_device *qbus_dev; + + qbus_dev = kmalloc(sizeof(*qbus_dev), GFP_KERNEL); + if (qbus_dev == NULL) { + printk("qbus_device_detected: cannot allocate " + "device structure for CSR 0x%x\n", csr_offset); + return; + } + + memset(qbus_dev, 0, sizeof(*qbus_dev)); + + qbus_dev->csr = csr_offset; + qbus_dev->dev.bus = &qbus_bus_type; + qbus_dev->dev.parent = parent; + + snprintf(qbus_dev->dev.bus_id, sizeof(qbus_dev->dev.bus_id), + "%s-%o", parent->bus_id, QBUS_OCTAL_CSR(csr_offset)); + + qbus_register_device(qbus_dev); +} + +static struct qbus_ops cqbic_bus_ops = { + .dma_map = cqbic_alloc_mapregs, + .dma_unmap = cqbic_unmap, + .dma_dumpmap = cqbic_dumpmap, + .vector_to_irq = cqbic_vector_to_irq, + .request_irq = cqbic_request_irq, + .reserve_vector = cqbic_reserve_vector, + .alloc_vector = cqbic_alloc_vector, + .free_vector = cqbic_free_vector, + .ioremap = cqbic_ioremap, +}; + +static int __init cqbic_probe(struct device *busdev) +{ + int i; + void __iomem *cqbic_iospace; + struct cqbic_private *cqbic; + + cqbic = kmalloc(sizeof(*cqbic), GFP_KERNEL); + if (!cqbic) { + return -ENOMEM; + } + + memset(cqbic, 0, sizeof(*cqbic)); + + busdev->driver_data = cqbic; + + cqbic->bus_ops = &cqbic_bus_ops; + + cqbic->iospace_phys_base = CQBIC_IOSPACE_BASE; + + /* The CQBIC maps QBUS interrupts to the second page of the SCB + (each page of the SCB contains 128 vectors). */ + cqbic->scb_offset = 128; + + /* Mark vector 0 as reserved */ + set_bit(0, cqbic->vector_bitmap); + + cqbic->mapregbase = (unsigned int *)ioremap(CQBIC_MAPREGPHYS, + CQBIC_NUMMAPREGS * sizeof(unsigned int)); +#if CQBIC_DEBUG + printk("CQBIC map registers mapped at %p\n", cqbic->mapregbase); +#endif + + for (i=0; imapregbase[i] = 0; + } + + /* Now we scan the qbus and look for CSR addresses that have + something living there. 
When we find a device, create a + driver model struct device for it */ + + cqbic_iospace = ioremap(cqbic->iospace_phys_base, CQBIC_IOSPACE_SIZE); + + for (i=0; i +#include + +#include + +#define QBUS_DEBUG 0 + + +/* These DMA, memory mapping and IRQ handling functions isolate + drivers for QBUS devices from the details of how the QBUS + is hooked up to the rest of the system */ + +struct vax_dmamap *qbus_alloc_mapregs(struct device *busdev, void *start, unsigned int len) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + return ops->dma_map(busdev, start, len); +} + +void qbus_unmap(struct device *busdev, struct vax_dmamap *map) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + ops->dma_unmap(busdev, map); +} + +void qbus_dumpmap(struct device *busdev) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + ops->dma_dumpmap(busdev); +} + +int qbus_vector_to_irq(struct device *busdev, unsigned int vector) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + return ops->vector_to_irq(busdev, vector); +} + +int qbus_request_irq(struct device *busdev, unsigned int vector, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char * devname, + void *dev_id) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + return ops->request_irq(busdev, vector, handler, irqflags, devname, dev_id); +} + +unsigned int qbus_reserve_vector(struct device *busdev, unsigned int vector) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + return ops->reserve_vector(busdev, vector); +} + +unsigned int qbus_alloc_vector(struct device *busdev) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + return ops->alloc_vector(busdev); +} + +void qbus_free_vector(struct device *busdev, unsigned int vector) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + ops->free_vector(busdev, vector); +} + +void 
*qbus_ioremap(struct device *busdev, unsigned int bus_addr, unsigned int size) +{ + struct qbus_ops *ops = *(struct qbus_ops **)busdev->driver_data; + return ops->ioremap(busdev, bus_addr, size); +} + + + +/* These functions support the QBUS device type for the driver model */ + +static ssize_t qbus_show_csr(struct device *dev, char *buf) +{ + struct qbus_device *qbus_dev = QBUS_DEV(dev); + return sprintf(buf, "0x%04x\n", qbus_dev->csr); +} + +static DEVICE_ATTR(csr, S_IRUGO, qbus_show_csr, NULL); + +static ssize_t qbus_show_csr_octal(struct device *dev, char *buf) +{ + struct qbus_device *qbus_dev = QBUS_DEV(dev); + return sprintf(buf, "%o\n", QBUS_OCTAL_CSR(qbus_dev->csr)); +} + +static DEVICE_ATTR(csr_octal, S_IRUGO, qbus_show_csr_octal, NULL); + + +/* These functions support the QBUS bus type for the driver model */ + +int qbus_register_device(struct qbus_device *qbus_dev) +{ + int error = device_register(&qbus_dev->dev); + + if (!error) { + device_create_file(&qbus_dev->dev, &dev_attr_csr); + } + + if (!error) { + device_create_file(&qbus_dev->dev, &dev_attr_csr_octal); + } + return error; +} + +int qbus_register_driver(struct qbus_driver *drv) +{ + drv->drv.bus = &qbus_bus_type; + + return driver_register(&drv->drv); +} + +void qbus_unregister_driver(struct qbus_driver *drv) +{ + return driver_unregister(&drv->drv); +} + +/* This gets called for each device when a new driver is + registered */ + +int qbus_bus_match(struct device *dev, struct device_driver *drv) +{ + struct qbus_device *qbus_dev = QBUS_DEV(dev); + struct qbus_driver *qbus_drv = QBUS_DRV(drv); + +#if QBUS_DEBUG + printk("qbus_match: called dev %p, CSR %o, drv %p\n", dev, QBUS_OCTAL_CSR(qbus_dev->csr), drv); +#endif + + if (qbus_drv->probe && + (qbus_drv->probe(qbus_dev) == 0)) { + + /* Found a driver that is willing to handle this device */ + return 1; + } + + return 0; +} + +struct bus_type qbus_bus_type = { + .name = "qbus", + .match = qbus_bus_match, +}; + +static int __init 
qbus_bus_init(void) +{ + return bus_register(&qbus_bus_type); +} + +postcore_initcall(qbus_bus_init); + + +EXPORT_SYMBOL(qbus_register_driver); +EXPORT_SYMBOL(qbus_unregister_driver); +EXPORT_SYMBOL(qbus_bus_type); + diff -Nru a/drivers/vax/bus/vsbus-ka4x.c b/drivers/vax/bus/vsbus-ka4x.c --- a/drivers/vax/bus/vsbus-ka4x.c 1970-01-01 01:00:00 +++ b/drivers/vax/bus/vsbus-ka4x.c 2005-04-26 00:25:06 @@ -0,0 +1,54 @@ +/* + * Support for the VSBUS pseudo bus adapter in the KA410, KA42, + * KA43, KA46 and KA48 CPUs. + * + * The best documentation I've been able to find so far is the + * VAXstation 2000 and MicroVAX 2000 Technical Manual (EK-VTTAA-TM) + * This manual covers the KA410. However, the newer VAXstations + * seem to be very similar. NetBSD and VMS's LIB.REQ have been + * further sources of information. + * + */ + +#include + +#include +#include + +static int __init vsbus_ka4x_probe(struct device *busdev) +{ + unsigned int __iomem *vectors; + int retval; + + /* + * Map the area where we expect to find our device + * interrupt vectors so that we can copy them somewhere + * more convenient + */ + + vectors = ioremap(0x20040020, 0x20); + if (!vectors) { + return -EAGAIN; + } + + retval = init_vsbus_adapter(vectors, VSA_BASE_REGS); + + iounmap(vectors); + + return retval; +} + +static struct device_driver vsbus_ka4x_driver = { + .name = "ka4x-vsbus", + .bus = &platform_bus_type, + .probe = vsbus_ka4x_probe, +}; + +int __init vsbus_ka4x_init(void) +{ + return driver_register(&vsbus_ka4x_driver); +} + + +subsys_initcall(vsbus_ka4x_init); + diff -Nru a/drivers/vax/bus/vsbus.c b/drivers/vax/bus/vsbus.c --- a/drivers/vax/bus/vsbus.c 1970-01-01 01:00:00 +++ b/drivers/vax/bus/vsbus.c 2005-05-31 16:03:18 @@ -0,0 +1,279 @@ +/* + * Support for the VSBUS pseudo-bus type. 
+ * + * As far as I can make out, VAXstation 3100, MicroVAX 3100 and + * VAXstation 4000 series machines have a chunk of bus interface + * circuitry between the main CPU-memory bus and the on-board + * peripheral chips (ethernet controller, SCSI controller, etc). + * I imagine that this 'bus adapter' is responsible for mapping + * device interrupt lines to VAX SCB vectors, doing address + * line decoding to locate these devices in I/O space and + * provide DMA facilities to main memory. + * + * It would be real nice to see a datasheet or tech manual + * for one of these boards. + * + * This file implements the drivel model 'bus type' for the VSBUS + * and the common features of all the VSBUS implementations. + * + * Differences in logic due to differences in the hardware are in + * vsbus-ka*.c + */ + +#include +#include + +#include +#include + +#define VSBUS_DEBUG 1 + +static struct vsbus_registers __iomem *vs_cpu_ptr; + +static unsigned int vsbus_rom_vectors[VSBUS_NR_IRQS]; + +int init_vsbus_adapter(unsigned int *vectors, unsigned long registers) +{ + if (vs_cpu_ptr) { + printk("vsbus: already initialized\n"); + return -EBUSY; + } + + memcpy(vsbus_rom_vectors, vectors, VSBUS_NR_IRQS * sizeof(unsigned int)); + + vs_cpu_ptr = ioremap(registers, 0x80); + if (!vs_cpu_ptr) { + return -EAGAIN; + } + + return 0; +} + +/* Interrupt vector handling on VSBUS devices is a bit unusual (for + a VAX). There are up to 8 interrupt sources. Each source has + a bit in the INTREQ/INTCLR register and the INTMSK register. + + Bit 7 is the highest priority interrupt, bit 0 is the lowest. + The assignment of bits to devices varies from model to model. + + In order for interrupts from a device to be delivered to the VAX + CPU, the relevant bit in INTMSK must be set. When the hardware + device requests an interrupt, the relevant bit in INTREQ is set. + If (INTMSK & INTREQ) is non-zero, an interrupt is delivered to + the CPU. 
When the CPU acknowledges this, it expects to be fed + an interrupt vector on the data bus. (This interrupt vector is + then used to index into the SCB to find the interrupt handler.) + + During the interrupt acknowledge cycle, the hardware finds the + highest bit set in (INTMSK & INTREQ) and generates a read from + the firmware ROM at address 0x20040020 + (bit_num * 4). This + causes the ROM to place the longword at that address on the data + bus, which the CPU picks up as the interrupt vector. + + So, in summary, the firmware ROM contains an 8-longword table + at physical address 0x20040020, which contains the interrupt + vectors. The hardware-specific driver fills in this table. */ + + +void vsbus_enable_int(int bit_nr) +{ + vs_cpu_ptr->vc_intmsk |= 1<vc_intclr = 1<vc_intmsk &= ~(1<> 2; + } else { + return 0; + } +} + +struct vsbus_irqinfo { + irqreturn_t (*handler)(int, void *, struct pt_regs *); + unsigned int irqindex; + void * dev_id; +}; + +static struct vsbus_irqinfo irqinfo[VSBUS_NR_IRQS]; + +static irqreturn_t vsbus_irq_handler(int irq, void *data, struct pt_regs *regs) +{ + struct vsbus_irqinfo *info = (struct vsbus_irqinfo *)data; + + vsbus_clear_int(info->irqindex); + return info->handler(irq, info->dev_id, regs); +} + +int vsbus_request_irq(unsigned int vsbus_irqindex, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char *devname, void *dev_id) +{ + struct vsbus_irqinfo *info; + int irq; + int retval; + + if (vsbus_irqindex >= VSBUS_NR_IRQS) { + return -EINVAL; + } + + info = irqinfo + vsbus_irqindex; + + /* FIXME: need a semaphore here */ + + if (info->handler) { + return -EBUSY; + } + + info->handler = handler; + info->dev_id = dev_id; + info->irqindex = vsbus_irqindex; + + irq = vsbus_irqindex_to_irq(info->irqindex); + + retval = request_irq(irq, vsbus_irq_handler, irqflags, devname, info); + if (!retval) { + vsbus_clear_int(vsbus_irqindex); + vsbus_enable_int(vsbus_irqindex); + } else { + info->handler = 
NULL; + } + + return retval; +} + +void vsbus_free_irq(unsigned int vsbus_irqindex) +{ + struct vsbus_irqinfo *info; + int irq; + + if (vsbus_irqindex >= VSBUS_NR_IRQS) { + return; + } + + info = irqinfo + vsbus_irqindex; + + /* FIXME: need a semaphore here */ + + if (info->handler) { + vsbus_disable_int(vsbus_irqindex); + + irq = vsbus_irqindex_to_irq(info->irqindex); + free_irq(irq, info); + + /* FIXME: do we need to synchronize with this interrupt? */ + + info->handler = NULL; + } +} + + +void vsbus_add_fixed_device(struct device *parent, char *name, + unsigned int phys_base, unsigned int irqindex) +{ + struct vsbus_device *vsbus_dev; + + vsbus_dev = kmalloc(sizeof(*vsbus_dev), GFP_KERNEL); + if (vsbus_dev == NULL) { + printk("vsbus_add_fixed_device: cannot allocate " + "device structure for addr 0x%08x irqindex %d\n", phys_base, irqindex); + return; + } + + memset(vsbus_dev, 0, sizeof(*vsbus_dev)); + + vsbus_dev->phys_base = phys_base; + vsbus_dev->vsbus_irq = irqindex; + vsbus_dev->dev.bus = &vsbus_bus_type; + vsbus_dev->dev.parent = parent; + + snprintf(vsbus_dev->dev.bus_id, sizeof(vsbus_dev->dev.bus_id), + "%s", name); + + vsbus_register_device(vsbus_dev); +} + + + +/* These functions support the VSBUS bus type for the driver model */ + +static int vsbus_drv_remove(struct device *dev) +{ + struct vsbus_device *vsbus_dev = VSBUS_DEV(dev); + struct vsbus_driver *vsbus_drv = VSBUS_DRV(dev->driver); + + vsbus_drv->remove(vsbus_dev); + + return 0; +} + +int vsbus_register_device(struct vsbus_device *vsbus_dev) +{ + return device_register(&vsbus_dev->dev); +} + +int vsbus_register_driver(struct vsbus_driver *drv) +{ + drv->drv.bus = &vsbus_bus_type; + drv->drv.remove = vsbus_drv_remove; + + return driver_register(&drv->drv); +} + + +void vsbus_unregister_driver(struct vsbus_driver *drv) +{ + return driver_unregister(&drv->drv); +} + +/* This gets called for each device when a new driver is + registered. 
Since the set of devices that can appear on + the VSBUS is very limited, we can get away with a very simple + name-based match. */ + +int vsbus_bus_match(struct device *dev, struct device_driver *drv) +{ + struct vsbus_device *vsbus_dev = VSBUS_DEV(dev); + struct vsbus_driver *vsbus_drv = VSBUS_DRV(drv); + +#if VSBUS_DEBUG + printk("vsbus_match: called dev %s, drv %s\n", dev->bus_id, drv->name); +#endif + + if (!strncmp(dev->bus_id, drv->name, strlen(drv->name)) && + (vsbus_drv->probe(vsbus_dev) == 0)) { + + /* Found a driver that is willing to handle this device */ + return 1; + } + + return 0; +} + +struct bus_type vsbus_bus_type = { + .name = "vsbus", + .match = vsbus_bus_match, +}; + +static int __init vsbus_bus_init(void) +{ + return bus_register(&vsbus_bus_type); +} + +postcore_initcall(vsbus_bus_init); + + +EXPORT_SYMBOL(vsbus_register_driver); +EXPORT_SYMBOL(vsbus_unregister_driver); +EXPORT_SYMBOL(vsbus_bus_type); + diff -Nru a/drivers/vax/char/Makefile b/drivers/vax/char/Makefile --- a/drivers/vax/char/Makefile 1970-01-01 01:00:00 +++ b/drivers/vax/char/Makefile 2004-11-19 13:45:18 @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/VAX character device drivers. +# + +obj-$(CONFIG_DZ) += dz.o + diff -Nru a/drivers/vax/char/dz.c b/drivers/vax/char/dz.c --- a/drivers/vax/char/dz.c 1970-01-01 01:00:00 +++ b/drivers/vax/char/dz.c 2005-08-02 01:43:06 @@ -0,0 +1,1605 @@ +/* + * dz.c: Serial port driver for DECStations & VAXstations equiped + * with the DZ chipset. + * + * Copyright (C) 1998 Olivier A. D. Lebaillif + * + * Email: olivier.lebaillif@ifrsys.com + * + * [31-AUG-98] triemer + * Changed IRQ to use Harald's dec internals interrupts.h + * removed base_addr code - moving address assignment to setup.c + * Changed name of dz_init to rs_init to be consistent with tc code + * [13-NOV-98] triemer fixed code to receive characters + * after patches by harald to irq code. 
+ * [09-JAN-99] triemer minor fix for schedule - due to removal of timeout + * field from "current" - somewhere between 2.1.121 and 2.1.131 + * [27-JUN-2001] Arnaldo Carvalho de Melo - cleanups + * + * Parts (C) 1999 David Airlie, airlied@linux.ie + * [07-SEP-99] Bugfixes + */ + +/* #define DEBUG_DZ 1 */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SERIO +#include +#endif /* CONFIG_SERIO */ + +#include + +/* for definition of struct console */ +#ifdef CONFIG_SERIAL_CONSOLE +#define CONSOLE_LINE (3) +#endif /* ifdef CONFIG_SERIAL_CONSOLE */ +#if defined(CONFIG_SERIAL_CONSOLE) || defined(DEBUG_DZ) +#include +#endif /* if defined(CONFIG_SERIAL_CONSOLE) || defined(DEBUG_DZ) */ + +#include +#include + +#include +#include + +#include +#include + +#ifdef DEBUG_DZ +#include +#include +extern int (*prom_printf) (char *,...); +#endif +#include "dz.h" + +#define DZ_INTR_DEBUG 1 + +static struct dz_serial *lines[4]; +static unsigned char tmp_buffer[256]; + +#ifdef DEBUG_DZ +/* + * debugging code to send out chars via prom + */ +static void debug_console( const char *s,int count) +{ + unsigned i; + + for (i = 0; i < count; i++) { + if (*s == 10) + prom_printf("%c", 13); + prom_printf("%c", *s++); + } +} +#endif + +/* + * ------------------------------------------------------------ + * dz_in () and dz_out () + * + * These routines are used to access the registers of the DZ + * chip, hiding relocation differences between implementation. 
+ * ------------------------------------------------------------ + */ + +static inline unsigned short dz_in (struct dz_serial *info, unsigned offset) +{ + volatile u16 *addr = (volatile u16 *)(info->port + offset); + + return *addr; +} + +static inline void dz_out (struct dz_serial *info, unsigned offset, + unsigned short value) +{ + volatile u16 *addr = (volatile u16 *)(info->port + offset); + *addr = value; +} + +/* + * ------------------------------------------------------------ + * rs_stop () and rs_start () + * + * These routines are called before setting or resetting + * tty->stopped. They enable or disable transmitter interrupts, + * as necessary. + * ------------------------------------------------------------ + */ + +static void dz_stop (struct tty_struct *tty) +{ + struct dz_serial *info; + unsigned short mask, tmp; + + if (!tty) + return; + + info = (struct dz_serial *)tty->driver_data; + + mask = 1 << info->line; + tmp = dz_in (info, DZ_TCR); /* read the TX flag */ + + tmp &= ~mask; /* clear the TX flag */ + dz_out (info, DZ_TCR, tmp); +} + +static void dz_start (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + unsigned short mask, tmp; + + mask = 1 << info->line; + tmp = dz_in (info, DZ_TCR); /* read the TX flag */ + + tmp |= mask; /* set the TX flag */ + dz_out (info, DZ_TCR, tmp); +} + +/* + * ------------------------------------------------------------ + * Here starts the interrupt handling routines. All of the + * following subroutines are declared as inline and are folded + * into dz_interrupt. They were separated out for readability's + * sake. + * + * Note: rs_interrupt() is a "fast" interrupt, which means that it + * runs with interrupts turned off. People who may want to modify + * rs_interrupt() should try to keep the interrupt handler as fast as + * possible. 
After you are done making modifications, it is not a bad + * idea to do: + * + * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer dz.c + * + * and look at the resulting assemble code in serial.s. + * + * ------------------------------------------------------------ + */ + +/* + * ------------------------------------------------------------ + * dz_sched_event () + * + * This routine is used by the interrupt handler to schedule + * processing in the software interrupt portion of the driver. + * ------------------------------------------------------------ + */ +static inline void dz_sched_event (struct dz_serial *info, int event) +{ + info->event |= 1 << event; + schedule_work(&info->tqueue); +} + +/* + * ------------------------------------------------------------ + * receive_char () + * + * This routine deals with inputs from any lines. + * ------------------------------------------------------------ + */ +static inline void receive_chars (struct dz_serial *info_in) +{ + struct dz_serial *info; + struct tty_struct *tty = 0; + struct async_icount *icount; + int ignore = 0; + unsigned short status, tmp; + unsigned char ch; + + /* + * This code is going to be a problem... the call to tty_flip_buffer + * is going to need to be rethought... + */ + do { + status = dz_in (info_in, DZ_RBUF); + info = lines[LINE(status)]; + + /* punt so we don't get duplicate characters */ + if (!(status & DZ_DVAL)) + goto ignore_char; + + ch = UCHAR(status); /* grab the char */ + +#if 0 + if (info->is_console) { + if (ch == 0) + return; /* it's a break ... 
*/ + } +#endif + + tty = info->tty; /* now tty points to the proper dev */ + icount = &info->icount; + + if (!tty) + break; + if (tty->flip.count >= TTY_FLIPBUF_SIZE) break; + + *tty->flip.char_buf_ptr = ch; + *tty->flip.flag_buf_ptr = 0; + icount->rx++; + + /* keep track of the statistics */ + if (status & (DZ_OERR | DZ_FERR | DZ_PERR)) { + if (status & DZ_PERR) /* parity error */ + icount->parity++; + else if (status & DZ_FERR) /* frame error */ + icount->frame++; + if (status & DZ_OERR) /* overrun error */ + icount->overrun++; + + /* + * Check to see if we should ignore the character and + * mask off conditions that should be ignored + */ + + if (status & info->ignore_status_mask) { + if (++ignore > 100) + break; + goto ignore_char; + } + + /* mask off the error conditions we want to ignore */ + tmp = status & info->read_status_mask; + + if (tmp & DZ_PERR) { + *tty->flip.flag_buf_ptr = TTY_PARITY; +#ifdef DEBUG_DZ + debug_console("PERR\n",5); +#endif /* DEBUG_DZ */ + } else if (tmp & DZ_FERR) { + *tty->flip.flag_buf_ptr = TTY_FRAME; +#ifdef DEBUG_DZ + debug_console("FERR\n",5); +#endif /* DEBUG_DZ */ + } if (tmp & DZ_OERR) { +#ifdef DEBUG_DZ + debug_console("OERR\n",5); +#endif /* DEBUG_DZ */ + if (tty->flip.count < TTY_FLIPBUF_SIZE) { + tty->flip.count++; + tty->flip.flag_buf_ptr++; + tty->flip.char_buf_ptr++; + *tty->flip.flag_buf_ptr = TTY_OVERRUN; + } + } + } + tty->flip.flag_buf_ptr++; + tty->flip.char_buf_ptr++; + tty->flip.count++; +ignore_char: + ; + } while (status & DZ_DVAL); + + if (tty) + tty_flip_buffer_push(tty); +} + +/* + * ------------------------------------------------------------ + * transmit_char () + * + * This routine deals with outputs to any lines. 
+ * ------------------------------------------------------------ + */ +static inline void transmit_chars (struct dz_serial *info) +{ + unsigned char tmp; + + if (info->x_char) { /* XON/XOFF chars */ + dz_out(info, DZ_TDR, info->x_char); + info->icount.tx++; + info->x_char = 0; + return; + } + + /* if nothing to do or stopped or hardware stopped */ + if ((info->xmit_cnt <= 0) || info->tty->stopped || + info->tty->hw_stopped) { + dz_stop(info->tty); + return; + } + + /* + * If something to do ... (rember the dz has no output fifo so we go + * one char at a time :-< + */ + tmp = (unsigned short) info->xmit_buf[info->xmit_tail++]; + dz_out(info, DZ_TDR, tmp); + info->xmit_tail = info->xmit_tail & (DZ_XMIT_SIZE - 1); + info->icount.tx++; + + if (--info->xmit_cnt < WAKEUP_CHARS) + dz_sched_event(info, DZ_EVENT_WRITE_WAKEUP); + + /* Are we done */ + if (info->xmit_cnt <= 0) + dz_stop(info->tty); +} + +/* + * ------------------------------------------------------------ + * check_modem_status () + * + * Only valid for the MODEM line duh ! + * ------------------------------------------------------------ + */ +static inline void check_modem_status (struct dz_serial *info) +{ + unsigned short status; + + /* if not ne modem line just return */ + if (info->line != DZ_MODEM) + return; + + status = dz_in(info, DZ_MSR); + + /* it's easy, since DSR2 is the only bit in the register */ + if (status) + info->icount.dsr++; +} + +/* + * ------------------------------------------------------------ + * dz_interrupt () + * + * this is the main interrupt routine for the DZ chip. + * It deals with the multiple ports. 
+ * ------------------------------------------------------------ + */ +/* VAX has separate RX/TX interrupts */ + +static irqreturn_t dz_interrupt_rx (int irq, void *dev, struct pt_regs *regs) +{ + struct dz_serial *info; + unsigned short status; + + status = dz_in ((struct dz_serial *)dev, DZ_CSR); /* get the reason why we just got an irq */ + info = lines[LINE(status)]; /* re-arrange info the proper port */ + + if (status & DZ_RDONE) + receive_chars (info); /* the receive function */ + + return IRQ_HANDLED; +} + +static irqreturn_t dz_interrupt_tx (int irq, void *dev, struct pt_regs *regs) +{ + struct dz_serial *info; + unsigned short status; + + status = dz_in ((struct dz_serial *)dev, DZ_CSR); /* get the reason why we just got an irq */ + info = lines[LINE(status)]; /* re-arrange info the proper port */ + + if (status & DZ_TRDY) + transmit_chars (info); + + return IRQ_HANDLED; +} + +/* + * ------------------------------------------------------------------- + * Here ends the DZ interrupt routines. + * ------------------------------------------------------------------- + */ + +static void do_softint (void *private_data) +{ + struct dz_serial *info = (struct dz_serial *) private_data; + struct tty_struct *tty = info->tty; + + if (!tty) + return; + + if (test_and_clear_bit(DZ_EVENT_WRITE_WAKEUP, &info->event)) { + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup) (tty); + wake_up_interruptible (&tty->write_wait); + } +} + +/* + * ------------------------------------------------------------------- + * This routine is called from the scheduler tqueue when the interrupt + * routine has signalled that a hangup has occurred. 
The path of + * hangup processing is: + * + * serial interrupt routine -> (scheduler tqueue) -> + * do_serial_hangup() -> tty->hangup() -> rs_hangup() + * ------------------------------------------------------------------- + */ +static void do_serial_hangup (void *private_data) +{ + struct dz_serial *info = (struct dz_serial *) private_data; + struct tty_struct *tty = info->tty; + + if (!tty) + return; + + tty_hangup(tty); +} + +/* + * ------------------------------------------------------------------- + * startup () + * + * various initialization tasks + * ------------------------------------------------------------------- + */ +static int startup (struct dz_serial *info) +{ + unsigned long page, flags; + unsigned short tmp; + + if (info->is_initialized) + return 0; + + save_and_cli(flags); + + if (!info->port) { + if (info->tty) set_bit (TTY_IO_ERROR, &info->tty->flags); + restore_flags (flags); + return -ENODEV; + } + + if (!info->xmit_buf) { + page = get_zeroed_page(GFP_KERNEL); + if (!page) { + restore_flags (flags); + return -ENOMEM; + } + info->xmit_buf = (unsigned char *)page; + } + + if (info->tty) + clear_bit (TTY_IO_ERROR, &info->tty->flags); + + /* enable the interrupt and the scanning */ + tmp = dz_in (info, DZ_CSR); + tmp |= (DZ_RIE | DZ_TIE | DZ_MSE); + dz_out (info, DZ_CSR, tmp); + + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + + change_speed (info); /* set up the speed */ + + /* + * Clear the line transmitter buffer I can't figure out why I need to + * do this - but its necessary - in order for the console portion and + * the interrupt portion to live happily side by side. + */ + + info->is_initialized = 1; + + restore_flags (flags); + + return 0; +} + +/* + * ------------------------------------------------------------------- + * shutdown () + * + * This routine will shutdown a serial port; interrupts are disabled, and + * DTR is dropped if the hangup on close termio flag is on. 
+ * ------------------------------------------------------------------- + */ +static void shutdown (struct dz_serial *info) +{ + unsigned long flags; + unsigned short tmp; + + if (!info->is_initialized) + return; + + save_and_cli(flags); + + dz_stop (info->tty); + + info->cflags &= ~DZ_CREAD; /* turn off receive enable flag */ + dz_out(info, DZ_LPR, info->cflags); + + if (info->xmit_buf) { /* free Tx buffer */ + free_page((unsigned long)info->xmit_buf); + info->xmit_buf = 0; + } + + if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) { + tmp = dz_in(info, DZ_TCR); + if (tmp & DZ_MODEM_DTR) { + tmp &= ~DZ_MODEM_DTR; + dz_out(info, DZ_TCR, tmp); + } + } + + if (info->tty) + set_bit (TTY_IO_ERROR, &info->tty->flags); + + info->is_initialized = 0; + + restore_flags (flags); +} + +/* + * ------------------------------------------------------------------- + * change_speed () + * + * set the baud rate. + * ------------------------------------------------------------------- + */ +static void change_speed (struct dz_serial *info) +{ + unsigned long flags; + unsigned cflag; + int baud; + + if (!info->tty || !info->tty->termios) + return; + + save_and_cli(flags); + + info->cflags = info->line; + + cflag = info->tty->termios->c_cflag; + + switch (cflag & CSIZE) { + case CS5: + info->cflags |= DZ_CS5; + break; + case CS6: + info->cflags |= DZ_CS6; + break; + case CS7: + info->cflags |= DZ_CS7; + break; + case CS8: + default: + info->cflags |= DZ_CS8; + } + + if (cflag & CSTOPB) + info->cflags |= DZ_CSTOPB; + if (cflag & PARENB) + info->cflags |= DZ_PARENB; + if (cflag & PARODD) + info->cflags |= DZ_PARODD; + + baud = tty_get_baud_rate(info->tty); + switch (baud) { + case 50: + info->cflags |= DZ_B50; + break; + case 75: + info->cflags |= DZ_B75; + break; + case 110: + info->cflags |= DZ_B110; + break; + case 134: + info->cflags |= DZ_B134; + break; + case 150: + info->cflags |= DZ_B150; + break; + case 300: + info->cflags |= DZ_B300; + break; + case 600: + info->cflags |= 
DZ_B600; + break; + case 1200: + info->cflags |= DZ_B1200; + break; + case 1800: + info->cflags |= DZ_B1800; + break; + case 2000: + info->cflags |= DZ_B2000; + break; + case 2400: + info->cflags |= DZ_B2400; + break; + case 3600: + info->cflags |= DZ_B3600; + break; + case 4800: + info->cflags |= DZ_B4800; + break; + case 7200: + info->cflags |= DZ_B7200; + break; + case 9600: + default: + info->cflags |= DZ_B9600; + } + + info->cflags |= DZ_RXENAB; + dz_out(info, DZ_LPR, info->cflags); + + /* setup accept flag */ + info->read_status_mask = DZ_OERR; + if (I_INPCK(info->tty)) + info->read_status_mask |= (DZ_FERR | DZ_PERR); + + /* characters to ignore */ + info->ignore_status_mask = 0; + if (I_IGNPAR(info->tty)) + info->ignore_status_mask |= (DZ_FERR | DZ_PERR); + + restore_flags(flags); +} + +/* + * ------------------------------------------------------------------- + * dz_flush_char () + * + * Flush the buffer. + * ------------------------------------------------------------------- + */ +static void dz_flush_chars (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + unsigned long flags; + + if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !info->xmit_buf) + return; + + save_and_cli(flags); + dz_start (info->tty); + restore_flags(flags); +} + + +/* + * ------------------------------------------------------------------- + * dz_write () + * + * main output routine. 
+ * ------------------------------------------------------------------- + */ +static int dz_write (struct tty_struct *tty, + const unsigned char *buf, int count) +{ + struct dz_serial *info; + unsigned long flags; + int c, ret = 0; + + if (!tty) + return ret; + info = (struct dz_serial *) tty->driver_data; + + if (!info->xmit_buf) + return ret; + if (!tmp_buf) + tmp_buf = tmp_buffer; + + while (1) { + save_and_cli(flags); + + c = MIN(count, MIN(DZ_XMIT_SIZE - info->xmit_cnt - 1, + DZ_XMIT_SIZE - info->xmit_head)); + if (c <= 0) { + restore_flags (flags); + break; + } + memcpy(info->xmit_buf + info->xmit_head, buf, c); + info->xmit_head = ((info->xmit_head + c) & (DZ_XMIT_SIZE-1)); + info->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + ret += c; + } + + if (info->xmit_cnt) { + if (!tty->stopped) { + if (!tty->hw_stopped) { + dz_start (info->tty); + } + } + } + + return ret; +} + +/* + * ------------------------------------------------------------------- + * dz_write_room () + * + * compute the amount of space available for writing. 
+ * ------------------------------------------------------------------- + */ +static int dz_write_room (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + int ret; + + ret = DZ_XMIT_SIZE - info->xmit_cnt - 1; + if (ret < 0) + ret = 0; + + return ret; +} + +/* + * ------------------------------------------------------------------- + * dz_chars_in_buffer () + * + * compute the amount of char left to be transmitted + * ------------------------------------------------------------------- + */ +static int dz_chars_in_buffer (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + + return info->xmit_cnt; +} + +/* + * ------------------------------------------------------------------- + * dz_flush_buffer () + * + * Empty the output buffer + * ------------------------------------------------------------------- + */ +static void dz_flush_buffer (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + + cli(); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + sti(); + + wake_up_interruptible (&tty->write_wait); + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + tty->ldisc.write_wakeup(tty); +} + +/* + * ------------------------------------------------------------ + * dz_throttle () and dz_unthrottle () + * + * This routine is called by the upper-layer tty layer to signal that + * incoming characters should be throttled (or not). 
+ * ------------------------------------------------------------ + */ +static void dz_throttle (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + + if (I_IXOFF(tty)) + info->x_char = STOP_CHAR(tty); +} + +static void dz_unthrottle (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + info->x_char = START_CHAR(tty); + } +} + +static void dz_send_xchar (struct tty_struct *tty, char ch) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + + info->x_char = ch; + + if (ch) + dz_start(info->tty); +} + +/* + * ------------------------------------------------------------ + * rs_ioctl () and friends + * ------------------------------------------------------------ + */ +static int get_serial_info(struct dz_serial *info, + struct serial_struct *retinfo) +{ + struct serial_struct tmp; + + if (!retinfo) + return -EFAULT; + + memset (&tmp, 0, sizeof(tmp)); + + tmp.type = info->type; + tmp.line = info->line; + tmp.port = info->port; + tmp.irq = 0; + tmp.flags = info->flags; + tmp.baud_base = info->baud_base; + tmp.close_delay = info->close_delay; + tmp.closing_wait = info->closing_wait; + + return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; +} + +static int set_serial_info (struct dz_serial *info, + struct serial_struct *new_info) +{ + struct serial_struct new_serial; + struct dz_serial old_info; + int retval = 0; + + if (!new_info) + return -EFAULT; + + if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) + return -EFAULT; + + old_info = *info; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (info->count > 1) + return -EBUSY; + + /* + * OK, past this point, all the error checking has been done. + * At this point, we start making changes..... 
+ */ + + info->baud_base = new_serial.baud_base; + info->type = new_serial.type; + info->close_delay = new_serial.close_delay; + info->closing_wait = new_serial.closing_wait; + + retval = startup(info); + + return retval; +} + +/* + * get_lsr_info - get line status register info + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows an RS485 driver to be written in user space. + */ +static int get_lsr_info (struct dz_serial *info, unsigned int *value) +{ + unsigned short status = dz_in (info, DZ_LPR); + + return put_user (status, value); +} + +/* + * This routine sends a break character out the serial port. + */ +static void send_break (struct dz_serial *info, int duration) +{ + unsigned long flags; + unsigned short tmp, mask; + + if (!info->port) + return; + + mask = 1 << info->line; + tmp = dz_in (info, DZ_TCR); + tmp |= mask; + + current->state = TASK_INTERRUPTIBLE; + + save_and_cli(flags); + dz_out(info, DZ_TCR, tmp); + schedule_timeout(duration); + tmp &= ~mask; + dz_out(info, DZ_TCR, tmp); + restore_flags(flags); +} + +static int dz_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct dz_serial * info = (struct dz_serial *)tty->driver_data; + int retval; + + if (cmd != TIOCGSERIAL && cmd != TIOCSSERIAL && + cmd != TIOCSERCONFIG && cmd != TIOCSERGWILD && + cmd != TIOCSERSWILD && cmd != TIOCSERGSTRUCT) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + switch (cmd) { + case TCSBRK: /* SVID version: non-zero arg --> no break */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + if (!arg) + send_break(info, HZ/4); /* 1/4 second */ + return 0; + + case TCSBRKP: /* support for POSIX 
tcsendbreak() */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + send_break(info, arg ? arg*(HZ/10) : HZ/4); + return 0; + + case TIOCGSOFTCAR: + return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *)arg); + + case TIOCSSOFTCAR: + if (get_user (arg, (unsigned long *)arg)) + return -EFAULT; + + tty->termios->c_cflag = (tty->termios->c_cflag & ~CLOCAL) | + (arg ? CLOCAL : 0); + return 0; + + case TIOCGSERIAL: + return get_serial_info(info, (struct serial_struct *)arg); + + case TIOCSSERIAL: + return set_serial_info(info, (struct serial_struct *) arg); + + case TIOCSERGETLSR: /* Get line status register */ + return get_lsr_info (info, (unsigned int *)arg); + + case TIOCSERGSTRUCT: + return copy_to_user((struct dz_serial *)arg, info, + sizeof(struct dz_serial)) ? -EFAULT : 0; + + default: + return -ENOIOCTLCMD; + } + + return 0; +} + +static void dz_set_termios (struct tty_struct *tty, + struct termios *old_termios) +{ + struct dz_serial *info = (struct dz_serial *)tty->driver_data; + + if (tty->termios->c_cflag == old_termios->c_cflag) + return; + + change_speed (info); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + dz_start(tty); + } +} + +/* + * ------------------------------------------------------------ + * dz_close() + * + * This routine is called when the serial port gets closed. First, we + * wait for the last remaining data to be sent. Then, we turn off + * the transmit enable and receive enable flags. + * ------------------------------------------------------------ + */ +static void dz_close(struct tty_struct *tty, struct file *filp) +{ + struct dz_serial * info = (struct dz_serial *)tty->driver_data; + unsigned long flags; + + if (!info) + return; + + save_and_cli(flags); + + if (tty_hung_up_p(filp)) { + restore_flags(flags); + return; + } + + if ((tty->count == 1) && (info->count != 1)) { + /* + * Uh, oh. 
tty->count is 1, which means that the tty structure + * will be freed. Info->count should always be one in these + * conditions. If it's greater than one, we've got real + * problems, since it means the serial port won't be shutdown. + */ + printk("dz_close: bad serial port count; tty->count is 1, " + "info->count is %d\n", info->count); + info->count = 1; + } + + if (--info->count < 0) { + printk("ds_close: bad serial port count for ttyS%02d: %d\n", + info->line, info->count); + info->count = 0; + } + + if (info->count) { + restore_flags(flags); + return; + } + info->flags |= DZ_CLOSING; + /* + * Now we wait for the transmit buffer to clear; and we notify the line + * discipline to only process XON/XOFF characters. + */ + tty->closing = 1; + + if (info->closing_wait != DZ_CLOSING_WAIT_NONE) + tty_wait_until_sent(tty, info->closing_wait); + + /* + * At this point we stop accepting input. To do this, we disable the + * receive line status interrupts. + */ + shutdown(info); + + if (tty->driver->flush_buffer) + tty->driver->flush_buffer (tty); + if (tty->ldisc.flush_buffer) + tty->ldisc.flush_buffer (tty); + tty->closing = 0; + info->event = 0; + info->tty = 0; + + tty_ldisc_flush(tty); + + if (info->blocked_open) { + if (info->close_delay) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(info->close_delay); + } + wake_up_interruptible(&info->open_wait); + } + + info->flags &= ~(DZ_NORMAL_ACTIVE | DZ_CLOSING); + wake_up_interruptible(&info->close_wait); + + restore_flags(flags); +} + +/* + * dz_hangup () --- called by tty_hangup() when a hangup is signaled. 
+ */ +static void dz_hangup (struct tty_struct *tty) +{ + struct dz_serial *info = (struct dz_serial *) tty->driver_data; + + dz_flush_buffer(tty); + shutdown(info); + info->event = 0; + info->count = 0; + info->flags &= ~DZ_NORMAL_ACTIVE; + info->tty = 0; + wake_up_interruptible(&info->open_wait); +} + +/* + * ------------------------------------------------------------ + * rs_open() and friends + * ------------------------------------------------------------ + */ +static int block_til_ready(struct tty_struct *tty, struct file *filp, + struct dz_serial *info) +{ + DECLARE_WAITQUEUE(wait, current); + int retval; + int do_clocal = 0; + + /* + * If the device is in the middle of being closed, then block + * until it's done, and then try again. + */ + if (info->flags & DZ_CLOSING) { + interruptible_sleep_on(&info->close_wait); + return -EAGAIN; + } + + /* + * If non-blocking mode is set, or the port is not enabled, then make + * the check up front and then exit. + */ + if ((filp->f_flags & O_NONBLOCK) || + (tty->flags & (1 << TTY_IO_ERROR))) { + info->flags |= DZ_NORMAL_ACTIVE; + + return 0; + } + + if (tty->termios->c_cflag & CLOCAL) + do_clocal = 1; + + /* + * Block waiting for the carrier detect and the line to become free + * (i.e., not in use by the callout). While we are in this loop, + * info->count is dropped by one, so that dz_close() knows when to free + * things. We restore it upon exit, either normal or abnormal. 
+ */ + retval = 0; + add_wait_queue(&info->open_wait, &wait); + + info->count--; + info->blocked_open++; + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + if (tty_hung_up_p (filp) || !(info->is_initialized)) { + retval = -EAGAIN; + break; + } + if (!(info->flags & DZ_CLOSING) && do_clocal) + break; + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + schedule(); + } + + current->state = TASK_RUNNING; + remove_wait_queue (&info->open_wait, &wait); + if (!tty_hung_up_p(filp)) + info->count++; + info->blocked_open--; + + if (retval) + return retval; + info->flags |= DZ_NORMAL_ACTIVE; + return 0; +} + +/* + * This routine is called whenever a serial port is opened. It + * enables interrupts for a serial port. It also performs the + * serial-specific initialization for the tty structure. + */ +static int dz_open (struct tty_struct *tty, struct file *filp) +{ + struct dz_serial *info; + int retval, line; + + line = tty->index; + + if ((line < 0) || (line >= DZ_NB_PORT)) + return -ENODEV; + + info = lines[line]; + info->count++; + + tty->driver_data = info; + info->tty = tty; + + /* + * Start up serial port + */ + retval = startup (info); + if (retval) + return retval; + + retval = block_til_ready (tty, filp, info); + if (retval) + return retval; + + return 0; +} + +static void show_serial_version (void) +{ + printk("%s%s\n", dz_name, dz_version); +} + +#ifdef CONFIG_SERIO +static spinlock_t dz_serio_lock = SPIN_LOCK_UNLOCKED; + +static int +dz_serio_write (struct serio *serio, unsigned char one_byte) +{ + struct dz_serial *info = serio->port_data; + unsigned long flags; + + spin_lock_irqsave (&dz_serio_lock, flags); + dz_write (info->tty, &one_byte, sizeof (one_byte)); + spin_unlock_irqrestore (&dz_serio_lock, flags); + + return 0; +} + +static int +dz_serio_open (struct serio *serio) +{ + struct dz_serial *info = serio->port_data; + unsigned long flags; + int ret; + + spin_lock_irqsave (&dz_serio_lock, flags); + if (info->serio_opened == 0) { 
+ info->serio_opened = 1; + ret = 0; + } else + ret = -EBUSY; + spin_unlock_irqrestore (&dz_serio_lock, flags); + + return ret; +} + +static void +dz_serio_close (struct serio *serio) +{ + struct dz_serial *info = serio->port_data; + unsigned long flags; + + spin_lock_irqsave (&dz_serio_lock, flags); + info->serio_opened = 0; + spin_unlock_irqrestore (&dz_serio_lock, flags); +} +#endif /* CONFIG_SERIO */ + +static struct tty_driver *serial_driver; + +static struct tty_operations serial_ops = { + .open = dz_open, + .close = dz_close, + .write = dz_write, + .flush_chars = dz_flush_chars, + .write_room = dz_write_room, + .chars_in_buffer = dz_chars_in_buffer, + .flush_buffer = dz_flush_buffer, + .ioctl = dz_ioctl, + .throttle = dz_throttle, + .unthrottle = dz_unthrottle, + .send_xchar = dz_send_xchar, + .set_termios = dz_set_termios, + .stop = dz_stop, + .start = dz_start, + .hangup = dz_hangup, +}; + +static int __init dz_probe(struct vsbus_device *vsbus_dev) +{ + int i, flags; + unsigned long base_addr; + struct dz_serial *info; + + printk("dz_probe: name = %s, base = 0x%08x, irqindex = %d\n", + vsbus_dev->dev.bus_id, vsbus_dev->phys_base, vsbus_dev->vsbus_irq); + + serial_driver = alloc_tty_driver(DZ_NB_PORT); + if (!serial_driver) + return -ENOMEM; + + show_serial_version(); + + serial_driver->owner = THIS_MODULE; + serial_driver->devfs_name = "tts/"; + serial_driver->name = "ttyS"; + serial_driver->major = TTY_MAJOR; + serial_driver->minor_start = 64; + serial_driver->type = TTY_DRIVER_TYPE_SERIAL; + serial_driver->subtype = SERIAL_TYPE_NORMAL; + serial_driver->init_termios = tty_std_termios; + serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | + CLOCAL; + serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS; + tty_set_operations(serial_driver, &serial_ops); + + if (tty_register_driver(serial_driver)) + panic("Couldn't register serial driver\n"); + + /* FIXME: check this for NULL */ + base_addr = (unsigned long) 
ioremap(vsbus_dev->phys_base, 16); + + local_irq_save(flags); + for (i=0; i < DZ_NB_PORT; i++) { + info = &multi[i]; + lines[i] = info; + info->magic = SERIAL_MAGIC; + + info->port = base_addr; + info->line = i; + info->tty = 0; + info->close_delay = 50; + info->closing_wait = 3000; + info->x_char = 0; + info->event = 0; + info->count = 0; + info->blocked_open = 0; + INIT_WORK(&info->tqueue, do_softint, info); + INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); + init_waitqueue_head(&info->open_wait); + init_waitqueue_head(&info->close_wait); + + /* + * If we are pointing to address zero then punt - not correctly + * set up in setup.c to handle this. + */ + if (! info->port) + return 0; + + printk("ttyS%02d at 0x%04x\n", info->line, info->port); + + tty_register_device(serial_driver, info->line, NULL); + +#ifdef CONFIG_SERIO + memset (&info->serio, 0, sizeof (struct serio)); + info->serio.port_data = info; + info->serio.id.type = SERIO_RS232; + + switch (i) { + case DZ_KEYBOARD: + info->serio.id.type |= SERIO_LKKBD; + sprintf (info->serio.name, "dzkbd"); + sprintf (info->serio.phys, "dz/line0"); + break; + case DZ_MOUSE: + info->serio.id.type |= SERIO_VSXXXAA; + sprintf (info->serio.name, "dzmouse"); + sprintf (info->serio.phys, "dz/line1"); + break; + case DZ_MODEM: + sprintf (info->serio.name, "dz"); + sprintf (info->serio.phys, "dz/line2"); + break; + case DZ_PRINTER: + sprintf (info->serio.name, "dz"); + sprintf (info->serio.phys, "dz/line3"); + break; + } + + info->serio.open = dz_serio_open; + info->serio.close = dz_serio_close; + info->serio.write = dz_serio_write; + + serio_register_port (&info->serio); +#endif /* CONFIG_SERIO */ + } + + /* Reset the chip */ +#ifndef CONFIG_SERIAL_CONSOLE + { + int tmp; + dz_out(info, DZ_CSR, DZ_CLR); + while ((tmp = dz_in(info,DZ_CSR)) & DZ_CLR); + + /* Enable scanning */ + dz_out(info, DZ_CSR, DZ_MSE); + } +#endif + + /* + * Order matters here... the trick is that flags is updated... 
in + * request_irq - to immediately obliterate it is unwise. + */ + local_irq_restore(flags); + + /* The bus-specific IRQ we are handed in the vsbus_dev + * structure is the TX interrupt. The RX is always the + * next one up. + */ + printk("dz.c: using irq rx %d(bus %d), irq tx %d(bus %d)\n", + vsbus_irqindex_to_irq(vsbus_dev->vsbus_irq + 1), + vsbus_dev->vsbus_irq + 1, + vsbus_irqindex_to_irq(vsbus_dev->vsbus_irq), + vsbus_dev->vsbus_irq); + + if (vsbus_request_irq (vsbus_dev->vsbus_irq, dz_interrupt_tx, SA_INTERRUPT, "DZ-TX", lines[0])) + panic ("Unable to register DZ TX interrupt\n"); + if (vsbus_request_irq (vsbus_dev->vsbus_irq + 1, dz_interrupt_rx, SA_INTERRUPT, "DZ-RX", lines[0])) + panic ("Unable to register DZ RX interrupt\n"); + + /* enable scanning */ + dz_out(info, DZ_CSR, DZ_MSE); + + return 0; +} + +#ifdef CONFIG_SERIAL_CONSOLE + +static void dz_console_put_char (unsigned char ch) +{ + unsigned long flags; + int loops = 2500; + unsigned short tmp = ch; + unsigned short tcr, mask; + /* + * this code sends stuff out to serial device - spinning its wheels and + * waiting. + */ + + /* force the issue - point it at lines[3]*/ + dz_console = &multi[CONSOLE_LINE]; + + save_and_cli(flags); + + mask = 1 << dz_console->line; + tcr = dz_in (dz_console, DZ_TCR); /* read the TX flag */ + dz_out(dz_console, DZ_TCR, mask); + + /* spin our wheels */ + while (((dz_in(dz_console, DZ_CSR) & DZ_TRDY) != DZ_TRDY) && loops--) + ; + + /* Actually transmit the character. */ + dz_out(dz_console, DZ_TDR, tmp); + + /* spin our wheels */ + while (((dz_in(dz_console,DZ_CSR) & DZ_TRDY) != DZ_TRDY) && loops--) + ; + dz_out(dz_console, DZ_TCR, tcr); + + restore_flags(flags); +} + +/* + * ------------------------------------------------------------------- + * dz_console_print () + * + * dz_console_print is registered for printk. + * The console must be locked when we get here. 
+ * ------------------------------------------------------------------- + */ +static void dz_console_print (struct console *cons, + const char *str, + unsigned int count) +{ +#ifdef DEBUG_DZ + prom_printf((char *)str); +#endif + while (count--) { + if (*str == '\n') + dz_console_put_char('\r'); + dz_console_put_char(*str++); + } +} + +static struct tty_driver *dz_console_device(struct console *c, int *index) +{ + *index = c->index; + return serial_driver; +} + +static int __init dz_console_setup(struct console *co, char *options) +{ + int baud = 9600; + int bits = 8; + int parity = 'n'; + int cflag = CREAD | HUPCL | CLOCAL; + char *s; + unsigned short mask,tmp; + + if (options) { + baud = simple_strtoul(options, NULL, 10); + s = options; + while (*s >= '0' && *s <= '9') + s++; + if (*s) + parity = *s++; + if (*s) + bits = *s - '0'; + } + + /* + * Now construct a cflag setting. + */ + switch (baud) { + case 1200: + cflag |= DZ_B1200; + break; + case 2400: + cflag |= DZ_B2400; + break; + case 4800: + cflag |= DZ_B4800; + break; + case 9600: + default: + cflag |= DZ_B9600; + break; + } + switch (bits) { + case 7: + cflag |= DZ_CS7; + break; + default: + case 8: + cflag |= DZ_CS8; + break; + } + switch(parity) { + case 'o': + case 'O': + cflag |= DZ_PARODD; + break; + case 'e': + case 'E': + cflag |= DZ_PARENB; + break; + } + co->cflag = cflag; + + /* TOFIX: force to console line */ + dz_console = &multi[CONSOLE_LINE]; + dz_console->port = (unsigned long)dz11_addr; + dz_console->line = CONSOLE_LINE; + + /* This line locks up Dave Airlie's VS3100m38 after HALT */ + /* dz_out(dz_console, DZ_CSR, DZ_CLR); */ + + while ((tmp = dz_in(dz_console,DZ_CSR)) & DZ_CLR) + ; + + /* enable scanning */ + dz_out(dz_console, DZ_CSR, DZ_MSE); + + /* Set up flags... 
*/ + dz_console->cflags = 0; + dz_console->cflags |= DZ_B9600; + dz_console->cflags |= DZ_CS8; + dz_console->cflags |= DZ_PARENB; + dz_out (dz_console, DZ_LPR, dz_console->cflags); + + mask = 1 << dz_console->line; + tmp = dz_in (dz_console, DZ_TCR); /* read the TX flag */ + if (!(tmp & mask)) { + tmp |= mask; /* set the TX flag */ + dz_out (dz_console, DZ_TCR, tmp); + } + + return 0; +} + +static struct console dz_sercons = { + .name = "ttyS", + .write = dz_console_print, + .device = dz_console_device, + .setup = dz_console_setup, + .flags = CON_CONSDEV, + .index = CONSOLE_LINE, +}; + +void __init dz_serial_console_init(void) +{ + register_console(&dz_sercons); +} + +#endif /* ifdef CONFIG_SERIAL_CONSOLE */ + +static struct vsbus_driver dz_driver = { + .probe = dz_probe, + .drv = { + .name = "dz", + }, +}; + +static int __init dz_init_new(void) +{ + return vsbus_register_driver(&dz_driver); +} + +module_init(dz_init_new); + +MODULE_LICENSE("GPL"); + diff -Nru a/drivers/vax/char/dz.h b/drivers/vax/char/dz.h --- a/drivers/vax/char/dz.h 1970-01-01 01:00:00 +++ b/drivers/vax/char/dz.h 2005-03-22 10:20:12 @@ -0,0 +1,255 @@ +/* + * dz.h: Serial port driver for DECStations and VAXstations equipped + * with the DZ chipset. + * + * Copyright (C) 1998 Olivier A. D. Lebaillif + * + * Email: olivier.lebaillif@ifrsys.com + * + */ +#ifndef DZ_SERIAL_H +#define DZ_SERIAL_H + +#include +#ifdef CONFIG_SERIO +#include +#endif /* CONFIG_SERIO */ + +/* + * Definitions for the Control and Status Register. + */ +#define DZ_TRDY 0x8000 /* Transmitter empty */ +#define DZ_TIE 0x4000 /* Transmitter Interrupt Enable */ +#define DZ_RDONE 0x0080 /* Receiver data ready */ +#define DZ_RIE 0x0040 /* Receive Interrupt Enable */ +#define DZ_MSE 0x0020 /* Master Scan Enable */ +#define DZ_CLR 0x0010 /* Master reset */ +#define DZ_MAINT 0x0008 /* Loop Back Mode */ + +/* + * Definitions for the Received buffer. 
+ */ +#define DZ_RBUF_MASK 0x00FF /* Data Mask in the Receive Buffer */ +#define DZ_LINE_MASK 0x0300 /* Line Mask in the Receive Buffer */ +#define DZ_DVAL 0x8000 /* Valid Data indicator */ +#define DZ_OERR 0x4000 /* Overrun error indicator */ +#define DZ_FERR 0x2000 /* Frame error indicator */ +#define DZ_PERR 0x1000 /* Parity error indicator */ + +#define LINE(x) (x & DZ_LINE_MASK) >> 8 /* Get the line number from the input buffer */ +#define UCHAR(x) (unsigned char)(x & DZ_RBUF_MASK) + +/* + * Definitions for the Transmit Register. + */ +#define DZ_LINE_KEYBOARD 0x0001 +#define DZ_LINE_MOUSE 0x0002 +#define DZ_LINE_MODEM 0x0004 +#define DZ_LINE_PRINTER 0x0008 + +#define DZ_MODEM_DTR 0x0400 /* DTR for the modem line (2) */ + +/* + * Definitions for the Modem Status Register. + */ +#define DZ_MODEM_DSR 0x0200 /* DSR for the modem line (2) */ + +/* + * Definitions for the Transmit Data Register. + */ +#define DZ_BRK0 0x0100 /* Break assertion for line 0 */ +#define DZ_BRK1 0x0200 /* Break assertion for line 1 */ +#define DZ_BRK2 0x0400 /* Break assertion for line 2 */ +#define DZ_BRK3 0x0800 /* Break assertion for line 3 */ + +/* + * Definitions for the Line Parameter Register. 
+ */ +#define DZ_KEYBOARD 0x0000 /* line 0 = keyboard */ +#define DZ_MOUSE 0x0001 /* line 1 = mouse */ +#define DZ_MODEM 0x0002 /* line 2 = modem */ +#define DZ_PRINTER 0x0003 /* line 3 = printer */ + +#define DZ_CSIZE 0x0018 /* Number of bits per byte (mask) */ +#define DZ_CS5 0x0000 /* 5 bits per byte */ +#define DZ_CS6 0x0008 /* 6 bits per byte */ +#define DZ_CS7 0x0010 /* 7 bits per byte */ +#define DZ_CS8 0x0018 /* 8 bits per byte */ + +#define DZ_CSTOPB 0x0020 /* 2 stop bits instead of one */ + +#define DZ_PARENB 0x0040 /* Parity enable */ +#define DZ_PARODD 0x0080 /* Odd parity instead of even */ + +#define DZ_CBAUD 0x0E00 /* Baud Rate (mask) */ +#define DZ_B50 0x0000 +#define DZ_B75 0x0100 +#define DZ_B110 0x0200 +#define DZ_B134 0x0300 +#define DZ_B150 0x0400 +#define DZ_B300 0x0500 +#define DZ_B600 0x0600 +#define DZ_B1200 0x0700 +#define DZ_B1800 0x0800 +#define DZ_B2000 0x0900 +#define DZ_B2400 0x0A00 +#define DZ_B3600 0x0B00 +#define DZ_B4800 0x0C00 +#define DZ_B7200 0x0D00 +#define DZ_B9600 0x0E00 + +#define DZ_CREAD 0x1000 /* Enable receiver */ +#define DZ_RXENAB 0x1000 /* enable receive char */ +/* + * Addresses for the DZ registers + */ +#ifdef CONFIG_VAX +#define DZ_CSR 0x00 /* Control and Status Register */ +#define DZ_RBUF 0x04 /* Receive Buffer */ +#define DZ_LPR 0x04 /* Line Parameters Register */ +#define DZ_TCR 0x08 /* Transmitter Control Register */ +#define DZ_MSR 0x0c /* Modem Status Register */ +#define DZ_TDR 0x0c /* Transmit Data Register */ +#else +#define DZ_CSR 0x00 /* Control and Status Register */ +#define DZ_RBUF 0x08 /* Receive Buffer */ +#define DZ_LPR 0x08 /* Line Parameters Register */ +#define DZ_TCR 0x10 /* Transmitter Control Register */ +#define DZ_MSR 0x18 /* Modem Status Register */ +#define DZ_TDR 0x18 /* Transmit Data Register */ +#endif + +#ifdef CONFIG_VAX +#define DZ_NB_PORT 4 +#else +#define DZ_NB_PORT 4 +#endif + +#define DZ_XMIT_SIZE 4096 /* buffer size */ +#define WAKEUP_CHARS DZ_XMIT_SIZE/4 + +#define 
DZ_EVENT_WRITE_WAKEUP 0 + +#ifndef MIN +#define MIN(a,b) ((a) < (b) ? (a) : (b)) + +#define DZ_INITIALIZED 0x80000000 /* Serial port was initialized */ +#define DZ_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */ +#define DZ_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ +#define DZ_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ +#define DZ_CLOSING 0x08000000 /* Serial port is closing */ +#define DZ_CTS_FLOW 0x04000000 /* Do CTS flow control */ +#define DZ_CHECK_CD 0x02000000 /* i.e., CLOCAL */ + +#define DZ_CLOSING_WAIT_INF 0 +#define DZ_CLOSING_WAIT_NONE 65535 + +#define DZ_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */ +#define DZ_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */ +#define DZ_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */ + +struct dz_serial { + unsigned port; /* base address for the port */ + int type; + int flags; + int baud_base; + int blocked_open; + unsigned short close_delay; + unsigned short closing_wait; + unsigned short line; /* port/line number */ + unsigned short cflags; /* line configuration flag */ + unsigned short x_char; /* xon/xoff character */ + unsigned short read_status_mask; /* mask for read condition */ + unsigned short ignore_status_mask; /* mask for ignore condition */ + unsigned long event; /* mask used in BH */ + unsigned char *xmit_buf; /* Transmit buffer */ + int xmit_head; /* Position of the head */ + int xmit_tail; /* Position of the tail */ + int xmit_cnt; /* Count of the chars in the buffer */ + int count; /* indicates how many times it has been opened */ + int magic; + + struct async_icount icount; /* keep track of things ... 
*/ + struct tty_struct *tty; /* tty associated */ + struct work_struct tqueue; /* Queue for BH */ + struct work_struct tqueue_hangup; + wait_queue_head_t open_wait; + wait_queue_head_t close_wait; + + unsigned char is_console; /* flag indicating a serial console */ + unsigned char is_initialized; +#ifdef CONFIG_SERIO + struct serio serio; + int serio_opened; +#endif /* CONFIG_SERIO */ +}; + +static struct dz_serial multi[DZ_NB_PORT]; /* Four serial lines in the DZ chip */ +static struct dz_serial *dz_console; + +/* + * tmp_buf is used as a temporary buffer by serial_write. We need to + * lock it in case the copy_from_user blocks while swapping in a page, + * and some other program tries to do a serial write at the same time. + * Since the lock will only come under contention when the system is + * swapping and available memory is low, it makes sense to share one + * buffer across all the serial ports, since it significantly saves + * memory if large numbers of serial ports are open. + */ +static unsigned char *tmp_buf; +static DECLARE_MUTEX(tmp_buf_sem); + +static char *dz_name = "DECstation DZ serial driver version "; +static char *dz_version = "1.02"; + +static inline unsigned short dz_in (struct dz_serial *, unsigned); +static inline void dz_out (struct dz_serial *, unsigned, unsigned short); + +static inline void dz_sched_event (struct dz_serial *, int); +static inline void receive_chars (struct dz_serial *); +static inline void transmit_chars (struct dz_serial *); +static inline void check_modem_status (struct dz_serial *); + +static void dz_stop (struct tty_struct *); +static void dz_start (struct tty_struct *); +#ifdef CONFIG_VAX +static irqreturn_t dz_interrupt_rx (int, void *, struct pt_regs *); +static irqreturn_t dz_interrupt_tx (int, void *, struct pt_regs *); +#else +static void dz_interrupt (int, void *, struct pt_regs *); +#endif +static void do_softint (void *); +static void do_serial_hangup (void *); +static void change_speed (struct dz_serial *); 
+static void dz_flush_chars (struct tty_struct *); +static void dz_console_print (struct console *, const char *, unsigned int); +static void dz_flush_buffer (struct tty_struct *); +static void dz_throttle (struct tty_struct *); +static void dz_unthrottle (struct tty_struct *); +static void dz_send_xchar (struct tty_struct *, char); +static void shutdown (struct dz_serial *); +static void send_break (struct dz_serial *, int); +static void dz_set_termios (struct tty_struct *, struct termios *); +static void dz_close (struct tty_struct *, struct file *); +static void dz_hangup (struct tty_struct *); +static void show_serial_version (void); + +static int dz_write (struct tty_struct *, const unsigned char *, int); +static int dz_write_room (struct tty_struct *); +static int dz_chars_in_buffer (struct tty_struct *); +static int startup (struct dz_serial *); +static int get_serial_info (struct dz_serial *, struct serial_struct *); +static int set_serial_info (struct dz_serial *, struct serial_struct *); +static int get_lsr_info (struct dz_serial *, unsigned int *); +static int dz_ioctl (struct tty_struct *, struct file *, unsigned int, unsigned long); +static int block_til_ready (struct tty_struct *, struct file *, struct dz_serial *); +static int dz_open (struct tty_struct *, struct file *); + +#ifdef MODULE +int init_module (void) +void cleanup_module (void) +#endif + +#endif + +#endif /* DZ_SERIAL_H */ diff -Nru a/drivers/vax/net/Makefile b/drivers/vax/net/Makefile --- a/drivers/vax/net/Makefile 1970-01-01 01:00:00 +++ b/drivers/vax/net/Makefile 2004-10-08 12:19:50 @@ -0,0 +1,8 @@ +# +# Makefile for the Linux/VAX network device drivers. 
+# + +obj-$(CONFIG_DELQA) += delqa.o +obj-$(CONFIG_VAX_LANCE) += lance.o +obj-$(CONFIG_VAX_SGEC) += sgec.o + diff -Nru a/drivers/vax/net/delqa-regs.h b/drivers/vax/net/delqa-regs.h --- a/drivers/vax/net/delqa-regs.h 1970-01-01 01:00:00 +++ b/drivers/vax/net/delqa-regs.h 2002-07-19 01:39:33 @@ -0,0 +1,88 @@ + +/* Register offsets */ + +#define DELQA_ADDR1 0 +#define DELQA_ADDR2 2 +#define DELQA_RCLL 4 /* loword of first RX descriptor addr */ +#define DELQA_RCLH 6 /* hiword of first RX descriptor addr */ +#define DELQA_XMTL 8 /* loword of first TX descriptor addr */ +#define DELQA_XMTH 10 /* hiword of first TX descriptor addr */ +#define DELQA_VECTOR 12 /* Q-bus interrupt vector */ +#define DELQA_CSR 14 /* control & status */ + + +/* Bits in CSR */ + +#define DELQA_CSR_RCV_ENABLE 0x0001 /* Receiver enable */ +#define DELQA_CSR_RESET 0x0002 /* Software reset */ +#define DELQA_CSR_NEX_MEM_INT 0x0004 /* Non-existent memory interrupt */ +#define DELQA_CSR_LOAD_ROM 0x0008 /* Load boot/diag from rom */ +#define DELQA_CSR_XL_INVALID 0x0010 /* Transmit list invalid */ +#define DELQA_CSR_RL_INVALID 0x0020 /* Receive list invalid */ +#define DELQA_CSR_INT_ENABLE 0x0040 /* Interrupt enable */ +#define DELQA_CSR_XMIT_INT 0x0080 /* Transmit interrupt */ +#define DELQA_CSR_ILOOP 0x0100 /* Internal loopback */ +#define DELQA_CSR_ELOOP 0x0200 /* External loopback */ +#define DELQA_CSR_STIM_ENABLE 0x0400 /* Sanity timer enable */ +#define DELQA_CSR_POWERUP 0x1000 /* Transceiver power on */ +#define DELQA_CSR_CARRIER 0x2000 /* Carrier detect */ +#define DELQA_CSR_RCV_INT 0x8000 /* Receiver interrupt */ + + +/* Bits in ADDR_HI field in descriptors */ + +#define DELQA_ADDRHI_VALID 0x8000 /* ADDRHI/LO are valid */ +#define DELQA_ADDRHI_CHAIN 0x4000 /* ADDRHI/LO points to next descriptor */ +#define DELQA_ADDRHI_EOMSG 0x2000 /* Buffer contains last byte of frame */ +#define DELQA_ADDRHI_SETUP 0x1000 /* Buffer contains a setup frame */ +#define DELQA_ADDRHI_ODDEND 0x0080 /* last byte not 
on word boundary */ +#define DELQA_ADDRHI_ODDBEGIN 0x0040 /* first byte not on word boundary */ + + + + +/* Bits in buffer descriptor field STATUS1 for transmit */ + +#define DELQA_TXSTS1_LASTNOT 0x8000 +#define DELQA_TXSTS1_ERRORUSED 0x4000 +#define DELQA_TXSTS1_LOSS 0x1000 +#define DELQA_TXSTS1_NOCARRIER 0x0800 +#define DELQA_TXSTS1_STE16 0x0400 +#define DELQA_TXSTS1_ABORT 0x0200 +#define DELQA_TXSTS1_FAIL 0x0100 +#define DELQA_TXSTS1_COUNT_MASK 0x00f0 +#define DELQA_TXSTS1_COUNT_SHIFT 4 + +/* Special value that signifies that descriptor is not yet used + by DELQA. The descriptor FLAG and STATUS1 fields both get + initialized to this value. */ +#define DELQA_NOTYET 0x8000 + +/* Bits in buffer descriptor field STATUS1 for transmit */ + +#define DELQA_TXSTS2_TDR_MASK 0x3fff +#define DELQA_TXSTS2_TDR_SHIFT 0 + + +/* Bits in buffer descriptor field STATUS1 for receive */ + +#define DELQA_RXSTS1_LASTNOT 0x8000 +#define DELQA_RXSTS1_ERRORUSED 0x4000 +#define DELQA_RXSTS1_ESETUP 0x2000 +#define DELQA_RXSTS1_DISCARD 0x1000 +#define DELQA_RXSTS1_RUNT 0x0800 +#define DELQA_RXSTS1_LEN_HI_MASK 0x0700 +#define DELQA_RXSTS1_LEN_HI_SHIFT 8 +#define DELQA_RXSTS1_FRAME 0x0004 +#define DELQA_RXSTS1_CRCERR 0x0002 +#define DELQA_RXSTS1_OVF 0x0001 + + +/* Bits in buffer descriptor field STATUS2 for receive */ + +#define DELQA_RXSTS2_LEN_LO1_MASK 0x00ff +#define DELQA_RXSTS2_LEN_LO1_SHIFT 0 +#define DELQA_RXSTS2_LEN_LO2_MASK 0xff00 +#define DELQA_RXSTS2_LEN_LO2_SHIFT 8 + + diff -Nru a/drivers/vax/net/delqa.c b/drivers/vax/net/delqa.c --- a/drivers/vax/net/delqa.c 1970-01-01 01:00:00 +++ b/drivers/vax/net/delqa.c 2005-10-21 09:45:22 @@ -0,0 +1,987 @@ +/* + * Quick-and-dirty driver for DELQA/DESQA (Q-bus ethernet adapters) + * + * (C) 2002-2004, Kenn Humborg + * + * TODO: Pre-allocate the Q-bus mapping registers for TX at init time and + * re-use them, rather than allocating them for each packet. This + * would remove the only failure possibility from delqa_start_xmit(). 
+ * + * TODO: Allow multiple DELQAs at different base addresses. + * + * TODO: Reset DELQA on q-bus memory access error (is this the right + * thing to do?). + * + * TODO: Implement delqa_tx_timeout(). + * + * TODO: Handle multicast addresses and PROMISC flag in format_setup_frame(). + * + * TODO: Implement delqa_close(). + */ + +#include +#include +#include +#include + +#include + +#include +#include "delqa-regs.h" + +#define DELQA_DEBUG_REGWR 0 +#define DELQA_DEBUG_CSR 0 +#define DELQA_DEBUG_DESC 0 +#define DELQA_DEBUG_PKT 0 + +/* Where does the DELQA traditionally live on the bus? */ +#define DELQA_CSR_BUS_ADDR 0774440 + +/* FIXME: Are these numbers OK? These are what NetBSD uses. */ +#define RXDESCS 30 +#define TXDESCS 60 + +#define RXBUFSIZE 2048 + +struct delqa_bufdesc { + unsigned short flag; + unsigned short addr_hi; + unsigned short addr_lo; + signed short buflen; + unsigned short status1; + unsigned short status2; +}; + +struct delqa_descs { + struct delqa_bufdesc rxdesc[RXDESCS+1]; + struct delqa_bufdesc txdesc[TXDESCS+1]; +}; + +struct delqa_private { + unsigned char __iomem *base; + unsigned int qbus_vector; + struct net_device_stats stats; + struct delqa_descs *descs; + struct vax_dmamap *desc_map; /* DMA mapping for delqa_descs structure */ + struct vax_dmamap *rx_map[RXDESCS]; /* DMA mappings for each RX buffer */ + struct vax_dmamap *tx_map[TXDESCS]; /* DMA mappings for each TX buffer */ + struct sk_buff *tx_skb[TXDESCS]; /* We TX direct from the SKB */ + unsigned char setup_frame[128]; + unsigned char setup_frame_len; + unsigned int next_tx_free; /* Only written by mainline code */ + unsigned int next_tx_pending; /* Only written by init and interrupt code */ + unsigned int next_rx; + unsigned int last_tdr; /* Last Time Domain Reflectometer value on TX */ + spinlock_t lock; + struct device *parent; /* The QBUS on which we live */ +}; + +#define LOWORD(x) ((int)(x) & 0xffff) +#define HIWORD(x) (((int)(x)>>16) & 0xffff) + +static unsigned short int 
read_reg(struct delqa_private *priv, unsigned int offset) +{ + volatile unsigned short int *p; + + p = (volatile unsigned short *)(priv->base + offset); + + return *p; +} + +static void write_reg(struct delqa_private *priv, unsigned int offset, unsigned short int value) +{ + volatile unsigned short int *p; +#if DELQA_DEBUG_REGWR + char *reg[8] = { "ADDR1", "ADDR2", "RCCL", "RCLH", "XMTL", "XMTH", "VECTOR", "CSR"}; + printk("delqa write_reg: offset %02d(%s) value %04x\n", offset, reg[offset/2], value); +#endif + + p = (volatile unsigned short *)(priv->base + offset); + *p = value; +} + + +static void dump_csr(char *msg, struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + unsigned short csr = read_reg(priv, DELQA_CSR); + + printk("%s: %s: CSR %04x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + dev->name, + msg == NULL ? "" : msg, csr, + (csr & DELQA_CSR_RCV_INT) ? " RI" : "", + (csr & DELQA_CSR_CARRIER) ? " CA" : "", + (csr & DELQA_CSR_POWERUP) ? " OK" : "", + (csr & DELQA_CSR_STIM_ENABLE) ? " SE" : "", + (csr & DELQA_CSR_ELOOP) ? " EL" : "", + (csr & DELQA_CSR_ILOOP) ? "" : " IL", /* active low */ + (csr & DELQA_CSR_XMIT_INT) ? " XI" : "", + (csr & DELQA_CSR_INT_ENABLE) ? " IE" : "", + (csr & DELQA_CSR_RL_INVALID) ? " RL" : "", + (csr & DELQA_CSR_XL_INVALID) ? " XL" : "", + (csr & DELQA_CSR_LOAD_ROM) ? " BD" : "", + (csr & DELQA_CSR_NEX_MEM_INT) ? " NI" : "", + (csr & DELQA_CSR_RESET) ? " SR" : "", + (csr & DELQA_CSR_RCV_ENABLE) ? " RE" : ""); +} + +/* + . 
-> Invalid + + -> Valid, not yet used by DELQA + * -> Valid, owned by DELQA (in progress) + - -> Valid, processed by DELQA - no errors + x -> Valid, processed by DELQA - with errors + c -> Valid, chain descriptor +*/ + +static void dump_descs(struct net_device *dev) +{ + unsigned int i; + struct delqa_private *priv = (struct delqa_private *)dev->priv; + struct delqa_bufdesc *desc; + + printk("%s: TX free=%02d pending=%02d ", dev->name, priv->next_tx_free, priv->next_tx_pending); + for (i=0; idescs->txdesc + i; + if (desc->addr_hi & DELQA_ADDRHI_CHAIN) { + printk("c"); + } else if (desc->addr_hi & DELQA_ADDRHI_VALID) { + /* VALID bit set */ + switch (desc->status1 & (DELQA_TXSTS1_LASTNOT|DELQA_TXSTS1_ERRORUSED)) { + case 0: + printk("-"); + break; + case DELQA_TXSTS1_ERRORUSED: + printk("x"); + break; + case DELQA_TXSTS1_LASTNOT: + if (desc->flag & 0x4000) { + printk("*"); + } else { + printk("+"); + } + break; + case DELQA_TXSTS1_LASTNOT|DELQA_TXSTS1_ERRORUSED: + /* Don't expect this, since we never break packets across buffers */ + printk("?"); + break; + } + } else { + printk("."); + } + } + printk("\n"); + + printk("%s: RX next=%02d ", dev->name, priv->next_rx); + for (i=0; idescs->rxdesc + i; + if (desc->addr_hi & DELQA_ADDRHI_CHAIN) { + printk("c"); + } else if (desc->addr_hi & DELQA_ADDRHI_VALID) { + /* VALID bit set */ + switch (desc->status1 & (DELQA_RXSTS1_LASTNOT|DELQA_RXSTS1_ERRORUSED)) { + case 0: + printk("-"); + break; + case DELQA_RXSTS1_ERRORUSED: + printk("x"); + break; + case DELQA_RXSTS1_LASTNOT: + if (desc->flag & 0x4000) { + printk("*"); + } else { + printk("+"); + } + break; + case DELQA_RXSTS1_LASTNOT|DELQA_RXSTS1_ERRORUSED: + /* Don't expect this, since we never break packets across buffers */ + printk("?"); + break; + } + } else { + printk("."); + } + } + printk("\n"); +} + +static void delqa_tx_interrupt(struct net_device *dev, struct delqa_private *priv) +{ + struct delqa_bufdesc *desc; + int desc_freed = 0; + unsigned int tdr; + unsigned 
int collisions; + + /* Get first descriptor waiting to be "taken back" from + the DELQA */ + desc = priv->descs->txdesc + priv->next_tx_pending; + + while (desc->status1 != DELQA_NOTYET) { + +#if DELQA_DEBUG_PKT + printk("TX desc %d, status1=%04x, status2=%04x\n", priv->next_tx_pending, + desc->status1, desc->status2); +#endif + + tdr = (desc->status2 & DELQA_TXSTS2_TDR_MASK) >> DELQA_TXSTS2_TDR_SHIFT; + collisions = (desc->status1 & DELQA_TXSTS1_COUNT_MASK) >> DELQA_TXSTS1_COUNT_SHIFT; + + if (desc->status1 & DELQA_TXSTS1_ERRORUSED) { + + priv->stats.tx_errors++; + + if (desc->status1 & DELQA_TXSTS1_LOSS) { + printk(KERN_WARNING "%s: carrier lost on " + "transmit - ethernet cable " + "problem?\n", dev->name); + } + if (desc->status1 & DELQA_TXSTS1_NOCARRIER) { + printk(KERN_WARNING "%s: no carrier on transmit" + " - transceiver or transceiver " + "cable problem?\n", dev->name); + } + if (desc->status1 & DELQA_TXSTS1_ABORT) { + if (tdr == priv->last_tdr) { + printk(KERN_WARNING "%s: excessive " + "collisions on transmit\n", + dev->name); + } else { + printk(KERN_WARNING"%s: excessive " + "collisions on transmit" + " - cable fault at " + "TDR=%d\n", dev->name, + tdr); + } + if (collisions == 0) { + /* Collision counter overflowed */ + priv->stats.collisions += 16; + } + } + } else { + if (desc->addr_hi & DELQA_ADDRHI_SETUP) { + /* Don't count setup frames in stats */ + } else { + /* Packet got onto the wire */ + priv->stats.tx_packets++; + priv->stats.tx_bytes += desc->buflen * 2; + } + } + + priv->stats.collisions += collisions; + + priv->last_tdr = tdr; + + if (desc->addr_hi & DELQA_ADDRHI_SETUP) { + /* Setup frame - no associated skb */ + } else { + dev_kfree_skb_irq(priv->tx_skb[priv->next_tx_pending]); + } + + /* clear VALID bit */ + desc->addr_hi = 0; + + /* reclaim descriptor */ + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 0; + + /* Free the mapping registers */ + qbus_unmap(priv->parent, priv->tx_map[priv->next_tx_pending]); 
+ + /* At least one descriptor freed up */ + desc_freed = 1; + + priv->next_tx_pending++; + if (priv->next_tx_pending == TXDESCS) { + priv->next_tx_pending = 0; + } + desc = priv->descs->txdesc + priv->next_tx_pending; + } + + if (netif_queue_stopped(dev) && desc_freed) { + netif_wake_queue(dev); + } +} + +static void delqa_rx_interrupt(struct net_device *dev, struct delqa_private *priv) +{ + struct delqa_bufdesc *desc; + unsigned int len; + struct sk_buff *skb; + unsigned int busaddr; + + /* Get first descriptor waiting to be "taken back" from + the DELQA */ + desc = priv->descs->rxdesc + priv->next_rx; + + while (desc->status1 != DELQA_NOTYET) { + +#if DELQA_DEBUG_PKT + printk("RX desc %d, status1=%04x, status2=%04x, len=%d\n", priv->next_rx, + desc->status1, desc->status2, len); +#endif + + if (desc->status1 & DELQA_RXSTS1_ESETUP) { + /* This is the loopback of a setup frame - ignore */ + } if (desc->status1 & DELQA_RXSTS1_ERRORUSED) { + + /* Error while receiving */ + priv->stats.rx_errors++; + + } else { + /* Good frame received */ + + unsigned int len_hi; + unsigned int len_lo1; + unsigned int len_lo2; + + len_hi = (desc->status1 & DELQA_RXSTS1_LEN_HI_MASK) + >> DELQA_RXSTS1_LEN_HI_SHIFT; + + len_lo1 = (desc->status2 & DELQA_RXSTS2_LEN_LO1_MASK) + >> DELQA_RXSTS2_LEN_LO1_SHIFT; + + len_lo2 = (desc->status2 & DELQA_RXSTS2_LEN_LO2_MASK) + >> DELQA_RXSTS2_LEN_LO2_SHIFT; + + if (len_lo1 != len_lo2) { + printk("%s: DELQA status2 bytes don't match\n", dev->name); + } + + len = (len_hi << 8) + len_lo1 + 60; + + skb = dev_alloc_skb(len + 2); + if (skb == NULL) { + printk("%s: cannot allocate skb, dropping packet\n", dev->name); + priv->stats.rx_dropped++; + } else { + priv->stats.rx_packets++; + priv->stats.rx_bytes += len; + + skb->dev = dev; + skb_reserve(skb, 2); + skb_put(skb, len); + memcpy(skb->data, + priv->rx_map[priv->next_rx]->virtaddr, len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + } + } + + /* reclaim descriptor */ + desc->flag = 
DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 1; /* High and low bytes must be different */ + + priv->next_rx++; + if (priv->next_rx == RXDESCS) { + priv->next_rx = 0; + } + desc = priv->descs->rxdesc + priv->next_rx; + } + + /* DEQNA manual errata sheet states that we must check for an + invalid receive list before leaving the ISR and reset the + buffer list if it is invalid */ + if (read_reg(priv, DELQA_CSR) & DELQA_CSR_RL_INVALID) { + + printk("%s: receive list invalid - resetting\n", dev->name); + /* The descriptor pointed to by next_rx must be the + first available descriptor. This is because: + + o The DELQA doesn't touch the receive list when + RL_INVALID is set. + + o The while() loop above stops when next_rx points + to a 'NOTYET' descriptor. */ + + busaddr = priv->desc_map->busaddr + + offsetof(struct delqa_descs, rxdesc[priv->next_rx]); + + write_reg(priv, DELQA_RCLL, LOWORD(busaddr)); + write_reg(priv, DELQA_RCLH, HIWORD(busaddr)); + } + +} + +static irqreturn_t delqa_interrupt(const int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct delqa_private *priv = (struct delqa_private *)dev->priv; + unsigned int csr; + unsigned int newcsr; + + spin_lock(&priv->lock); + + csr = read_reg(priv, DELQA_CSR); + +#if DELQA_DEBUG_CSR + dump_csr("delqa_interrupt entry", dev); +#endif +#if DELQA_DEBUG_DESC + dump_descs(dev); +#endif + + newcsr = DELQA_CSR_ILOOP | DELQA_CSR_RCV_ENABLE | DELQA_CSR_INT_ENABLE; + + if (csr & DELQA_CSR_XMIT_INT) { + /* Either memory read error or tx interrupt */ + if (csr & DELQA_CSR_NEX_MEM_INT) { + dump_csr("Q-bus memory error", dev); + dump_descs(dev); + qbus_dumpmap(priv->parent); + + /* FIXME: what should we do here? 
*/ + panic("DELQA bus memory access error"); + } else { + newcsr |= DELQA_CSR_XMIT_INT; + delqa_tx_interrupt(dev, priv); + } + } + + if (csr & DELQA_CSR_RCV_INT) { + newcsr |= DELQA_CSR_RCV_INT; + delqa_rx_interrupt(dev, priv); + } + + /* Clear RX and TX interrupt bits to allow further interrupts. We + also enable the receiver and turn off the internal loopback at + this point. */ + write_reg(priv, DELQA_CSR, newcsr); + +#if DELQA_DEBUG_CSR + dump_csr("delqa_interrupt exit", dev); +#endif + + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +static int delqa_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + struct delqa_bufdesc *desc; + unsigned int len; + unsigned int i; + unsigned int busaddr; + unsigned int csr; + unsigned int flags; + + if (skb->len < ETH_ZLEN) { + struct sk_buff *new_skb; + + new_skb = skb_copy_expand(skb, 0, ETH_ZLEN - skb->len, GFP_ATOMIC); + if (new_skb == NULL) { + return -ENOMEM; + } + + memset(skb_put(new_skb, ETH_ZLEN - skb->len), 0, + ETH_ZLEN - skb->len); + + dev_kfree_skb(skb); + + /* We must not return a failure after this point, since that + would result in the caller trying to free the skb that we've + just freed. */ + + skb = new_skb; + } + + spin_lock_irqsave(&priv->lock, flags); + + i = priv->next_tx_free; + priv->next_tx_free++; + if (priv->next_tx_free == TXDESCS) { + priv->next_tx_free = 0; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + desc = priv->descs->txdesc + i; + + /* FIXME: These mapping registers MUST be allocated at init time + to prevent any possibility of failure here - see above comment */ + + priv->tx_map[i] = qbus_alloc_mapregs(priv->parent, skb->data, skb->len); + if (priv->tx_map[i] == NULL) { + /* FIXME: What should I do here? 
*/ + panic("delqa_start_xmit: no map reg"); + } + + priv->tx_skb[i] = skb; + + busaddr = priv->tx_map[i]->busaddr; + + desc->addr_lo = LOWORD(busaddr); + desc->addr_hi = HIWORD(busaddr) | DELQA_ADDRHI_EOMSG; + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 0; + + /* Work out the length and alignment stuff */ + + len = skb->len; + if ((len & 1) || ((unsigned long)(skb->data) & 1)) { + len += 2; + } + if ((unsigned long)(skb->data) & 1) { + desc->addr_hi |= DELQA_ADDRHI_ODDBEGIN; + } + if ((unsigned long)(skb->data + len) & 1) { + desc->addr_hi |= DELQA_ADDRHI_ODDEND; + } + desc->buflen = -(len/2); + + /* Set the "go" bit on this descriptor */ + desc->addr_hi |= DELQA_ADDRHI_VALID; + +#if DELQA_DEBUG_DESC + dump_descs(dev); +#endif + + spin_lock_irqsave(&priv->lock, flags); + + csr = read_reg(priv, DELQA_CSR); + if (csr & DELQA_CSR_XL_INVALID) { + + /* Get Q-bus address of first TX descriptor */ + busaddr = priv->desc_map->busaddr + offsetof(struct delqa_descs, txdesc[i]); + + write_reg(priv, DELQA_XMTL, LOWORD(busaddr)); + write_reg(priv, DELQA_XMTH, HIWORD(busaddr)); + } + + /* Check if the 'next' descriptor is actually free */ + desc = priv->descs->txdesc + priv->next_tx_free; + + if (desc->addr_hi & DELQA_ADDRHI_VALID) { + /* All descriptors in use - stop tx queue */ + netif_stop_queue(dev); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/* FIXME: implement this */ + +static void delqa_tx_timeout(struct net_device *dev) +{ + printk("delqa_tx_timeout not implemented\n"); +HALT; +} + +static void store_setup_address(unsigned int index, unsigned char *addr, + unsigned char *setup_frame) +{ + unsigned int start; + unsigned int i; + + if (index < 6) { + start = index + 1; + } else { + start = index + 65; + } + + for (i=0; i<6; i++) { + setup_frame[start + i * 8] = addr[i]; + } +} + +static void format_setup_frame(struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + + /* 
Fill in hardware, broadcast and multicast addresses here */ + + /* Pre-fill with all FF (has side effect of setting all addresses + to the broadcast address) */ + memset(priv->setup_frame, 0xff, sizeof(priv->setup_frame)); + + /* First address will be our unicast address */ + store_setup_address(0, dev->dev_addr, priv->setup_frame); + + /* FIXME: Store multicast addresses here */ + + /* FIXME: Use MULTICAST and PROMISC flags to tweak setup_len */ + priv->setup_frame_len = 128; +} + +/* FIXME: pre-allocate mapping registers for this at init time. Then this + would be a no-fail function */ + +static void queue_setup_frame(struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + struct delqa_bufdesc *desc; + unsigned int busaddr; + unsigned int csr; + unsigned int i; + unsigned int flags; + + /* Point the first available TX descriptor at the setup + frame and enable the transmitter */ + + spin_lock_irqsave(&priv->lock, flags); + + i = priv->next_tx_free; + + priv->next_tx_free++; + if (priv->next_tx_free == TXDESCS) { + priv->next_tx_free = 0; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + desc = priv->descs->txdesc + i; + + priv->tx_map[i] = qbus_alloc_mapregs(priv->parent, priv->setup_frame, priv->setup_frame_len); + if (priv->tx_map[i] == NULL) { + panic("delqa queue_setup_frame: dma mapping failed"); + } + + busaddr = priv->tx_map[i]->busaddr; + + desc->addr_lo = LOWORD(busaddr); + desc->addr_hi = HIWORD(busaddr) | DELQA_ADDRHI_SETUP | DELQA_ADDRHI_EOMSG; + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 0; + desc->buflen = -priv->setup_frame_len/2; /* (maybe) bytes, not words like all other uses */ + desc->addr_hi |= DELQA_ADDRHI_VALID; + +#if DELQA_DEBUG_DESC + dump_descs(dev); +#endif + + spin_lock_irqsave(&priv->lock, flags); + + csr = read_reg(priv, DELQA_CSR); + + if (csr & DELQA_CSR_XL_INVALID) { + + /* Get Q-bus address of first TX descriptor */ + busaddr = priv->desc_map->busaddr 
+ offsetof(struct delqa_descs, txdesc[i]); + + write_reg(priv, DELQA_XMTL, LOWORD(busaddr)); + write_reg(priv, DELQA_XMTH, HIWORD(busaddr)); + } + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void delqa_reset(struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + unsigned int csr; + + printk("%s: resetting DELQA... ", dev->name); + + write_reg(priv, DELQA_CSR, DELQA_CSR_RESET); + udelay(1000); + + csr = read_reg(priv, DELQA_CSR); + write_reg(priv, DELQA_CSR, csr & ~DELQA_CSR_RESET); + write_reg(priv, DELQA_VECTOR, priv->qbus_vector); + + printk("done\n"); +} + +static int delqa_open(struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + struct delqa_bufdesc *desc; + int i; + unsigned int busaddr; + + /* Reset the hardware before hooking the interrupt vector + to guarantee that we won't get any interrupts until we + enable them. */ + + delqa_reset(dev); + + if (qbus_request_irq(priv->parent, priv->qbus_vector, delqa_interrupt, 0, "delqa", dev)) { + printk("delqa_open: cannot get qbus irq %d\n", priv->qbus_vector); + return -EAGAIN; + } + + /* Mark the transmit descriptors as not yet owned by + the DELQA (and also not VALID). */ + for (i=0; idescs->txdesc + i; + + /* Clear VALID bit */ + desc->addr_hi = 0; + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 0; + } + + /* Mark the receive descriptors as not yet owned by + the DELQA. */ + for (i=0; idescs->rxdesc + i; + + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 1; + } + + /* Tell the DELQA where the receive descriptors live (i.e. + which Q-bus addresses are mapped to the descriptor + addresses by the mapping registers. There is no + point in setting the transmit descriptor address, since + there are no valid transmit descriptors yet. 
When + the card hits an invalid transmit descriptor, it stops + the transmit logic, which can only be restarted by + setting the transmit descriptor address again. */ + + busaddr = priv->desc_map->busaddr + offsetof(struct delqa_descs, rxdesc[0]); + + write_reg(priv, DELQA_RCLL, LOWORD(busaddr)); + write_reg(priv, DELQA_RCLH, HIWORD(busaddr)); + + write_reg(priv, DELQA_CSR, DELQA_CSR_INT_ENABLE | DELQA_CSR_XMIT_INT | DELQA_CSR_RCV_INT); + + format_setup_frame(dev); + queue_setup_frame(dev); + + return 0; +} + +/* FIXME: implement delqa_close */ + +static int delqa_close(struct net_device *dev) +{ + printk("delqa_close not implemented\n"); +HALT; + return 0; +} + +static void delqa_set_multicast(struct net_device *dev) +{ + format_setup_frame(dev); + queue_setup_frame(dev); +} + +static struct net_device_stats *delqa_get_stats(struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *) dev->priv; + + return &priv->stats; +} + +/* This function allocates the receive buffers, allocates and maps + QBUS mapping registers for these buffers, initializes the receive + descriptors to point to these buffers, and sets up the chain + descriptors at the end of the descriptor lists */ + +static int init_desc_rings(struct net_device *dev) +{ + struct delqa_private *priv = (struct delqa_private *)dev->priv; + struct delqa_bufdesc *desc; + unsigned int busaddr; + int i; + + for (i=0; irx_map[i] = qbus_alloc_mapregs(priv->parent, buf, RXBUFSIZE); + if (priv->rx_map[i] == NULL) { + printk("delqa init_desc_rings: dma mapping failed"); + kfree(buf); + goto cleanup; + } + + busaddr = priv->rx_map[i]->busaddr; + + desc = priv->descs->rxdesc + i; + + desc->addr_lo = LOWORD(busaddr); + desc->addr_hi = HIWORD(busaddr); + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + desc->status2 = 1; + desc->buflen = - (RXBUFSIZE / 2); /* words, not bytes */ + desc->addr_hi |= DELQA_ADDRHI_VALID; + } + + /* Remember that we've allocated one more descriptor than we + 
need. This one is used to chain the end of the descriptor + list back to the beginning. */ + + /* Last receive descriptor contains bus address of first desc */ + + desc = priv->descs->rxdesc + RXDESCS; + + busaddr = priv->desc_map->busaddr + offsetof(struct delqa_descs, rxdesc[0]); + + desc->addr_lo = LOWORD(busaddr); + desc->addr_hi = HIWORD(busaddr) | DELQA_ADDRHI_VALID | DELQA_ADDRHI_CHAIN; + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + + /* Last transmit descriptor contains bus address of first desc */ + + desc = priv->descs->txdesc + TXDESCS; + + busaddr = priv->desc_map->busaddr + offsetof(struct delqa_descs, txdesc[0]); + + desc->addr_lo = LOWORD(busaddr); + desc->addr_hi = HIWORD(busaddr) | DELQA_ADDRHI_VALID | DELQA_ADDRHI_CHAIN; + desc->flag = DELQA_NOTYET; + desc->status1 = DELQA_NOTYET; + + priv->next_tx_free = 0; + priv->next_tx_pending = 0; + priv->next_rx = 0; + + return 0; + +cleanup: + for (i=0; irx_map[i] != NULL) { + kfree(priv->rx_map[i]->virtaddr); + qbus_unmap(priv->parent, priv->rx_map[i]); + priv->rx_map[i] = NULL; + } + } + return -ENOMEM; +} + +static int delqa_probe(struct qbus_device *qbus_dev) +{ + struct net_device *dev; + struct delqa_private *priv; + int i; + int status = 0; + + switch (QBUS_OCTAL_CSR(qbus_dev->csr)) { + case DELQA_CSR_BUS_ADDR: + /* This could be a DELQA */ + break; + default: + /* Not one of our expected CSR addresses */ + return 1; + } + + dev = alloc_etherdev(sizeof(struct delqa_private)); + if (!dev) { + return -ENOMEM; + } + + priv = (struct delqa_private *) dev->priv; + + spin_lock_init(&priv->lock); + + priv->parent = get_device(qbus_dev->dev.parent); + + priv->base = qbus_ioremap(priv->parent, qbus_dev->csr, 16); + if (priv->base == NULL) { + status = -ENOMEM; + goto cleanup; + } + + priv->descs = kmalloc(sizeof(struct delqa_descs), GFP_KERNEL); + if (priv->descs == NULL) { + status = -ENOMEM; + goto cleanup; + } + + dev->mem_start = (unsigned int)priv->descs; + dev->mem_end = (unsigned 
int)priv->descs + sizeof(*(priv->descs)); + + priv->qbus_vector = qbus_alloc_vector(priv->parent); + if (!priv->qbus_vector) { + printk("delqa_probe: cannot allocate QBUS interrupt vector\n"); + status = -EAGAIN; + goto cleanup; + } + + write_reg(priv, DELQA_VECTOR, priv->qbus_vector); + + printk("delqa qbus vector: %d (0%03o, 0x%04x)\n", priv->qbus_vector, priv->qbus_vector, priv->qbus_vector); + + /* This is purely informational */ + dev->irq = qbus_vector_to_irq(priv->parent, priv->qbus_vector); + + printk("Ethernet address in ROM: "); + for (i = 0; i < 6; i++) { + dev->dev_addr[i] = priv->base[i*2] & 0xff; + printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + } + + /* Here we need to setup qbus mapping registers so that the DELQA + can DMA to and from our buffers */ + + priv->desc_map = qbus_alloc_mapregs(priv->parent, priv->descs, sizeof(struct delqa_descs)); + if (priv->desc_map == NULL) { + status = -ENOMEM; + goto cleanup; + } + + status = init_desc_rings(dev); + if (status < 0) { + goto cleanup; + } + + dev->open = delqa_open; + dev->stop = delqa_close; + dev->hard_start_xmit = delqa_start_xmit; + dev->tx_timeout = delqa_tx_timeout; + dev->watchdog_timeo = 5*HZ; + dev->get_stats = &delqa_get_stats; + dev->set_multicast_list = &delqa_set_multicast; + dev->dma = 0; + + SET_NETDEV_DEV(dev, &qbus_dev->dev); + + status = register_netdev(dev); + if (status) { + goto cleanup; + } + + return 0; + +cleanup: + if (priv->desc_map) { + qbus_unmap(qbus_dev->dev.parent, priv->desc_map); + } + if (priv->qbus_vector) { + qbus_free_vector(qbus_dev->dev.parent, priv->qbus_vector); + } + kfree(priv->descs); + if (priv->base) { + qbus_iounmap(priv->base); + } + if (priv->parent) { + put_device(priv->parent); + } + + free_netdev(dev); + + return status; +} + +static struct qbus_driver delqa_driver = { + .probe = delqa_probe, + .drv = { + .name = "delqa", + }, +}; + +int __init delqa_init(void) +{ + return qbus_register_driver(&delqa_driver); +} + 
+device_initcall(delqa_init); + diff -Nru a/drivers/vax/net/lance.c b/drivers/vax/net/lance.c --- a/drivers/vax/net/lance.c 1970-01-01 01:00:00 +++ b/drivers/vax/net/lance.c 2005-04-25 11:37:12 @@ -0,0 +1,1062 @@ +/* + * Lance ethernet driver for the VAX station 3100 + * + * Adapted from declance.c - Linux MIPS Decstation Team + * modified for DS5000/200 + VAX MIPS - Dave Airlie (airlied@linux.ie) + * + * I have every intention of remerging this driver with declance.c + * at some stage, this version is just intermediate I hope :-) + * - D.A. July 2000 + * + * I've started to write some more of this.. doesn't do anything + * extra visibly, just does some static block allocation to use + * until kmalloc arrives... next on the list are the IRQ and getting + * the lance pointed at the init block in the right address space. + * - D.A. 14 Aug 2000 + * + */ + +static char *version = +"vaxlance.c: v0.008 by Linux Mips DECstation task force + airlied@linux.ie\n"; + +static char *lancestr = "LANCE"; + +#include +#include +#include +#include + +#include /* for __flush_tlb_one */ + +#include + +/* Ugly kludge to deal with KA43 weirdness */ +#include +#include + +#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */ +#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */ + +#define LE_CSR0 0 +#define LE_CSR1 1 +#define LE_CSR2 2 +#define LE_CSR3 3 + +#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ + +#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ +#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ +#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ +#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ +#define LE_C0_MERR 0x0800 /* ME: Memory error */ +#define LE_C0_RINT 0x0400 /* Received interrupt */ +#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ +#define LE_C0_IDON 0x0100 /* IFIN: Init finished. 
*/ +#define LE_C0_INTR 0x0080 /* Interrupt or error */ +#define LE_C0_INEA 0x0040 /* Interrupt enable */ +#define LE_C0_RXON 0x0020 /* Receiver on */ +#define LE_C0_TXON 0x0010 /* Transmitter on */ +#define LE_C0_TDMD 0x0008 /* Transmitter demand */ +#define LE_C0_STOP 0x0004 /* Stop the card */ +#define LE_C0_STRT 0x0002 /* Start the card */ +#define LE_C0_INIT 0x0001 /* Init the card */ + +#define LE_C3_BSWP 0x4 /* SWAP */ +#define LE_C3_ACON 0x2 /* ALE Control */ +#define LE_C3_BCON 0x1 /* Byte control */ + +/* Receive message descriptor 1 */ +#define LE_R1_OWN 0x80 /* Who owns the entry */ +#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ +#define LE_R1_FRA 0x20 /* FRA: Frame error */ +#define LE_R1_OFL 0x10 /* OFL: Frame overflow */ +#define LE_R1_CRC 0x08 /* CRC error */ +#define LE_R1_BUF 0x04 /* BUF: Buffer error */ +#define LE_R1_SOP 0x02 /* Start of packet */ +#define LE_R1_EOP 0x01 /* End of packet */ +#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ + +#define LE_T1_OWN 0x80 /* Lance owns the packet */ +#define LE_T1_ERR 0x40 /* Error summary */ +#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ +#define LE_T1_EONE 0x08 /* Error: one retry needed */ +#define LE_T1_EDEF 0x04 /* Error: deferred */ +#define LE_T1_SOP 0x02 /* Start of packet */ +#define LE_T1_EOP 0x01 /* End of packet */ +#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ + +#define LE_T3_BUF 0x8000 /* Buffer error */ +#define LE_T3_UFL 0x4000 /* Error underflow */ +#define LE_T3_LCOL 0x1000 /* Error late collision */ +#define LE_T3_CLOS 0x0800 /* Error carrier loss */ +#define LE_T3_RTY 0x0400 /* Error retry */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ + +/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ + +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) + +#define 
RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define PKT_BUF_SZ 1536 +#define RX_BUFF_SIZE PKT_BUF_SZ +#define TX_BUFF_SIZE PKT_BUF_SZ + +#undef VAX_LANCE_DEBUG +#undef VAX_LANCE_DEBUG_BUFFERS + + +struct lance_rx_desc { + unsigned short rmd0; /* low address of packet */ + unsigned char rmd1_hadr; /* high address of packet */ + unsigned char rmd1_bits; /* descriptor bits */ + short length; /* This length is 2s complement (negative)! + * Buffer length + */ + unsigned short mblength; /* This is the actual number of bytes received */ +}; + +struct lance_tx_desc { + unsigned short tmd0; /* low address of packet */ + unsigned char tmd1_hadr; /* high address of packet */ + unsigned char tmd1_bits; /* descriptor bits */ + short length; /* Length is 2s complement (negative)! */ + unsigned short misc; +}; + + +/* First part of the LANCE initialization block, described in databook. */ +struct lance_init_block { + unsigned short mode; /* Pre-set mode (reg. 15) */ + + unsigned char phys_addr[6]; /* Physical ethernet address + * only 0, 1, 4, 5, 8, 9 are valid + * 2, 3, 6, 7, 10, 11 are gaps + */ + unsigned short filter[4]; /* Multicast filter. + * only 0, 2, 4, 6 are valid + * 1, 3, 5, 7 are gaps + */ + + /* Receive and transmit ring base, along with extra bits. 
*/ + unsigned short rx_ptr; /* receive descriptor addr */ + unsigned short rx_len; /* receive len and high addr */ + unsigned short tx_ptr; /* transmit descriptor addr */ + unsigned short tx_len; /* transmit len and high addr */ + short gap0[4]; + + /* The buffer descriptors */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; +}; + + +#define BUF_OFFSET_CPU (offsetof(struct lance_shared_mem, rx_buf)) +#define BUF_OFFSET_LNC BUF_OFFSET_CPU + + +/* This is how our shared memory block is layed out */ + +struct lance_shared_mem { + struct lance_init_block init_block; /* Includes RX/TX descriptors */ + char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE]; + char tx_buf[RX_RING_SIZE][RX_BUFF_SIZE]; +}; + + +struct lance_private { + char *name; + + /* virtual addr of registers */ + volatile struct lance_regs *ll; + + /* virtual addr of shared memory block */ + volatile struct lance_shared_mem *lance_mem; + + /* virtual addr of block inside shared mem block */ + volatile struct lance_init_block *init_block; + + unsigned char vsbus_int; + spinlock_t lock; + + int rx_new, tx_new; + int rx_old, tx_old; + + struct net_device_stats stats; + + unsigned short busmaster_regval; + + struct net_device *dev; /* Backpointer */ + struct lance_private *next_module; + struct timer_list multicast_timer; +}; + +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ + lp->tx_old - lp->tx_new-1) + +/* The lance control ports are at an absolute address, machine dependent. + * VAXstations align the two 16-bit registers on 32-bit boundaries + * so we have to give the structure an extra member making rap pointing + * at the right address + */ +struct lance_regs { + volatile unsigned short rdp; /* register data port */ + unsigned short pad; + volatile unsigned short rap; /* register address port */ +}; + + + +/* Communication with the LANCE takes place via four channels: + + 1. 
The RDP and RAP ports (which live at 200e0000 physical on + VS3100-family machines). Through these two ports we can + access the LANCE's 4 registers: CSR0, CSR1, CSR2 and CSR3 + (very imaginatively named...) + + 2. The LANCE init block which we allocate. We tell the LANCE where the + init block lives in memory via the CSR1 and CSR2 registers. The init + block contains the ethernet address, multi-cast address filter and + contains the physical addresses of the RX and TX buffer descriptors. + The init block must be word aligned. + + 3. The RX and TX buffer descriptors are pointed to by the init block and + in turn contain the physical addresses of the RX and TX buffers. + The buffer descriptors must be quadword aligned. + + 4. The RX and TX buffers themselves. These buffers have no alignment + requirement. + + To keep things simple, we allocate a single 64K chunk of memory which + contains the init block, followed by the buffer descriptors and then + the buffers. + + For most CPUs, the physical addresses used by the LANCE and the + virtual addresses used by the CPU follow the usual virt = phys+0x80000000 + convention. + + However, the KA43 has an unusual requirement. Physical memory on the + KA43 is accessible from address 0 upwards as normal, but is also visible + in the region starting a 0x28000000. This region is called the DIAGMEM + region. What's different about it, I don't know, but it's probably + something to do with caching. + + So, after allocating the 64KB chunk, but before we tell the LANCE + about it, we tweak the PTEs behind these pages to map to physical + addresses in the DIAGMEM region. + + As of 2001-03-06, the closest data sheet I can find is the AM79C90 (aka + C-LANCE) on AMD's site at http://www.amd.com/products/npd/techdocs/17881.pdf. 
+*/ + + + +static inline void writereg(volatile unsigned short *regptr, short value) +{ + *regptr = value; +} + +static inline void writecsr0(volatile struct lance_regs *ll, unsigned short value) +{ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, value); +} + +static inline void lance_stop(volatile struct lance_regs *ll) +{ + writecsr0(ll, LE_C0_STOP); + + /* Is this needed? NetBSD does it sometimes */ + udelay(100); +} + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + unsigned long leptr; + + leptr = virt_to_phys(lp->init_block); + + writereg(&ll->rap, LE_CSR1); + writereg(&ll->rdp, (leptr & 0xFFFF)); + writereg(&ll->rap, LE_CSR2); + writereg(&ll->rdp, (leptr >> 16) & 0xFF); + writereg(&ll->rap, LE_CSR3); + writereg(&ll->rdp, lp->busmaster_regval); + + /* Point back to csr0 */ + writereg(&ll->rap, LE_CSR0); +} + + +/* + * Our specialized copy routines + * + */ +static inline void cp_to_buf(void *to, const void *from, __kernel_size_t len) +{ + memcpy(to, from, len); +} + +static inline void cp_from_buf(void *to, unsigned char *from, int len) +{ + memcpy(to, from, len); +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_init_block *ib = lp->init_block; + unsigned long leptr; + int i; + + /* Lock out other processes while setting up hardware */ + + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block. 
+ * XXX bit 0 of the physical address registers has to be zero + */ + ib->phys_addr[0] = dev->dev_addr[0]; + ib->phys_addr[1] = dev->dev_addr[1]; + ib->phys_addr[2] = dev->dev_addr[2]; + ib->phys_addr[3] = dev->dev_addr[3]; + ib->phys_addr[4] = dev->dev_addr[4]; + ib->phys_addr[5] = dev->dev_addr[5]; + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + + /* Calculate the physical address of the first receive descriptor */ + leptr = virt_to_phys(ib->brx_ring); + ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); + ib->rx_ptr = leptr; + +#ifdef VAX_LANCE_DEBUG + printk("RX ptr: %8.8lx(%8.8x)\n", leptr, ib->brx_ring); +#endif + /* Setup tx descriptor pointer */ + + /* Calculate the physical address of the first transmit descriptor */ + leptr = virt_to_phys(ib->btx_ring); + ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); + ib->tx_ptr = leptr; + +#ifdef VAX_LANCE_DEBUG + printk("TX ptr: %8.8lx(%8.8x)\n", leptr, ib->btx_ring); + + printk("TX rings:\n"); +#endif + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = virt_to_phys(lp->lance_mem->tx_buf[i]) & 0xffffff; + + ib->btx_ring[i].tmd0 = leptr; + ib->btx_ring[i].tmd1_hadr = leptr >> 16; + ib->btx_ring[i].tmd1_bits = 0; + ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */ + ib->btx_ring[i].misc = 0; + +#ifdef VAX_LANCE_DEBUG + if (i < 3) + printk("%d: 0x%8.8lx(0x%8.8x)\n", i, leptr, (int) lp->lance_mem->tx_buf[i]); +#endif + } + + /* Setup the Rx ring entries */ +#ifdef VAX_LANCE_DEBUG + printk("RX rings:\n"); +#endif + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = virt_to_phys(lp->lance_mem->rx_buf[i]) & 0xffffff; + + ib->brx_ring[i].rmd0 = leptr; + ib->brx_ring[i].rmd1_hadr = leptr >> 16; + ib->brx_ring[i].rmd1_bits = LE_R1_OWN; + ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000; + ib->brx_ring[i].mblength = 0; +#ifdef VAX_LANCE_DEBUG + if (i < 3) + printk("%d: 0x%8.8lx(0x%8.8x)\n", i, leptr, (int) lp->lance_mem->rx_buf[i]); 
+#endif + } +} + +static int init_restart_lance(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + int i; + + /* Is this needed? NetBSD does it. */ + udelay(100); + + writecsr0(ll, LE_C0_INIT); + + /* Wait for the lance to complete initialization */ + for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) { +#ifdef VAX_LANCE_DEBUG + printk("LANCE opened maybe %d\n", i); +#endif + udelay(10); + } + if ((i == 100) || (ll->rdp & LE_C0_ERR)) { +#ifdef VAX_LANCE_DEBUG + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp); +#endif + return -1; + } + if ((ll->rdp & LE_C0_ERR)) { +#ifdef VAX_LANCE_DEBUG + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp); +#endif + return -1; + } +#ifdef VAX_LANCE_DEBUG + printk("LANCE opened maybe\n"); +#endif + writecsr0(ll, LE_C0_IDON); + writecsr0(ll, LE_C0_INEA | LE_C0_STRT); + + /* AM79C90 datasheet describes a problem in the original AM7990 + whereby INEA cannot be set while STOP is set. What is not + clear is if setting INEA at the same time is STRT is OK. + So, just in case, we might need to set INEA again */ + /* writecsr0(ll, LE_C0_INEA); */ + + return 0; +} + +static int lance_rx(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_rx_desc *rd = 0; + unsigned char bits; + int len = 0; +#ifdef VAX_LANCE_DEBUG_BUFFERS + int i; +#endif + struct sk_buff *skb = 0; + +#ifdef VAX_LANCE_DEBUG_BUFFERS + + printk("["); + for (i = 0; i < RX_RING_SIZE; i++) { + if (i == lp->rx_new) + printk("%s", + ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X"); + else + printk("%s", + ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1"); + } + printk("]"); +#endif + + for (rd = &ib->brx_ring[lp->rx_new]; + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring[lp->rx_new]) { + + /* We got an incomplete frame? 
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + lp->stats.rx_over_errors++; + lp->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) + lp->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) + lp->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) + lp->stats.rx_over_errors++; + if (bits & LE_R1_FRA) + lp->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) + lp->stats.rx_errors++; + } else { + len = (rd->mblength & 0xfff) - 4; + skb = dev_alloc_skb(len + 2); + + if (skb == 0) { + printk("%s: Memory squeeze, deferring packet.\n", + dev->name); + lp->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK; + return 0; + } + lp->stats.rx_bytes += len; + + skb->dev = dev; + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + cp_from_buf(skb->data, + (char *) lp->lance_mem->rx_buf[lp->rx_new], + len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->last_rx=jiffies; + lp->stats.rx_packets++; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->length = -RX_BUFF_SIZE | 0xf000; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK; + } + return 0; +} + +static void lance_tx(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_tx_desc *td; + int i, j; + int status; + j = lp->tx_old; + + spin_lock(&lp->lock); + + for (i = j; i != lp->tx_new; i = j) { + td = &ib->btx_ring[i]; + /* If we hit a packet not owned by us, stop */ + if (td->tmd1_bits & LE_T1_OWN) + break; + + if (td->tmd1_bits & LE_T1_ERR) { + status = td->misc; + + lp->stats.tx_errors++; + if (status & LE_T3_RTY) + lp->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) + lp->stats.tx_window_errors++; + + if (status & 
LE_T3_CLOS) { + lp->stats.tx_carrier_errors++; + printk("%s: Carrier Lost", dev->name); + + lance_stop(ll); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF | LE_T3_UFL)) { + lp->stats.tx_fifo_errors++; + + printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + + lance_stop(ll); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + td->tmd1_bits &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (td->tmd1_bits & LE_T1_EONE) + lp->stats.collisions++; + + /* More than one collision, be optimistic. */ + if (td->tmd1_bits & LE_T1_EMORE) + lp->stats.collisions += 2; + + lp->stats.tx_packets++; + } + j = (j + 1) & TX_RING_MOD_MASK; + } + lp->tx_old = j; +out: + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); +} + +static irqreturn_t lance_interrupt(const int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_regs *ll = lp->ll; + int csr0; + + writereg(&ll->rap, LE_CSR0); + csr0 = ll->rdp; + + if ((csr0 & LE_C0_INTR) == 0) { + /* Hmmm, not for us... 
*/ + return IRQ_HANDLED; + } + + /* According to NetBSD, we need to temporarily disable the + interrupts here to get things to work properly all the + time */ + + /* temporarily disable interrupts from LANCE */ + csr0 &= ~LE_C0_INEA; + + /* Acknowledge all the interrupt sources */ + writecsr0(ll, csr0); + + /* re-enable interrupts from LANCE */ + writecsr0(ll, LE_C0_INEA); + + if ((csr0 & LE_C0_ERR)) { + /* Clear the error condition */ + writecsr0(ll, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | + LE_C0_CERR | LE_C0_MERR); + } + if (csr0 & LE_C0_RINT) + lance_rx(dev); + + if (csr0 & LE_C0_TINT) + lance_tx(dev); + + if (csr0 & LE_C0_BABL) + lp->stats.tx_errors++; + + if (csr0 & LE_C0_MISS) + lp->stats.rx_errors++; + + if (csr0 & LE_C0_MERR) { + printk("%s: Memory error, status %04x", dev->name, csr0); + + lance_stop(ll); + + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); + } + + /* FIXME: why is this really needed? */ + writecsr0(ll, LE_C0_INEA); + + return IRQ_HANDLED; +} + +struct net_device *last_dev = 0; + +static int lance_open(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + + last_dev = dev; + + /* Associate IRQ with lance_interrupt */ + if (vsbus_request_irq(lp->vsbus_int, &lance_interrupt, 0, lp->name, dev)) { + printk("Lance: Can't get irq %d\n", dev->irq); + return -EAGAIN; + } + + lance_stop(ll); + + /* Clear the multicast filter */ + ib->mode=0; + ib->filter[0] = 0; + ib->filter[1] = 0; + ib->filter[2] = 0; + ib->filter[3] = 0; + + lance_init_ring(dev); + load_csrs(lp); + + netif_start_queue(dev); + + return init_restart_lance(lp); +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_regs *ll = lp->ll; + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + 
lance_stop(ll); + + vsbus_free_irq(lp->vsbus_int); + + return 0; +} + +static inline int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_regs *ll = lp->ll; + int status; + + lance_stop(ll); + + lance_init_ring(dev); + load_csrs(lp); + dev->trans_start = jiffies; + status = init_restart_lance(lp); +#ifdef VAX_LANCE_DEBUG + printk("Lance restart=%d\n", status); +#endif + return status; +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_regs *ll = lp->ll; + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", + dev->name, ll->rdp); + lance_reset(dev); + netif_wake_queue(dev); +} + +static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_init_block *ib = lp->init_block; + int entry, skblen, len; + + skblen = skb->len; + + len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; + + spin_lock_irq(&lp->lock); + + lp->stats.tx_bytes += len; + + entry = lp->tx_new & TX_RING_MOD_MASK; + ib->btx_ring[entry].length = (-len) | 0xf000; + ib->btx_ring[entry].misc = 0; + + cp_to_buf((char *) lp->lance_mem->tx_buf[entry], skb->data, skblen); + + /* Clear the slack of the packet, do I need this? 
*/ + /* For a firewall its a good idea - AC */ +/* + if (len != skblen) + memset ((char *) &ib->tx_buf [entry][skblen], 0, (len - skblen) << 1); + */ + /* Now, give the packet to the lance */ + ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); + lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK; + + if (TX_BUFFS_AVAIL <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + writecsr0(ll, LE_C0_INEA | LE_C0_TDMD); + + spin_unlock_irq(&lp->lock); + + dev->trans_start = jiffies; + dev_kfree_skb(skb); + + return 0; +} + +static struct net_device_stats *lance_get_stats(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + + return &lp->stats; +} + +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_init_block *ib = lp->init_block; + volatile u16 *mcast_table = (u16 *)&ib->filter; + struct dev_mc_list *dmi = dev->mc_list; + char *addrs; + int i, j, bit, byte; + u32 crc, poly = CRC_POLYNOMIAL_LE; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = 0xffff; + ib->filter[1] = 0xffff; + ib->filter[2] = 0xffff; + ib->filter[3] = 0xffff; + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + ib->filter[2] = 0; + ib->filter[3] = 0; + + /* Add addresses */ + for (i = 0; i < dev->mc_count; i++) { + addrs = dmi->dmi_addr; + dmi = dmi->next; + + /* multicast address? 
*/ + if (!(*addrs & 1)) + continue; + + crc = 0xffffffff; + for (byte = 0; byte < 6; byte++) + for (bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) { + int test; + + test = ((bit ^ crc) & 0x01); + crc >>= 1; + + if (test) { + crc = crc ^ poly; + } + } + + crc = crc >> 26; + mcast_table[crc >> 4] |= 1 << (crc & 0xf); + } + return; +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + lance_stop(ll); + + lance_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + ib->mode |= LE_MO_PROM; + } else { + ib->mode &= ~LE_MO_PROM; + lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static void lance_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + lance_set_multicast(dev); +} + + +static int __init vax_lance_init(struct net_device *dev, struct vsbus_device *vsbus_dev) +{ + static unsigned version_printed = 0; + struct lance_private *lp; + volatile struct lance_regs *ll; + int i; + unsigned char __iomem *esar; + + /* Could these base addresses be different on other CPUs? 
*/ + unsigned long lance_phys_addr=vsbus_dev->phys_base; + unsigned long esar_phys_addr=KA43_NWA_BASE; + + if (version_printed++ == 0) + printk(version); + + lp = (struct lance_private *) dev->priv; + + spin_lock_init(&lp->lock); + + /* Need a block of 64KB */ + /* At present, until we figure out the address extension + * parity control bit, ask for memory in the DMA zone */ + dev->mem_start = __get_free_pages(GFP_DMA, 4); + if (!dev->mem_start) { + return -ENOMEM; + } + +#ifdef CONFIG_CPU_KA43 + if (is_ka43()) { + + /* FIXME: + We need to check if this block straddles the 16MB boundary. If + it does, then we can't use it for DMA. Instead we allocate + another 64KB block (which obviously cannot straddle the 16MB + boundary as well) and free the first. + + We also need to set the magic bit in PARCTL if we are above + the 16MB boundary. + + */ + + /* KA43 only. */ + ka43_diagmem_remap(dev->mem_start, 65536); + } +#endif /* CONFIG_CPU_KA43 */ + + + dev->mem_end = dev->mem_start + 65536; + + /* FIXME: check this for NULL */ + dev->base_addr = (unsigned long) ioremap(lance_phys_addr, 0x8);; + + lp->lance_mem = (volatile struct lance_shared_mem *)(dev->mem_start); + lp->init_block = &(lp->lance_mem->init_block); + + lp->vsbus_int = vsbus_dev->vsbus_irq; + dev->irq = vsbus_irqindex_to_irq(vsbus_dev->vsbus_irq); + + ll = (struct lance_regs *) dev->base_addr; + + /* FIXME: deal with failure here */ + esar=ioremap(esar_phys_addr, 0x80); + + /* prom checks */ +#ifdef CHECK_ADDRESS_ROM_CHECKSUM + /* If this is dead code, let's remove it... 
- KPH 2001-03-04 */ + /* not sure if its dead it might just not work on the VAX I have + does anyone know if VAX store test pattern in EEPROM */ + /* First, check for test pattern */ + if (esar[0x60] != 0xff && esar[0x64] != 0x00 && + esar[0x68] != 0x55 && esar[0x6c] != 0xaa) { + printk("Ethernet station address prom not found!\n"); + return -ENODEV; + } + /* Check the prom contents */ + for (i = 0; i < 8; i++) { + if (esar[i * 4] != esar[0x3c - i * 4] && + esar[i * 4] != esar[0x40 + i * 4] && + esar[0x3c - i * 4] != esar[0x40 + i * 4]) { + printk("Something is wrong with the ethernet " + "station address prom!\n"); + return -ENODEV; + } + } +#endif + /* Copy the ethernet address to the device structure, later to the + * lance initialization block so the lance gets it every time it's + * (re)initialized. + */ + printk("Ethernet address in ROM: "); + for (i = 0; i < 6; i++) { + dev->dev_addr[i] = esar[i * 4]; + printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + } + + /* Don't need this any more */ + iounmap(esar); + + printk("Using LANCE interrupt vector %d, vsbus irq %d\n", dev->irq, lp->vsbus_int); + + dev->open = &lance_open; + dev->stop = &lance_close; + dev->hard_start_xmit = &lance_start_xmit; + dev->tx_timeout = &lance_tx_timeout; + dev->watchdog_timeo = 5*HZ; + dev->get_stats = &lance_get_stats; + dev->set_multicast_list = &lance_set_multicast; + dev->dma = 0; + + /* lp->ll is the location of the registers for lance card */ + lp->ll = ll; + + lp->name = lancestr; + + /* busmaster_regval (CSR3) should be zero according to the PMAD-AA + * specification. + */ + lp->busmaster_regval = 0; + lp->dev = dev; + + ether_setup(dev); + + /* We cannot sleep if the chip is busy during a + * multicast list update event, because such events + * can occur from interrupts (ex. IPv6). So we + * use a timer to try again later when necessary. 
-DaveM + */ + init_timer(&lp->multicast_timer); + lp->multicast_timer.data = (unsigned long) dev; + lp->multicast_timer.function = &lance_set_multicast_retry; + + SET_NETDEV_DEV(dev, &vsbus_dev->dev); + + return 0; +} + + +static int vaxlance_probe(struct vsbus_device *vsbus_dev) +{ + struct net_device *netdev; + int retval; + + printk("vaxlance_probe: name = %s, base = 0x%08x, irqindex = %d\n", + vsbus_dev->dev.bus_id, vsbus_dev->phys_base, vsbus_dev->vsbus_irq); + + netdev = alloc_etherdev(sizeof(struct lance_private)); + if (!netdev) { + return -ENOMEM; + } + + retval = vax_lance_init(netdev, vsbus_dev); + if (!retval) { + retval = register_netdev(netdev); + } + + if (retval) { + free_netdev(netdev); + } + + return 0; +} + +static struct vsbus_driver vaxlance_driver = { + .probe = vaxlance_probe, + .drv = { + .name = "lance", + }, +}; + +int __init vaxlance_init(void) +{ + return vsbus_register_driver(&vaxlance_driver); +} + +device_initcall(vaxlance_init); + diff -Nru a/drivers/vax/net/sgec.c b/drivers/vax/net/sgec.c --- a/drivers/vax/net/sgec.c 1970-01-01 01:00:00 +++ b/drivers/vax/net/sgec.c 2005-04-26 23:49:57 @@ -0,0 +1,1035 @@ +/* + * SGEC ethernet driver. Reported as EZA0 etc by VAX Console. + * + * SGEC stands for Second Generation Ethernet Card, and is the + * replacement for the LANCE adapters in the MicroVaxen. 
+ * + * Loosely adapted from vaxlance.c by Dave Airlie + * by Richard Banks, Aug 2001 + * + */ + +/* NOTE to self - look at code in arch/vax/if/ *ze*, arch/vax/vsa/ *ze*, and dev/ic/ *sgec* */ + + +static char *version = "vaxsgec.c: v0.001 by Richard Banks\n"; + +static char *sgecstr = "SGEC"; + +#include +#include +#include +#include +#include /* for autoirq_setup/_report */ + +#include /* for __flush_tlb_one */ +#include +#include + +/* use #undef to turn these off */ +#define VAX_SGEC_DEBUG +#define VAX_SGEC_DEBUG_BUFFERS +#define VAX_SGEC_AUTOPROBE_IRQ + +#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */ +#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */ + + +/* SGEC CSRs */ +struct sgec_regs { + unsigned long sg_nicsr0; /* vector address, IPL, sync mode */ + unsigned long sg_nicsr1; /* TX poll demand */ + unsigned long sg_nicsr2; /* RX poll demand */ + struct sgec_rx_desc *sg_nicsr3; /* RX descriptor list address */ + struct sgec_tx_desc *sg_nicsr4; /* TX descriptor list address */ + unsigned long sg_nicsr5; /* SGEC status */ + unsigned long sg_nicsr6; /* SGEC command/mode */ + unsigned long sg_nicsr7; /* system page table base address */ + unsigned long sg_nivcsr8; /* reserved virtual CSR */ + unsigned long sg_nivcsr9; /* watchdog timers (virtual) */ + unsigned long sg_nivcsr10; /* revision, missed frame count (v) */ + unsigned long sg_nivcsr11; /* boot message verification (low) (v) */ + unsigned long sg_nivcsr12; /* boot message verification (high) (v) */ + unsigned long sg_nivcsr13; /* boot message processor (v) */ + unsigned long sg_nivcsr14; /* diagnostic breakpoint (v) */ + unsigned long sg_nicsr15; /* monitor command */ +}; + +/* SGEC bit definitions */ +/* NICSR0: */ +#define SG_NICSR0_IPL 0xc0000000 /* interrupt priority level: */ +#define SG_NICSR0_IPL14 0x00000000 /* 0x14 */ +#define SG_NICSR0_IPL15 0x40000000 /* 0x15 */ +#define SG_NICSR0_IPL16 0x80000000 /* 0x16 */ +#define SG_NICSR0_IPL17 0xc0000000 /* 
0x17 */ +#define SG_NICSR0_SA 0x20000000 /* sync(1)/async mode */ +#define SG_NICSR0_MBO 0x1fff0003 /* must be set to one on write */ +#define SG_NICSR0_IV_MASK 0x0000fffc /* bits for the interrupt vector */ + +/* NICSR1: */ +#define SG_NICSR1_TXPD 0xffffffff /* transmit polling demand */ + +/* NICSR2: */ +#define SG_NICSR2_RXPD 0xffffffff /* receive polling demand */ + +/* NICSR3 and NICSR4 are pure addresses */ +/* NICSR5: */ +#define SG_NICSR5_ID 0x80000000 /* init done */ +#define SG_NICSR5_SF 0x40000000 /* self-test failed */ +#define SG_NICSR5_SS 0x3c000000 /* self-test status field */ +#define SG_NICSR5_TS 0x03000000 /* transmission state: */ +#define SG_NICSR5_TS_STOP 0x00000000 /* stopped */ +#define SG_NICSR5_TS_RUN 0x01000000 /* running */ +#define SG_NICSR5_TS_SUSP 0x02000000 /* suspended */ +#define SG_NICSR5_RS 0x00c00000 /* reception state: */ +#define SG_NICSR5_RS_STOP 0x00000000 /* stopped */ +#define SG_NICSR5_RS_RUN 0x00400000 /* running */ +#define SG_NICSR5_RS_SUSP 0x00800000 /* suspended */ +#define SG_NICSR5_OM 0x00060000 /* operating mode: */ +#define SG_NICSR5_OM_NORM 0x00000000 /* normal */ +#define SG_NICSR5_OM_ILBK 0x00020000 /* internal loopback */ +#define SG_NICSR5_OM_ELBK 0x00040000 /* external loopback */ +#define SG_NICSR5_OM_DIAG 0x00060000 /* reserved for diags */ +#define SG_NICSR5_DN 0x00010000 /* virtual CSR access done */ +#define SG_NICSR5_MBO 0x0038ff00 /* must be one */ +#define SG_NICSR5_BO 0x00000080 /* boot message received */ +#define SG_NICSR5_TW 0x00000040 /* transmit watchdog timeout */ +#define SG_NICSR5_RW 0x00000020 /* receive watchdog timeout */ +#define SG_NICSR5_ME 0x00000010 /* memory error */ +#define SG_NICSR5_RU 0x00000008 /* receive buffer unavailable */ +#define SG_NICSR5_RI 0x00000004 /* receiver interrupt */ +#define SG_NICSR5_TI 0x00000002 /* transmitter interrupt */ +#define SG_NICSR5_IS 0x00000001 /* interrupt summary */ +/* whew! 
*/ + +/* NICSR6: */ +#define SG_NICSR6_RE 0x80000000 /* reset */ +#define SG_NICSR6_IE 0x40000000 /* interrupt enable */ +#define SG_NICSR6_MBO 0x01e7f000 /* must be one */ +#define SG_NICSR6_BL 0x1e000000 /* burst limit mask */ +#define SG_NICSR6_BL_8 0x10000000 /* 8 longwords */ +#define SG_NICSR6_BL_4 0x08000000 /* 4 longwords */ +#define SG_NICSR6_BL_2 0x04000000 /* 2 longwords */ +#define SG_NICSR6_BL_1 0x02000000 /* 1 longword */ +#define SG_NICSR6_BE 0x00100000 /* boot message enable */ +#define SG_NICSR6_SE 0x00080000 /* single cycle enable */ +#define SG_NICSR6_ST 0x00000800 /* start(1)/stop(0) transmission */ +#define SG_NICSR6_SR 0x00000400 /* start(1)/stop(0) reception */ +#define SG_NICSR6_OM 0x00000300 /* operating mode: */ +#define SG_NICSR6_OM_NORM 0x00000000 /* normal */ +#define SG_NICSR6_OM_ILBK 0x00000100 /* internal loopback */ +#define SG_NICSR6_OM_ELBK 0x00000200 /* external loopback */ +#define SG_NICSR6_OM_DIAG 0x00000300 /* reserved for diags */ +#define SG_NICSR6_DC 0x00000080 /* disable data chaining */ +#define SG_NICSR6_FC 0x00000040 /* force collision mode */ +#define SG_NICSR6_PB 0x00000008 /* pass bad frames */ +#define SG_NICSR6_AF 0x00000006 /* address filtering mode: */ +#define SG_NICSR6_AF_NORM 0x00000000 /* normal filtering */ +#define SG_NICSR6_AF_PROM 0x00000002 /* promiscuous mode */ +#define SG_NICSR6_AF_ALLM 0x00000004 /* all multicasts */ + +/* NICSR7 is an address, NICSR8 is reserved */ +/* NICSR9: */ +#define SG_VNICSR9_RT 0xffff0000 /* receiver timeout, *1.6 us */ +#define SG_VNICSR9_TT 0x0000ffff /* transmitter timeout */ + +/* NICSR10: */ +#define SG_VNICSR10_RN 0x001f0000 /* SGEC version */ +#define SG_VNICSR10_MFC 0x0000ffff /* missed frame counter */ + + + +/* SGEC Descriptor defines */ +struct sgec_rx_desc{ + unsigned short word0; /* descriptor word 0 */ + unsigned short framelen; /* frame length */ + unsigned char rsvd[3]; /* unused */ + unsigned char word1; /* descriptor word 1 - flags */ + short page_offset; 
/* buffer page offset */ + short buffsize; /* buffer size */ + unsigned char *buffaddr; /* buffer address */ +}; + +#define SG_FR_OWN 0x8000 /* We own the descriptor */ +#define SG_R0_ERR 0x8000 /* an error occurred */ +#define SG_R0_LEN 0x4000 /* length error */ +#define SG_R0_DAT 0x3000 /* data type (next 3 are subtypes) */ +#define SG_R0_DAT_NORM 0x0000 /* normal frame */ +#define SG_R0_DAT_INLB 0x1000 /* internal loop back */ +#define SG_R0_DAT_EXLB 0x2000 /* external loop back */ +#define SG_R0_FRA 0x0800 /* runt frame */ +#define SG_R0_OFL 0x0400 /* buffer overflow */ +#define SG_R0_FSG 0x0200 /* first segment */ +#define SG_R0_LSG 0x0100 /* last segment */ +#define SG_R0_LNG 0x0080 /* frame too long */ +#define SG_R0_COL 0x0040 /* collision seen */ +#define SG_R0_EFT 0x0020 /* etherenet frame type */ +#define SG_R0_TNV 0x0008 /* address translation not valid */ +#define SG_R0_DRB 0x0004 /* saw some dribbling bits */ +#define SG_R0_CRC 0x0002 /* CRC error */ +#define SG_R0_FFO 0x0001 /* fifo overflow */ +#define SG_R1_CAD 0x80 /* chain address */ +#define SG_R1_VAD 0x40 /* virtual address */ +#define SG_R1_VPA 0x20 /* virtual/physical PTE address */ + +/* Transmit descriptor */ +struct sgec_tx_desc { + unsigned short word0; /* descriptor word 0 flags */ + unsigned short tdr; /* TDR count of cable fault */ + unsigned char rsvd1[2]; /* unused bytes */ + unsigned short word1; /* descriptor word 1 flags */ + short pageoffset; /* offset of buffer in page */ + short bufsize; /* length of data buffer */ + unsigned char *bufaddr; /* address of data buffer */ +}; + +/* Receive descriptor bits */ +#define SG_TDR_OWN 0x8000 /* SGEC owns this descriptor */ +#define SG_TD0_ES 0x8000 /* an error has occurred */ +#define SG_TD0_TO 0x4000 /* transmit watchdog timeout */ +#define SG_TD0_LE 0x1000 /* length error */ +#define SG_TD0_LO 0x0800 /* loss of carrier */ +#define SG_TD0_NC 0x0400 /* no carrier */ +#define SG_TD0_LC 0x0200 /* late collision */ +#define SG_TD0_EC 0x0100 
/* excessive collisions */ +#define SG_TD0_HF 0x0080 /* heartbeat fail */ +#define SG_TD0_CC 0x0078 /* collision count mask */ +#define SG_TD0_TN 0x0004 /* address translation invalid */ +#define SG_TD0_UF 0x0002 /* underflow */ +#define SG_TD0_DE 0x0001 /* transmission deferred */ +#define SG_TD1_CA 0x8000 /* chain address */ +#define SG_TD1_VA 0x4000 /* virtual address */ +#define SG_TD1_DT 0x3000 /* data type: */ +#define SG_TD1_DT_NORM 0x0000 /* normal transmit frame */ +#define SG_TD1_DT_SETUP 0x2000 /* setup frame */ +#define SG_TD1_DT_DIAG 0x3000 /* diagnostic frame */ +#define SG_TD1_AC 0x0800 /* CRC disable */ +#define SG_TD1_FS 0x0400 /* first segment */ +#define SG_TD1_LS 0x0200 /* last segment */ +#define SG_TD1_POK 0x0600 /* packet OK to send - first and last segment set */ +#define SG_TD1_IC 0x0100 /* interrupt on completion */ +#define SG_TD1_VT 0x0080 /* virtual(1)/phys PTE address */ + +/* + * Adresses. + */ +#define NISA_ROM ( \ + { \ + unsigned long __addr; \ + if (is_ka49 ()) \ + /* VS 4000m90 */ \ + __addr = 0x27800000; \ + else \ + /* QBUS 3100/85 */ \ + __addr = 0x20084000; \ + \ + __addr; \ + }) + +/* + * Register offsets + */ +#define SG_CSR0 0 +#define SG_CSR1 4 +#define SG_CSR2 8 +#define SG_CSR3 12 +#define SG_CSR4 16 +#define SG_CSR5 20 +#define SG_CSR6 24 +#define SG_CSR7 28 +#define SG_CSR8 32 +#define SG_CSR9 36 +#define SG_CSR10 40 +#define SG_CSR11 44 +#define SG_CSR12 48 +#define SG_CSR13 52 +#define SG_CSR14 56 +#define SG_CSR15 60 + +/* must be an even number of receive/transmit descriptors */ +#define RXDESCS 30 /* no of receive descriptors */ +#define TXDESCS 60 /* no of transmit descriptors */ + +#define TX_RING_SIZE 60 +#define TX_RING_MOD_MASK 59 + +#define RX_RING_SIZE 30 +#define RX_RING_MOD_MASK 29 + +#define PKT_BUF_SZ 1536 +#define RX_BUFF_SIZE PKT_BUF_SZ +#define TX_BUFF_SIZE PKT_BUF_SZ + + +/* First part of the SGEC initialization block, described in databook. 
*/
+struct sgec_init_block {
+	unsigned short mode;		/* Pre-set mode (reg. 15) */
+
+	unsigned char phys_addr[6];	/* Physical ethernet address
+					 * only 0, 1, 4, 5, 8, 9 are valid
+					 * 2, 3, 6, 7, 10, 11 are gaps
+					 */
+	unsigned short filter[4];	/* Multicast filter.
+					 * only 0, 2, 4, 6 are valid
+					 * 1, 3, 5, 7 are gaps
+					 */
+
+	/* Receive and transmit ring base, along with extra bits. */
+	unsigned short rx_ptr;		/* receive descriptor addr */
+	unsigned short rx_len;		/* receive len and high addr */
+	unsigned short tx_ptr;		/* transmit descriptor addr */
+	unsigned short tx_len;		/* transmit len and high addr */
+	short gap0[4];
+
+	/* The buffer descriptors */
+	struct sgec_rx_desc brx_ring[RX_RING_SIZE];
+	struct sgec_tx_desc btx_ring[TX_RING_SIZE];
+};
+
+
+#define BUF_OFFSET_CPU (offsetof(struct sgec_shared_mem, rx_buf))
+#define BUF_OFFSET_LNC BUF_OFFSET_CPU
+
+
+/* This is how our shared memory block is layed out */
+
+struct sgec_shared_mem {
+	struct sgec_init_block init_block;	/* Includes RX/TX descriptors */
+	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+	/* Must be sized by the TX ring: sgec_init_ring indexes tx_buf[i]
+	 * for i up to TX_RING_SIZE-1 (60 entries).  The old declaration
+	 * [RX_RING_SIZE][RX_BUFF_SIZE] only provided 30 entries and
+	 * overran the end of the shared memory block. */
+	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+};
+
+struct sgec_private {
+	char *name;
+	/* location of registers */
+	volatile struct sgec_regs *ll;
+	/* virtual addr of shared memory block */
+	volatile struct sgec_shared_mem *sgec_mem;
+	/* virtual addr of block inside shared mem block */
+	volatile struct sgec_init_block *init_block;
+	unsigned char vsbus_int;
+	spinlock_t lock;
+	int rx_new, tx_new;
+	int rx_old, tx_old;
+	struct net_device_stats stats;
+	unsigned short busmaster_regval;
+	struct net_device *dev;	/* Backpointer */
+	struct sgec_private *next_module;
+	struct timer_list multicast_timer;
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+			lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+			lp->tx_old - lp->tx_new-1)
+
+
+static inline void writereg(volatile unsigned short *regptr, unsigned long value)
+{
+	*regptr = value;
+}
+
+static inline void writecsr6(volatile struct sgec_regs *ll, unsigned
long value) +{ + /* was &ll->sg_nicsr6 */ + writereg((volatile unsigned short *)&ll->sg_nicsr6, value); +} + +static inline void sgec_stop(volatile struct sgec_regs *ll) +{ + writecsr6(ll, ll->sg_nicsr6 & ~SG_NICSR6_ST); /* stop transmission */ + writecsr6(ll, ll->sg_nicsr6 & ~SG_NICSR6_SR); /* stop receiving */ + udelay(100); +} + +/* Load the CSR registers */ +static void load_csrs(struct sgec_private *lp) +{ + volatile struct sgec_regs *ll = lp->ll; + // unsigned long leptr; + + writecsr6(ll, SG_NICSR6_IE|SG_NICSR6_BL_8|SG_NICSR6_ST|SG_NICSR6_SR|SG_NICSR6_DC); +} + +static inline void cp_to_buf(void *to, const void *from, __kernel_size_t len) +{ + memcpy(to, from, len); +} + +static inline void cp_from_buf(void *to, unsigned char *from, int len) +{ + memcpy(to, from, len); +} + +static void sgec_init_ring(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_init_block *ib = lp->init_block; + unsigned long leptr; + int i; + + /* Lock out other processes while setting up hardware */ + + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the sgec init block. 
+ * XXX bit 0 of the physical address registers has to be zero + */ + ib->phys_addr[0] = dev->dev_addr[0]; + ib->phys_addr[1] = dev->dev_addr[1]; + ib->phys_addr[2] = dev->dev_addr[2]; + ib->phys_addr[3] = dev->dev_addr[3]; + ib->phys_addr[4] = dev->dev_addr[4]; + ib->phys_addr[5] = dev->dev_addr[5]; + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + + /* Calculate the physical address of the first receive descriptor */ + leptr = virt_to_phys(ib->brx_ring); + /* ib->rx_len = (SGEC_LOG_RX_BUFFERS << 13) | (leptr >> 16); + ib->rx_ptr = leptr; */ + +#ifdef VAX_SGEC_DEBUG + // printk("RX ptr: %8.8lx(%8.8x)\n", leptr, ib->brx_ring); +#endif + /* Setup tx descriptor pointer */ + + /* Calculate the physical address of the first transmit descriptor */ + leptr = virt_to_phys(ib->btx_ring); + /* ib->tx_len = (SGEC_LOG_TX_BUFFERS << 13) | (leptr >> 16); + ib->tx_ptr = leptr; */ + +#ifdef VAX_SGEC_DEBUG + //printk("TX ptr: %8.8lx(%8.8x)\n", leptr, ib->btx_ring); + + printk("TX rings:\n"); +#endif + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = virt_to_phys(lp->sgec_mem->tx_buf[i]) & 0xffffff; + // ib->btx_ring[i].framelen = SG_FR_OWN; +#ifdef VAX_SGEC_DEBUG + if (i < 3) + printk("%d: 0x%8.8lx(0x%8.8x)\n", i, leptr, (int) lp->sgec_mem->tx_buf[i]); +#endif + } + + /* Setup the Rx ring entries */ +#ifdef VAX_SGEC_DEBUG + printk("RX rings:\n"); +#endif + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = virt_to_phys(lp->sgec_mem->rx_buf[i]) & 0xffffff; +#ifdef VAX_SGEC_DEBUG + if (i < 3) + printk("%d: 0x%8.8lx(0x%8.8x)\n", i, leptr, (int) lp->sgec_mem->rx_buf[i]); +#endif + } +} + +static int init_restart_sgec(struct sgec_private *lp) +{ + volatile struct sgec_regs *ll = lp->ll; + int i; + int reg; + + udelay(100); + + reg = ll->sg_nicsr6; + writecsr6(ll, SG_NICSR6_RE); /* reset card */ + + /* Wait for the sgec to complete initialization */ + for (i = 0; (i < 100) && !(ll->sg_nicsr5 & SG_NICSR5_ID); i++) { +#ifdef 
VAX_SGEC_DEBUG
+		printk("SGEC opened maybe %d\n", i);
+#endif
+		udelay(10);
+	}
+	if ((i == 100) || (ll->sg_nicsr5 & SG_NICSR5_SF)) {
+#ifdef VAX_SGEC_DEBUG
+		// printk("SGEC unopened after %d ticks, csr0=%4.4x.\n", i, ll->sg_nicsr5);
+#endif
+		return -1;
+	}
+	/* NOTE(review): this repeats the SG_NICSR5_SF test just made above
+	 * (same pattern exists in the LANCE driver) - harmless but redundant. */
+	if ((ll->sg_nicsr5 & SG_NICSR5_SF)) {
+#ifdef VAX_SGEC_DEBUG
+		// printk("SGEC unopened after %d ticks, csr0=%4.4x.\n", i, ll->sg_nicsr5);
+#endif
+		return -1;
+	}
+#ifdef VAX_SGEC_DEBUG
+	printk("SGEC opened maybe\n");
+#endif
+
+	writecsr6(ll, SG_NICSR6_IE | SG_NICSR6_BL_8|SG_NICSR6_ST|SG_NICSR6_SR|SG_NICSR6_DC);
+	return 0;
+}
+
+
+static int sgec_rx(struct net_device *dev)
+{
+	struct sgec_private *lp = (struct sgec_private *)dev->priv;
+	volatile struct sgec_init_block *ib = lp->init_block;
+	volatile struct sgec_rx_desc *rd = 0;
+	/* framelen is a 16-bit field and SG_FR_OWN is 0x8000: an
+	 * "unsigned char" here (as originally declared) truncated away
+	 * the OWN bit, so tests on it could never see the chip's
+	 * ownership flag. */
+	unsigned short bits;
+	int len = 0;
+	struct sk_buff *skb = 0;
+
+/* The guard used to be misspelled "SGEC_DEBUG_BUFFERS" (the file
+ * defines VAX_SGEC_DEBUG_BUFFERS), which also hid the "i==" syntax
+ * error in the loop below from the compiler. */
+#ifdef VAX_SGEC_DEBUG_BUFFERS
+	int i;
+
+	printk("[");
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		if (i == lp->rx_new)
+			printk("%s", ib->brx_ring[i].framelen & SG_FR_OWN ? "_" : "X");
+		else
+			printk("%s", ib->brx_ring[i].framelen & SG_FR_OWN ? "."
: "1"); + } + printk("]"); +#endif + + for (rd=&ib->brx_ring[lp->rx_new]; + !((bits = rd->framelen) & SG_FR_OWN); + rd=&ib->brx_ring[lp->rx_new]){ + + /* + * check for incomplete frame + if ((bits & SG_R0_POK) != SG_R0_POK) { + lp->stats.rx_over_errors ++; + lp->stats_rx_errors++; + } + else if (bits & SG_R0_ERR) { + * only count last frame as the error + if (bits & SG_R0_BUF) lp->stats.rx_fifo_errors++; + if (bits & SG_R0_CRC) lp->stats.rx_crc_errors++; + if (bits & SG_R0_OFL) lp->stats.rx_over_errors++; + if (bits & SG_R0_FRA) lp->stats.rx_frame_errors++; + if (bits & SG_R0_EOP) lp->stats.rx_errors++; + } else { */ + len = rd->framelen; + skb = dev_alloc_skb((unsigned int)len + 2); + if (skb == 0) { + printk("%s: SGEC Deferring Packet\n", dev->name); + lp->stats.rx_dropped++; + //rd->mblength = 0; + rd->framelen = SG_FR_OWN; + lp->rx_new =(lp->rx_new+1) & RX_RING_MOD_MASK; + return 0; + /* } */ + lp->stats.rx_bytes += len; + skb->dev = dev; + skb_reserve(skb,2); /*16 byte align */ + + skb_put(skb, len); /* make room */ + cp_from_buf(skb->data, + (char *) lp->sgec_mem->rx_buf[lp->rx_new], + len); + skb->protocol = eth_type_trans(skb,dev); + netif_rx(skb); + dev->last_rx=jiffies; + lp->stats.rx_packets++; + } + // rd->mblength=0; + rd->framelen = len; + rd->framelen &= SG_FR_OWN; + lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK; + } + return 0; +} + +static void sgec_tx(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_init_block *ib = lp->init_block; + volatile struct sgec_regs *ll = lp->ll; + volatile struct sgec_tx_desc *td; + int i, j; + j = lp->tx_old; + + spin_lock(&lp->lock); + + for (i = j; i != lp->tx_new; i = j) { + td = &ib->btx_ring[i]; + /* If we hit a packet not owned by us, stop */ + if (td->word1 & SG_FR_OWN) break; + /* if (td->tmd1_bits & SG_T0_ERR) { + status = td->misc; + lp->stats.tx_errors++; + if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) 
lp->stats.tx_window_errors++; + if (status & LE_T3_CLOS) { + lp->stats.tx_carrier_errors++; + printk("%s: Carrier Lost", dev->name); + sgec_stop(ll); + sgec_init_ring(dev); + load_csrs(lp); + init_restart_sgec(lp); + goto out; + } + */ + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + /* if (status & (LE_T3_BUF | LE_T3_UFL)) { + lp->stats.tx_fifo_errors++; + printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); + sgec_stop(ll); + sgec_init_ring(dev); + load_csrs(lp); + init_restart_sgec(lp); + goto out; + } + } else + */ + if ((td->word1 & SG_TD1_POK) == SG_TD1_POK) { + /* + * So we don't count the packet more than once. + */ + td->word1 &= ~(SG_TD1_POK); + + /* * One collision before packet was sent. + if (td->word1 & SG_T1_EONE) + lp->stats.collisions++; + + * More than one collision, be optimistic. + if (td->tmd1_bits & LE_T1_EMORE) + lp->stats.collisions += 2; + */ + lp->stats.tx_packets++; + } + j = (j + 1) & TX_RING_MOD_MASK; + } + lp->tx_old = j; + out: + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); +} + +static irqreturn_t sgec_interrupt(const int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_regs *ll = lp->ll; + unsigned long csr5; + + csr5 = ll->sg_nicsr5; + + if ((csr5 & SG_NICSR5_IS) == 0) { + /* Hmmm, not for us... 
*/ + return IRQ_NONE; + } + writereg(&ll->sg_nicsr5, csr5); /* reset interrupt */ + + /* if ((csr0 & LE_C0_ERR)) { + * Clear the error condition + writecsr0(ll, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | + LE_C0_CERR | LE_C0_MERR); + }*/ + if (csr5 & SG_NICSR5_RI) sgec_rx(dev); + if (csr5 & SG_NICSR5_TI) sgec_tx(dev); +/* + if (csr0 & LE_C0_BABL) + lp->stats.tx_errors++; + + if (csr0 & LE_C0_MISS) + lp->stats.rx_errors++; + + if (csr0 & LE_C0_MERR) { + printk("%s: Memory error, status %04x", dev->name, csr0); + + sgec_stop(ll); + + sgec_init_ring(dev); + load_csrs(lp); + init_restart_sgec(lp); + netif_wake_queue(dev); + } +*/ + + return IRQ_HANDLED; +} + +extern struct net_device *last_dev; + +static int sgec_open(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_init_block *ib = lp->init_block; + volatile struct sgec_regs *ll = lp->ll; + + last_dev = dev; + + /* Associate IRQ with sgec_interrupt */ + if (0){ + if (vsbus_request_irq (lp->vsbus_int, &sgec_interrupt, 0, lp->name, dev)) { + printk("SGEC: Can't get irq %d\n", dev->irq); + return -EAGAIN; + } + } else { + printk (KERN_ERR "Ignoring interrupt for now...\n"); + } + + sgec_stop(ll); + + /* Clear the multicast filter */ + ib->mode=0; + ib->filter[0] = 0; + ib->filter[1] = 0; + ib->filter[2] = 0; + ib->filter[3] = 0; + + sgec_init_ring(dev); + load_csrs(lp); + + netif_start_queue(dev); + + return init_restart_sgec(lp); +} + +static int sgec_close(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_regs *ll = lp->ll; + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + sgec_stop(ll); + + free_irq(dev->irq, (void *) dev); + /* + MOD_DEC_USE_COUNT; + */ + return 0; +} + +static inline int sgec_reset(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_regs *ll = lp->ll; + int status; + + sgec_stop(ll); + + 
sgec_init_ring(dev); + load_csrs(lp); + dev->trans_start = jiffies; + status = init_restart_sgec(lp); +#ifdef VAX_SGEC_DEBUG + printk("SGEC restart=%d\n", status); +#endif + return status; +} + +static void sgec_tx_timeout(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_regs *ll = lp->ll; + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", + dev->name, ll->sg_nicsr6); + sgec_reset(dev); + netif_wake_queue(dev); +} + +static int sgec_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_regs *ll = lp->ll; + volatile struct sgec_init_block *ib = lp->init_block; + int entry, skblen, len; + + skblen = skb->len; + + len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; + + spin_lock_irq(&lp->lock); + + lp->stats.tx_bytes += len; + + entry = lp->tx_new & TX_RING_MOD_MASK; + ib->btx_ring[entry].word1 = len; + // ib->btx_ring[entry].misc = 0; + + cp_to_buf((char *) lp->sgec_mem->tx_buf[entry], skb->data, skblen); + + /* Clear the slack of the packet, do I need this? 
*/
+	/* For a firewall its a good idea - AC */
+/*
+	if (len != skblen)
+		memset ((char *) &ib->tx_buf [entry][skblen], 0, (len - skblen) << 1);
+ */
+	/* Now, give the packet to the card */
+	ib->btx_ring[entry].word1 = SG_FR_OWN; /* (LE_T1_POK | LE_T1_OWN);*/
+	lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK;
+
+	if (TX_BUFFS_AVAIL <= 0)
+		netif_stop_queue(dev);
+
+	/* Kick the SGEC: transmit now */
+	writereg(&ll->sg_nicsr5, SG_NICSR5_TI);
+
+	spin_unlock_irq(&lp->lock);
+
+	dev->trans_start = jiffies;
+	dev_kfree_skb(skb);
+
+	return 0;
+}
+
+static struct net_device_stats *sgec_get_stats(struct net_device *dev)
+{
+	struct sgec_private *lp = (struct sgec_private *) dev->priv;
+
+	return &lp->stats;
+}
+
+static void sgec_load_multicast(struct net_device *dev)
+{
+	struct sgec_private *lp = (struct sgec_private *) dev->priv;
+	volatile struct sgec_init_block *ib = lp->init_block;
+	volatile u16 *mcast_table = (u16 *)&ib->filter;
+	struct dev_mc_list *dmi = dev->mc_list;
+	char *addrs;
+	int i, j, bit, byte;
+	u32 crc, poly = CRC_POLYNOMIAL_LE;
+
+	/* set all multicast bits */
+	if (dev->flags & IFF_ALLMULTI) {
+		ib->filter[0] = 0xffff;
+		ib->filter[1] = 0xffff;
+		ib->filter[2] = 0xffff;
+		ib->filter[3] = 0xffff;
+		return;
+	}
+	/* clear the multicast filter */
+	ib->filter[0] = 0;
+	ib->filter[1] = 0;
+	ib->filter[2] = 0;
+	ib->filter[3] = 0;
+
+	/* Add addresses */
+	for (i = 0; i < dev->mc_count; i++) {
+		addrs = dmi->dmi_addr;
+		dmi = dmi->next;
+
+		/* multicast address?
*/ + if (!(*addrs & 1)) + continue; + + crc = 0xffffffff; + for (byte = 0; byte < 6; byte++) + for (bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) { + int test; + + test = ((bit ^ crc) & 0x01); + crc >>= 1; + + if (test) { + crc = crc ^ poly; + } + } + + crc = crc >> 26; + mcast_table[crc >> 4] |= 1 << (crc & 0xf); + } + return; +} + +static void sgec_set_multicast(struct net_device *dev) +{ + struct sgec_private *lp = (struct sgec_private *) dev->priv; + volatile struct sgec_init_block *ib = lp->init_block; + volatile struct sgec_regs *ll = lp->ll; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + sgec_stop(ll); + + sgec_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + ib->mode |= SG_NICSR6_AF_PROM; + } else { + ib->mode &= ~SG_NICSR6_AF_PROM; + sgec_load_multicast(dev); + } + load_csrs(lp); + init_restart_sgec(lp); + netif_wake_queue(dev); +} + +static void sgec_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + sgec_set_multicast(dev); +} + +static int __init vax_sgec_init(struct net_device *dev, + struct vsbus_device *vsbus_dev) +{ + static unsigned version_printed = 0; + struct sgec_private *lp; + volatile struct sgec_regs *ll; + int i, ret; + volatile unsigned long __iomem *esar; + + /* Could these base addresses be different on other CPUs? 
*/ + unsigned long sgec_phys_addr = vsbus_dev->phys_base; + unsigned long esar_phys_addr = NISA_ROM; + printk (KERN_INFO "esar_phys_addr = 0x%08x\n", esar_phys_addr); + + if (version_printed++ == 0) + printk(version); + + lp = (struct sgec_private *) dev->priv; + + spin_lock_init(&lp->lock); + + /* Need a block of 64KB */ + /* At present, until we figure out the address extension + * parity control bit, ask for memory in the DMA zone */ + dev->mem_start = __get_free_pages(GFP_DMA, 4); + if (!dev->mem_start) { + /* Shouldn't we free dev->priv here if dev was non-NULL on entry? */ + return -ENOMEM; + } + + dev->mem_end = dev->mem_start + 65536; + + dev->base_addr = (unsigned long) ioremap (sgec_phys_addr, 0x8); + dev->irq = vsbus_irqindex_to_irq (vsbus_dev->vsbus_irq); + + lp->sgec_mem = (volatile struct sgec_shared_mem *)(dev->mem_start); + lp->init_block = &(lp->sgec_mem->init_block); + + lp->vsbus_int = vsbus_dev->vsbus_irq; + + ll = (struct sgec_regs *) dev->base_addr; + + /* FIXME: deal with failure here */ + esar = ioremap (esar_phys_addr, 0x80); + + /* 3rd byte contains address part in 3100/85 -RB */ + /* Note that 660 board types use a different position */ + /* Copy the ethernet address to the device structure, later to the + * sgec initialization block so the card gets it every time it's + * (re)initialized. + */ + printk("Ethernet address in ROM: "); + for (i = 0; i < 6; i++) { +#if 0 /* Not yet */ + if (is_ka670 ()) + dev->dev_addr[i] = (esar[i] & 0xff00) >> 8; + else +#endif + dev->dev_addr[i] = esar[i] & 0xff; + printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
'\n' : ':');
+	}
+
+	/* Don't need this any more */
+	iounmap (esar);
+
+	printk (KERN_INFO "Using SGEC interrupt vector %d, vsbus irq %d\n",
+			dev->irq, lp->vsbus_int);
+
+	dev->open = &sgec_open;
+	dev->stop = &sgec_close;
+	dev->hard_start_xmit = &sgec_start_xmit;
+	dev->tx_timeout = &sgec_tx_timeout;
+	dev->watchdog_timeo = 5*HZ;
+	dev->get_stats = &sgec_get_stats;
+	dev->set_multicast_list = &sgec_set_multicast;
+	dev->dma = 0;
+
+	/* lp->ll is the location of the registers for card */
+	lp->ll = ll;
+
+	lp->name = sgecstr;
+
+	/* busmaster_regval (CSR3) should be zero according to the PMAD-AA
+	 * specification.
+	 */
+	lp->busmaster_regval = 0;
+	lp->dev = dev;
+
+	ether_setup(dev);
+
+	/* We cannot sleep if the chip is busy during a
+	 * multicast list update event, because such events
+	 * can occur from interrupts (ex. IPv6). So we
+	 * use a timer to try again later when necessary. -DaveM
+	 */
+	init_timer(&lp->multicast_timer);
+	lp->multicast_timer.data = (unsigned long) dev;
+	lp->multicast_timer.function = &sgec_set_multicast_retry;
+
+	SET_NETDEV_DEV(dev, &vsbus_dev->dev);
+
+	return 0;
+
+err_out:
+	unregister_netdev(dev);
+	kfree(dev);
+	return ret;
+}
+
+
+/* Find all the SGEC cards on the system and initialize them */
+static int __init vax_sgec_probe (struct vsbus_device *vsbus_dev)
+{
+	struct net_device *netdev;
+	int retval;
+
+	printk("vax_sgec_probe: name = %s, base = 0x%08x, irqindex = %d\n",
+		vsbus_dev->dev.bus_id, vsbus_dev->phys_base, vsbus_dev->vsbus_irq);
+
+	netdev = alloc_etherdev (sizeof (struct sgec_private));
+	if (!netdev)
+		return -ENOMEM;
+
+	retval = vax_sgec_init (netdev, vsbus_dev);
+	if (retval == 0) {
+		retval = register_netdev (netdev);
+		if (retval)
+			free_netdev (netdev);
+	}
+
+	return retval;
+}
+
+static struct vsbus_driver vax_sgec_driver = {
+	.probe		= vax_sgec_probe,
+	.drv = {
+		.name	= "sgec",
+	},
+};
+
+int __init sgec_init_module (void)
+{
+	return vsbus_register_driver (&vax_sgec_driver);
+}
+
+void __exit
sgec_exit_module (void) +{ + printk (KERN_ERR "vax_sgec_exit: What to do???\n"); +} + +module_init (sgec_init_module); +module_exit (sgec_exit_module); + diff -Nru a/drivers/vax/scsi/Makefile b/drivers/vax/scsi/Makefile --- a/drivers/vax/scsi/Makefile 1970-01-01 01:00:00 +++ b/drivers/vax/scsi/Makefile 2004-08-09 01:26:24 @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/VAX serial UART drivers. +# + +obj-$(CONFIG_SCSI_VAX_5380) += vax-5380.o + diff -Nru a/drivers/vax/scsi/vax-5380.c b/drivers/vax/scsi/vax-5380.c --- a/drivers/vax/scsi/vax-5380.c 1970-01-01 01:00:00 +++ b/drivers/vax/scsi/vax-5380.c 2005-04-25 11:37:12 @@ -0,0 +1,165 @@ +/* + * Driver for NCR5380 SCSI controller on KA42 and KA43 CPU boards. + * + * Copyright 2000, 2004 Kenn Humborg + * + * Based on ARM SCSI drivers by Russell King + */ + +#include +#include +#include +#include + +#include + +#include "../../scsi/scsi.h" +#include + +/* See NCR5380.c for the options that can be set */ +#define AUTOSENSE + +#define NCR5380_implementation_fields \ + unsigned volatile char *base + +#define NCR5380_local_declare() \ + unsigned volatile char *base + +#define NCR5380_setup(instance) \ + base = (unsigned volatile char *)((instance)->base) + +#define VAX_5380_address(reg) (base + ((reg) * 0x04)) + +#if !(VDEBUG & VDEBUG_TRANSFER) +#define NCR5380_read(reg) (*(VAX_5380_address(reg))) +#define NCR5380_write(reg, value) (*(VAX_5380_address(reg)) = (value)) +#else +#define NCR5380_read(reg) \ + (((unsigned char) printk("scsi%d : read register %d at address %08x\n"\ + , instance->hostno, (reg), VAX_5380_address(reg))), *(VAX_5380_address(reg))) + +#define NCR5380_write(reg, value) { \ + printk("scsi%d : write %02x to register %d at address %08x\n", \ + instance->hostno, (value), (reg), VAX_5380_address(reg)); \ + *(VAX_5380_address(reg)) = (value); \ +} +#endif + + +#include "../../scsi/NCR5380.h" +#include "../../scsi/NCR5380.c" + + +const char *vax_5380_info (struct Scsi_Host *spnt) +{ + return ""; +} + +static 
Scsi_Host_Template vax_5380_template = { + .name = "VAXstation 3100/MicroVAX 3100 NCR5380 SCSI", + .info = vax_5380_info, + .queuecommand = NCR5380_queue_command, + .eh_abort_handler = NCR5380_abort, + .eh_bus_reset_handler = NCR5380_bus_reset, + .eh_device_reset_handler = NCR5380_device_reset, + .eh_host_reset_handler = NCR5380_host_reset, + .can_queue = 32, + .this_id = 6, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .use_clustering = DISABLE_CLUSTERING, + .proc_name = "vax-5380", + .proc_info = NCR5380_proc_info, +}; + +static int __init +vax_5380_probe(struct vsbus_device *vsbus_dev) +{ + struct Scsi_Host *host; + int retval = -ENOMEM; + + printk("vax_5380_probe: name = %s, base = 0x%08x, irqindex = %d\n", + vsbus_dev->dev.bus_id, vsbus_dev->phys_base, vsbus_dev->vsbus_irq); + + host = scsi_host_alloc(&vax_5380_template, sizeof(struct NCR5380_hostdata)); + if (!host) + goto out; + + host->base = (unsigned long) ioremap(vsbus_dev->phys_base, 0x80); + if (!host->base) + goto out_unreg; + + NCR5380_init(host, 0); + + host->irq = vsbus_dev->vsbus_irq; + + retval = vsbus_request_irq(host->irq, NCR5380_intr, SA_INTERRUPT, "vax-5380", host); + if (retval) { + printk("scsi%d: IRQ%d not free: %d\n", + host->host_no, host->irq, retval); + goto out_iounmap; + } + + printk("scsi%d: at virt 0x%08lx VSBUS irq %d", + host->host_no, host->base, host->irq); + printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d", + host->can_queue, host->cmd_per_lun); + printk("\nscsi%d:", host->host_no); + NCR5380_print_options(host); + printk("\n"); + + retval = scsi_add_host(host, &vsbus_dev->dev); + if (retval) + goto out_free_irq; + + scsi_scan_host(host); + goto out; + +out_free_irq: + vsbus_free_irq(host->irq); +out_iounmap: + iounmap((void *)host->base); +out_unreg: + scsi_host_put(host); +out: + return retval; +} + +static void __devexit vax_5380_remove(struct vsbus_device *vsbus_dev) +{ + struct Scsi_Host *host = dev_get_drvdata(&vsbus_dev->dev); + + scsi_remove_host(host); + + 
vsbus_free_irq(host->irq); + NCR5380_exit(host); + iounmap((void *)host->base); + + scsi_host_put(host); +} + +static struct vsbus_driver vax_5380_driver = { + .probe = vax_5380_probe, + .remove = __devexit_p(vax_5380_remove), + .drv = { + .name = "vax-5380", + }, +}; + +static int __init vax_5380_init(void) +{ + return vsbus_register_driver(&vax_5380_driver); +} + +static void __exit vax_5380_exit(void) +{ + vsbus_unregister_driver(&vax_5380_driver); +} + +module_init(vax_5380_init); +module_exit(vax_5380_exit); + +MODULE_AUTHOR("Kenn Humborg"); +MODULE_DESCRIPTION("VAX NCR5380 SCSI driver for KA42,KA43"); +MODULE_LICENSE("GPL"); + diff -Nru a/drivers/vax/serial/Makefile b/drivers/vax/serial/Makefile --- a/drivers/vax/serial/Makefile 1970-01-01 01:00:00 +++ b/drivers/vax/serial/Makefile 2003-10-13 02:34:45 @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/VAX serial UART drivers. +# + +obj-$(CONFIG_SERIAL_IPR) += ipr.o + diff -Nru a/drivers/vax/serial/ipr.c b/drivers/vax/serial/ipr.c --- a/drivers/vax/serial/ipr.c 1970-01-01 01:00:00 +++ b/drivers/vax/serial/ipr.c 2005-05-09 00:56:13 @@ -0,0 +1,483 @@ +/* + * UART driver for the internal console port in most VAX CPUs + * + * Most VAX CPU implementations have a serial console port which + * can be driven via 4 internal processor registers (IPRs), without + * having to care about the underlying hardware implementation. + * The are very simple devices, without modem control and no variable + * baud rates or character formats. + * + * This driver is derived from the ARM AMBA serial driver by + * + * + * BTW - this driver doesn't actually work yet. 
I don't get any + * boot-time output in SIMH - KPH 2003-10-13 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* FIXME: this should go into serial_core.h */ +#define PORT_VAX_IPR 100 + +/* Need to think about major/minor numbers for this driver */ +#define SERIAL_VAX_IPR_MAJOR 4 +#define SERIAL_VAX_IPR_MINOR 64 + +/* Register definitions */ + +#define PR_RXCS_RDY 0x0080 +#define PR_RXCS_IE 0x0040 + +#define PR_RXDB_ERROR 0x8000 +#define PR_RXDB_ID 0x0f00 +#define PR_RXDB_DATA 0x00ff + +#define PR_TXCS_RDY 0x0080 +#define PR_TXCS_IE 0x0040 + +#define PR_TXDB_ID 0x0f00 +#define PR_TXDB_DATA 0x00ff + +/* These vectors are defined by the VAX Architecture Reference Manual */ +#define IPRCONS_RX_VECTOR 0x3e +#define IPRCONS_TX_VECTOR 0x3f + +/******************************************************************* + * + * First we have the hardware handling code + * + */ + +static inline void iprcons_enable_rx_interrupts(void) +{ + __mtpr(PR_RXCS_IE, PR_RXCS); +} + +static inline void iprcons_disable_rx_interrupts(void) +{ + __mtpr(0, PR_RXCS); +} + +static inline void iprcons_enable_tx_interrupts(void) +{ + __mtpr(PR_TXCS_IE, PR_TXCS); +} + +static inline void iprcons_disable_tx_interrupts(void) +{ + __mtpr(0, PR_TXCS); +} + +static inline void iprcons_rx_char(struct uart_port *port, + unsigned int rxcs, unsigned int rxdb) +{ + struct tty_struct *tty = port->info->tty; + unsigned char ch = rxdb & PR_RXDB_DATA; + + if (tty->flip.count >= TTY_FLIPBUF_SIZE) { + tty->flip.work.func((void *)tty); + if (tty->flip.count >= TTY_FLIPBUF_SIZE) { + printk(KERN_WARNING "iprcons_rx_char: TTY_DONT_FLIP set\n"); + return; + } + } + + *tty->flip.char_buf_ptr = ch; + *tty->flip.flag_buf_ptr = TTY_NORMAL; + port->icount.rx++; + + /* FIXME: properly record receive errors signalled in RXCS */ + + if (port->ignore_status_mask == 0) { + tty->flip.flag_buf_ptr++; + tty->flip.char_buf_ptr++; + tty->flip.count++; + } + + 
tty_flip_buffer_push(tty); +} + +static irqreturn_t iprcons_rx_interrupt(int irq, void *dev, struct pt_regs *regs) +{ + unsigned int rxcs = __mfpr(PR_RXCS); + unsigned int rxdb = __mfpr(PR_RXDB); + struct uart_port *port = dev; + + if (rxcs & PR_RXCS_RDY) { + iprcons_rx_char(port, rxcs, rxdb); + } + + return IRQ_HANDLED; +} + +static inline void iprcons_tx_char(unsigned int c) +{ + __mtpr(c, PR_TXDB); +} + +static inline void iprcons_tx_ready(struct uart_port *port) +{ + struct circ_buf *xmit = &port->info->xmit; + + if (port->x_char) { + iprcons_tx_char(port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + return; + } + + iprcons_tx_char(xmit->buf[xmit->tail]); + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static irqreturn_t iprcons_tx_interrupt(int irq, void *dev, struct pt_regs *regs) +{ + unsigned int txcs = __mfpr(PR_TXCS); + struct uart_port *port = dev; + + if (txcs & PR_TXCS_RDY) { + iprcons_tx_ready(port); + } + + return IRQ_HANDLED; +} + + +/******************************************************************* + * + * Next comes the plumbing to hook us into the serial core + * + */ + +static void iprcons_stop_tx(struct uart_port *port, unsigned int tty_stop) +{ + /* Nothing to do - our "FIFO" is only 1 character deep */ +} + +static void iprcons_start_tx(struct uart_port *port, unsigned int tty_start) +{ + iprcons_disable_tx_interrupts(); + iprcons_enable_tx_interrupts(); +} + +static void iprcons_stop_rx(struct uart_port *port) +{ + /* Nothing to do - our "FIFO" is only 1 character deep */ +} + +static void iprcons_enable_ms(struct uart_port *port) +{ + /* Nothing to do - no modem control lines */ +} + +static unsigned int iprcons_tx_empty(struct uart_port *port) +{ + if (__mfpr(PR_TXCS) & PR_TXCS_RDY) { + return TIOCSER_TEMT; + } else { + return 0; + } +} + 
+static unsigned int iprcons_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR; +} + +static void iprcons_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* Nothing to do - no modem control lines */ +} + +static void iprcons_break_ctl(struct uart_port *port, int break_state) +{ + /* Cannot generate BREAK */ +} + +static int iprcons_startup(struct uart_port *port) +{ + unsigned int retval; + + retval = request_irq(IPRCONS_TX_VECTOR, iprcons_tx_interrupt, 0, "iprcons-tx", port); + if (retval) { + printk("iprcons: unable to acquire TX interrupt vector\n"); + } else { + retval = request_irq(IPRCONS_RX_VECTOR, iprcons_rx_interrupt, 0, "iprcons-rx", port); + if (retval) { + free_irq(IPRCONS_TX_VECTOR, port); + printk("iprcons: unable to acquire RX interrupt vector\n"); + } + } + + if (!retval) { + iprcons_enable_rx_interrupts(); + iprcons_enable_tx_interrupts(); + } + + return retval; +} + + +static void iprcons_shutdown(struct uart_port *port) +{ + iprcons_disable_rx_interrupts(); + iprcons_disable_tx_interrupts(); + + free_irq(IPRCONS_RX_VECTOR, port); + free_irq(IPRCONS_TX_VECTOR, port); +} + +static void iprcons_set_termios(struct uart_port *port, struct termios *termios, + struct termios *old) +{ + /* This port is not software configurable. It is fixed in + hardware to 9600, 8 bits, no parity, one stop bit. + (Actually - not completely true. The KA650 console has a + physical rotary switch for selecting the baud rate. But + we'll ignore this for now. */ + + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, CS8, 9600); + + /* + * Ignore all characters if CREAD is not set. 
+ */ + port->ignore_status_mask = 0; + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask = 1; + } + + spin_unlock_irqrestore(&port->lock, flags); +} + + +static const char *iprcons_type(struct uart_port *port) +{ + if (port->type == PORT_VAX_IPR) { + return "VAX CPU Console"; + } else { + return NULL; + } +} + +static void iprcons_release_port(struct uart_port *port) +{ + /* No memory or IO regions used */ +} + +static int iprcons_request_port(struct uart_port *port) +{ + /* No memory or IO regions used */ + return 0; +} + +/* + * Configure/autoconfigure the port. + */ +static void iprcons_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_VAX_IPR; + } +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). We don't let the + * user attempt to change IRQ or baud rate. + */ +static int iprcons_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_VAX_IPR) + ret = -EINVAL; + if (ser->irq != IPRCONS_RX_VECTOR) + ret = -EINVAL; + if (ser->baud_base != 9600) + ret = -EINVAL; + return ret; +} + +static struct uart_ops iprcons_pops = { + .tx_empty = iprcons_tx_empty, + .set_mctrl = iprcons_set_mctrl, + .get_mctrl = iprcons_get_mctrl, + .stop_tx = iprcons_stop_tx, + .start_tx = iprcons_start_tx, + .stop_rx = iprcons_stop_rx, + .enable_ms = iprcons_enable_ms, + .break_ctl = iprcons_break_ctl, + .startup = iprcons_startup, + .shutdown = iprcons_shutdown, + .set_termios = iprcons_set_termios, + .type = iprcons_type, + .release_port = iprcons_release_port, + .request_port = iprcons_request_port, + .config_port = iprcons_config_port, + .verify_port = iprcons_verify_port, +}; + +static struct uart_port iprcons_port = { + .membase = 0, + .mapbase = 0, + .iotype = SERIAL_IO_PORT, + .irq = IPRCONS_RX_VECTOR, + .uartclk = 0, + .fifosize = 1, + .ops = &iprcons_pops, + .flags = 0, + .line = 0, + .type = PORT_VAX_IPR, +}; + +static 
struct uart_driver iprcons_uart_driver = {
+	.owner			= THIS_MODULE,
+	.driver_name		= "ttyS",
+	.dev_name		= "ttyS",
+	.major			= SERIAL_VAX_IPR_MAJOR,
+	.minor			= SERIAL_VAX_IPR_MINOR,
+	.nr			= 1,
+};
+
+#ifdef CONFIG_SERIAL_CONSOLE
+
+static void iprcons_console_write(struct console *co, const char *p, unsigned int count)
+{
+	unsigned int old_inten_rx;
+	unsigned int old_inten_tx;
+
+	/*
+	 * First save the interrupt enable flag, then disable interrupts
+	 */
+
+	old_inten_rx = __mfpr(PR_RXCS) & PR_RXCS_IE;
+	old_inten_tx = __mfpr(PR_TXCS) & PR_TXCS_IE;
+
+	iprcons_disable_rx_interrupts();
+	iprcons_disable_tx_interrupts();
+
+	/*
+	 * Now, do each character
+	 */
+	while (count--) {
+
+		/* Ensure bits 31..8 are all 0 */
+		unsigned int c = *p++;
+
+		while ((__mfpr(PR_TXCS) & PR_TXCS_RDY) == 0) {
+			/* Busy wait */
+		}
+
+		__mtpr(c, PR_TXDB);
+
+		if (c == '\n') {
+			while ((__mfpr(PR_TXCS) & PR_TXCS_RDY) == 0) {
+				/* Busy wait */
+			}
+			__mtpr('\r', PR_TXDB);
+		}
+	}
+
+	/*
+	 * Finally, wait for transmitter to become empty
+	 * and restore the interrupt enables
+	 */
+	while ((__mfpr(PR_TXCS) & PR_TXCS_RDY) == 0) {
+		/* Busy wait */
+	}
+
+	__mtpr(old_inten_rx, PR_RXCS);
+	__mtpr(old_inten_tx, PR_TXCS);
+}
+
+static struct console iprcons_console = {
+	.name		= "ttyS",
+	.write		= iprcons_console_write,
+	.device		= uart_console_device,
+	.index		= -1,
+	.data		= &iprcons_uart_driver,
+};
+
+static int __init iprcons_console_init(void)
+{
+	iprcons_uart_driver.cons = &iprcons_console;
+	register_console(&iprcons_console);
+	return 0;
+}
+console_initcall(iprcons_console_init);
+
+#endif /* CONFIG_SERIAL_CONSOLE */
+
+static void __exit iprcons_exit(void)
+{
+	/*
+	 * FIXME: this is probably very broken. How should
+	 * we handle module removal with the driver model
+	 * and the serial core involved?
+ */ + uart_remove_one_port(&iprcons_uart_driver, &iprcons_port); + uart_unregister_driver(&iprcons_uart_driver); +} + +static int __init iprcons_probe(struct device *busdev) +{ + int ret; + + printk(KERN_INFO "Serial: VAX IPR CPU console driver $Revision: 1.11 $\n"); + + /* + * We are a platform device. We'll only get probed if + * the per-cpu init code registers a platform device called + * 'iprcons'. So it's safe to go ahead and register the + * UART driver here without checking the presence of any + * hardware. + */ + + ret = uart_register_driver(&iprcons_uart_driver); + if (ret == 0) { + uart_add_one_port(&iprcons_uart_driver, &iprcons_port); + } + return ret; +} + +static struct device_driver iprcons_driver = { + .name = "iprcons", + .bus = &platform_bus_type, + .probe = iprcons_probe, +}; + +static int __init iprcons_init(void) +{ + return driver_register(&iprcons_driver); +} + +module_init(iprcons_init); +module_exit(iprcons_exit); + +MODULE_AUTHOR("Kenn Humborg "); +MODULE_DESCRIPTION("VAX IPR CPU Console Driver $Revision: 1.11 $"); +MODULE_LICENSE("GPL"); + diff -Nru a/drivers/video/Kconfig b/drivers/video/Kconfig --- a/drivers/video/Kconfig 2005-06-17 21:48:29 +++ b/drivers/video/Kconfig 2005-07-31 19:28:22 @@ -612,6 +612,14 @@ (). Please see the file . +config FB_VAXLCG + tristate "VAX LCG framebuffer support" + depends on FB && VAX + select FB_SOFT_CURSOR + help + This is a very dumb framebuffer driver for the LCG (Low Cost + Graphics) framebuffer found on VAXstation 4000/60 systems. 
+ config FB_EPSON1355 bool "Epson 1355 framebuffer support" depends on (FB = y) && (SUPERH || ARCH_CEIVA) diff -Nru a/drivers/video/Makefile b/drivers/video/Makefile --- a/drivers/video/Makefile 2005-06-17 21:48:29 +++ b/drivers/video/Makefile 2005-07-24 23:36:48 @@ -92,6 +92,8 @@ obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o obj-$(CONFIG_FB_IMX) += imxfb.o +obj-$(CONFIG_FB_VAXLCG) += vaxlcgfb.o cfbcopyarea.o cfbfillrect.o cfbimgblt.o + # Platform or fallback drivers go here obj-$(CONFIG_FB_VESA) += vesafb.o obj-$(CONFIG_FB_VGA16) += vga16fb.o vgastate.o diff -Nru a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig --- a/drivers/video/console/Kconfig 2005-06-17 21:48:29 +++ b/drivers/video/console/Kconfig 2005-07-24 23:36:48 @@ -6,7 +6,7 @@ config VGA_CONSOLE bool "VGA text console" if EMBEDDED || !X86 - depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC32 && !SPARC64 && !M68K && !PARISC + depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC32 && !SPARC64 && !M68K && !VAX && !PARISC default y help Saying Y here will allow you to use Linux in text mode through a diff -Nru a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig --- a/drivers/video/logo/Kconfig 2005-06-17 21:48:29 +++ b/drivers/video/logo/Kconfig 2005-07-24 23:43:57 @@ -25,7 +25,7 @@ config LOGO_DEC_CLUT224 bool "224-color Digital Equipment Corporation Linux logo" - depends on LOGO && (MACH_DECSTATION || ALPHA) + depends on LOGO && (MACH_DECSTATION || ALPHA || VAX) default y config LOGO_MAC_CLUT224 diff -Nru a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c --- a/drivers/video/logo/logo.c 2005-06-17 21:48:29 +++ b/drivers/video/logo/logo.c 2005-07-31 18:23:48 @@ -60,15 +60,15 @@ logo = &logo_superh_vga16; #endif } - + if (depth >= 8) { #ifdef CONFIG_LOGO_LINUX_CLUT224 /* Generic Linux logo */ logo = &logo_linux_clut224; #endif #ifdef CONFIG_LOGO_DEC_CLUT224 - /* DEC Linux logo on MIPS/MIPS64 or ALPHA */ -#ifndef CONFIG_ALPHA + /* DEC Linux logo on 
MIPS/MIPS64, VAX or ALPHA */ +#ifdef CONFIG_MIPS if (mips_machgroup == MACH_GROUP_DEC) #endif logo = &logo_dec_clut224; diff -Nru a/drivers/video/vaxlcgfb.c b/drivers/video/vaxlcgfb.c --- a/drivers/video/vaxlcgfb.c 1970-01-01 01:00:00 +++ b/drivers/video/vaxlcgfb.c 2004-11-19 21:11:26 @@ -0,0 +1,678 @@ +/* + * ./linux/drivers/video/vaxlcgfb.c - Driver for VAX Low Cost Graphics Framebuffer + * + * Copyright (C) 2003,2004 by Jan-Benedict Glaw + * + * The initial skeleton was stolen from: + * pmagb-b-fb.c, (C) by Michael Engel , + * Karsten Merker , + * Harald Koerfgen. + * + * The sparse documentation that was available is partially misleading + * and wrong. Blaz Antonic (remove the underscores) + * actually figured out how to set the RAMDAC's colour lookup table. You need + * to program four things different to what documentation states, so many + * thanks to him for figuring out! + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Debugging cruft + */ +#define PROBE_DEBUG +#undef DEBUG_HELPERS + +#ifdef PROBE_DEBUG +#define PROBE_PRINTK(x...) printk(x) +#else +#define PROBE_PRINTK(x...) do {} while (0) +#endif + +/* + * Size constants + */ +#define BYTE 1 +#define KILOBYTE (1024 * BYTE) +#define MEGABYTE (1024 * KILOBYTE) + +/* + * Where is the LCG located? + */ +#define VAXLCGFB_BASE 0x21000000 + +/* + * Where is the LCG FB located? 
+ */ +#define VAXLCGFB_FB_BASE 0x21801000 +/* Size depends on screen resolution */ + +#define VAXLCGFB_LUT_BASE 0x21800800 +#define VAXLCGFB_LUT_SIZE (2 * KILOBYTE) +#define VAXLCGFB_LUT_OFFSET 0x800 + +#define VAXLCGFB_REG_BASE 0x20100000 +#define VAXLCGFB_REG_SIZE (16 * KILOBYTE) + + +#define REG_VIDEO_CONFIG 0x1e00 +#define REG_MEM_CONFIG 0x1800 +#define REG_GRAPHICS_CONFIG 0x1c90 +#define REG_LUT_CONSOLE_SEL 0x0ee0 +#define REG_LUT_CONTROL_BASE 0x0ee8 +#define REG_LUT_COLOUR_BASE 0x06e4 + +/* For debugging only: */ +#define REG_VIDEO_HTIMING 0x1e100 +#define REG_VIDEO_VTIMING 0x1e104 +#define REG_VIDEO_TIMING 0x1e108 + +#define GET_REG(X, reg) ((unsigned long) readl (((unsigned long) (X)->reg_base) + (reg))) +#define SET_REG(X, reg, val) (writel ((val), ((unsigned long) ((X)->reg_base)) + (reg))) + +enum vaxlcgfb_type { /* heads, x-res, y-res, bpp */ + VAXLCGFB_UNKNOWN, /* No device */ + + VAXLCGFB_46_1024x864x8_SH, /* 4000/60 */ + VAXLCGFB_46_1024x768x8_SH, + VAXLCGFB_46_1280x1024x4_SH, + VAXLCGFB_46_1280x1024x8_SH, + VAXLCGFB_46_1280x1024x8_DH, + + VAXLCGFB_48_1280x1024x8_SH, /* 4000/VLC */ + VAXLCGFB_48_1024x768x8_SH, + VAXLCGFB_48_640x480x8_SH, + VAXLCGFB_48_1024x864x8_SH, +}; + + +/* + * Framebuffer variables. The largest model is quad-head, so we need 4 of + * these to drive them all. Oh, remember this is the "Low Cost Graphics" + * DEC offered with their VAXstations. 
+ */ +static struct fb_info vaxlcgfb_fb_info[4]; +static struct fb_var_screeninfo vaxlcgfb_var_si; +static struct fb_fix_screeninfo vaxlcgfb_fix_si[4]; + +static struct vaxlcgfb_private { + int online; + enum vaxlcgfb_type type; + unsigned char *fb_base; + unsigned char *reg_base; + unsigned char *lut_base; +} vaxlcgfb_priv[4]; + + +#define TYPE(TYP, X, Y, BPP, HEADS) { \ + .type = (TYP), .x = (X), .y = (Y), .bpp = (BPP), \ + .heads = (HEADS), .name = __stringify(TYP), \ +} +static struct vaxlcgfb_phys_info { + enum vaxlcgfb_type type; + int x; + int y; + int bpp; + int heads; + char name[30]; +} vaxlcgfb_screen_info[] = { + TYPE (VAXLCGFB_46_1024x864x8_SH, 1024, 864, 8, 1), + TYPE (VAXLCGFB_46_1024x768x8_SH, 1024, 768, 8, 1), + TYPE (VAXLCGFB_46_1280x1024x4_SH, 1280, 1024, 4, 1), + TYPE (VAXLCGFB_46_1280x1024x8_SH, 1280, 1024, 8, 1), + TYPE (VAXLCGFB_46_1280x1024x8_DH, 1280, 1024, 8, 2), + TYPE (VAXLCGFB_48_1280x1024x8_SH, 1280, 1024, 8, 1), + TYPE (VAXLCGFB_48_1024x768x8_SH, 1024, 768, 8, 1), + TYPE (VAXLCGFB_48_640x480x8_SH, 640, 480, 8, 1), + TYPE (VAXLCGFB_48_1024x864x8_SH, 1024, 864, 8, 1), +}; + +static unsigned long +get_long (unsigned long address) +{ + unsigned long *virt; + unsigned long value; + + virt = ioremap (address, sizeof (unsigned long)); + if (!virt) { + printk (KERN_ERR "iomapping %p failed\n", (void *) address); + return 0; + } + + value = *virt; + PROBE_PRINTK (KERN_DEBUG "vaxlcgfb: probe: 0x%p(@0x%p) = 0x%08lx)\n", + (void *) address, virt, value); + iounmap (virt); + + return value; +} + +enum vaxlcgfb_type +vaxlcgfb_detect (void) +{ + unsigned long value1, value2; + enum vaxlcgfb_type type = VAXLCGFB_UNKNOWN; + + if (is_ka46 ()) { /* 4000/60 */ + value1 = get_long (0x200f0010) & 0xf0; + + if (value1 == 0x20 || value1 == 0x60) + type = VAXLCGFB_46_1024x864x8_SH; + else if (value1 == 0x40) + type = VAXLCGFB_46_1024x768x8_SH; + else if (value1 == 0x80) + type = VAXLCGFB_46_1280x1024x4_SH; + else if (value1 == 0x90) + type = 
VAXLCGFB_46_1280x1024x8_SH; + else if (value1 == 0xb0) + type = VAXLCGFB_46_1280x1024x8_DH; + } + + if (is_ka48 ()) { /* 4000/VLC */ + value1 = get_long (0x200f0000) & 0x07; + value2 = get_long (0x20020000) & 0x80; + + if (value1 == 0x05) + type = VAXLCGFB_48_1280x1024x8_SH; + else if (value1 == 0x06 && value2 == 0x80) + type = VAXLCGFB_48_1024x768x8_SH; + else if (value1 == 0x06 && value2 == 0x00) + type = VAXLCGFB_48_640x480x8_SH; + else if (value1 == 0x07 && value2 == 0x80) + type = VAXLCGFB_48_1024x768x8_SH; + else if (value1 == 0x07 && value2 == 0x00) + type = VAXLCGFB_48_1024x864x8_SH; + } + + printk (KERN_ERR "Detected LCG type is %d\n", type); + + return type; +} + + +static int +n_heads (enum vaxlcgfb_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE (vaxlcgfb_screen_info); i++) + if (vaxlcgfb_screen_info[i].type == type) + return vaxlcgfb_screen_info[i].heads; + + printk (KERN_ERR "Unknown # of heads for type=%d\n", type); + return 0; +} + +static int +vaxlcgfb_screen_exists (int screen, enum vaxlcgfb_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE (vaxlcgfb_screen_info); i++) { + if (vaxlcgfb_screen_info[i].type == type) { + if (screen + 1 <= n_heads (type)) + return 1; + else + return 0; + } + } + + printk (KERN_ERR "No screen #%d on type %d\n", screen, type); + return 0; +} + +static int +n_bpp (enum vaxlcgfb_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE (vaxlcgfb_screen_info); i++) + if (vaxlcgfb_screen_info[i].type == type) + return vaxlcgfb_screen_info[i].bpp; + + printk (KERN_ERR "Unknown bpp depth for type=%d\n", type); + return 0; +} + + +static int +xres (enum vaxlcgfb_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE (vaxlcgfb_screen_info); i++) + if (vaxlcgfb_screen_info[i].type == type) + return vaxlcgfb_screen_info[i].x; + + printk (KERN_ERR "Unknown X res for type=%d\n", type); + return 0; +} + +static int +yres (enum vaxlcgfb_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE (vaxlcgfb_screen_info); i++) + if 
(vaxlcgfb_screen_info[i].type == type) + return vaxlcgfb_screen_info[i].y; + + printk (KERN_ERR "Unknown Y res for type=%d\n", type); + return 0; +} + + +static int +vaxlcgfb_init_fix_info (struct fb_fix_screeninfo *info, enum vaxlcgfb_type type, + int nr) +{ + sprintf (info->id, "VAX LCGFB %d", nr); + info->type = FB_TYPE_PACKED_PIXELS; + info->visual = FB_VISUAL_PSEUDOCOLOR; + + info->smem_len = xres (type) * yres (type); /* Maybe only correct for 8bpp, I don't know how 1bpp and 4bpp is organized, really wrong for 24bpp if that exists */ + info->line_length = xres (type); + + return 0; +} + + +static int +vaxlcgfb_init_var_info (struct fb_var_screeninfo *info, enum vaxlcgfb_type type) +{ + info->bits_per_pixel = n_bpp (type); + info->red.length = n_bpp (type); + info->green.length = n_bpp (type); + info->blue.length = n_bpp (type); + info->activate = FB_ACTIVATE_NOW; + info->height = 400; + info->width = 500; + info->accel_flags = FB_ACCEL_NONE; + info->vmode = FB_VMODE_NONINTERLACED; + + info->xres = xres (type); + info->yres = yres (type); + info->xres_virtual = xres (type); + info->yres_virtual = yres (type); + + return 0; +} + +#ifdef DEBUG_HELPERS +static int +set_dot (struct fb_info *info, int x, int y, unsigned int colour) +{ + volatile unsigned char *dot; + + dot = (volatile unsigned char *) info->screen_base; + + dot += info->fix.line_length * y + x; + + *dot = (unsigned char) (colour & 0xff); + + return 0; +} + +/* + * (x1|y1) needs to be the upper left corner, + * (x2|y2) needs to be the lower right corner. 
+ */ +static int +draw_rectangle (struct fb_info *info, int x1, int y1, int x2, int y2, unsigned int colour) +{ + int x, y; + + for (x = x1; x <= x2; x++) { + set_dot (info, x, y1, colour); /* top horizontal line */ + set_dot (info, x, y2, colour); /* bottom horizontal line */ + } + + for (y = y1; y <= y2; y++) { + set_dot (info, x1, y, colour); /* left vertical line */ + set_dot (info, x2, y, colour); /* right vertical line */ + } + + return 0; +} + +static int +draw_some_bullshit (struct fb_info *info) +{ +#define RECT_SIZE 20 + int x = info->var.xres - 1; + int y = info->var.yres - 1; + + draw_rectangle (info, 0, 0, RECT_SIZE - 1, RECT_SIZE - 1, 1); /* top-left */ + draw_rectangle (info, x - RECT_SIZE, 0, x, RECT_SIZE - 1, 1); /* top-right */ + draw_rectangle (info, 0, y - RECT_SIZE, RECT_SIZE - 1, y, 1); /* bottom-left */ + draw_rectangle (info, x - RECT_SIZE, y - RECT_SIZE, x, y, 1); /* bottom-right */ + + draw_rectangle (info, 0, 0, x, y, 1); /* fullscreen square */ + + return 0; +} + +static void +vaxlcgfb_show_info (struct fb_info *info) +{ + struct vaxlcgfb_private *me = info->par; + unsigned long data; + + data = GET_REG (me, REG_MEM_CONFIG); + switch ((data >> 3) & 0x03) { + case 0x00: + printk (KERN_INFO "FB: 1Mx1 32MB max Memory Range\n"); + break; + case 0x02: + printk (KERN_INFO "FB: 128Kx8 4MB max Memory Range\n"); + break; + case 0x03: + printk (KERN_INFO "FB: 256Kx4 8MB max Memory Range\n"); + break; + default: + printk (KERN_WARNING "FB: Unknown configuration\n"); + break; + } + switch ((data >> 0) & 0x07) { + case 0x00: + printk (KERN_INFO "RAM: 32MB max Memory Range\n"); + break; + case 0x04: + printk (KERN_INFO "RAM: 56MB max Memory Range\n"); + break; + case 0x06: + printk (KERN_INFO "RAM: 80MB max Memory Range\n"); + break; + case 0x07: + printk (KERN_INFO "RAM: 104MB max Memory Range\n"); + break; + default: + printk (KERN_WARNING "RAM: Unknown configuration\n"); + break; + } + + printk (KERN_INFO "COL_BASE=0x%08lx\n", GET_REG (me, 
REG_LUT_COLOUR_BASE)); + printk (KERN_INFO "CTR_BASE=0x%08lx\n", GET_REG (me, REG_LUT_CONTROL_BASE)); + printk (KERN_INFO "VID_CONF=0x%08lx\n", GET_REG (me, REG_VIDEO_CONFIG)); + printk (KERN_INFO "MEM_CONF=0x%08lx\n", GET_REG (me, REG_MEM_CONFIG)); + printk (KERN_INFO "GRA_CONF=0x%08lx\n", GET_REG (me, REG_GRAPHICS_CONFIG)); + printk (KERN_INFO "VTIMING =0x%08lx\n", GET_REG (me, REG_VIDEO_VTIMING)); + printk (KERN_INFO "HTIMING =0x%08lx\n", GET_REG (me, REG_VIDEO_HTIMING)); + printk (KERN_INFO "TIMING =0x%08lx\n", GET_REG (me, REG_VIDEO_TIMING)); +} +#endif /* DEBUG_HELPERS */ + +static int +vaxlcgfb_setcolreg (unsigned int regno, unsigned int red, + unsigned int green, unsigned int blue, unsigned int transp, + struct fb_info *info) +{ + struct vaxlcgfb_private *me = info->par; + unsigned char reg = regno & 0xff; + volatile unsigned char *entry = me->lut_base + 8 * reg; + unsigned long data; + + /* + * Write LUT entry. + */ + entry[0] = 0x00; + entry[1] = reg; + entry[2] = 0x01; + entry[3] = (red >> 8) & 0xff; + entry[4] = 0x01; + entry[5] = (green >> 8) & 0xff; + entry[6] = 0x01; + entry[7] = (blue >> 8) & 0xff; + + /* + * Reload LUT. + */ + data = 0; + data |= 3 << 30; /* 2 - VIDEO_VSTATE */ + data |= 3 << 28; /* 2 - VIDEO_HSTATE */ + data |= 0 << 26; /* 2 - unused */ + data |= 0 << 25; /* 1 - VIDEO_CONSOLE_LUT_SELECT */ + data |= 0 << 24; /* 1 - VIDEO_CONTROL_LUT_SELECT */ + data |= 0 << 23; /* 1 - unused */ + data |= 0 << 22; /* 1 - VIDEO_CURSOR_ACTIVE */ + data |= 0 << 16; /* 6 - VIDEO_CURSOR_SCANLINE */ + data |= 0 << 15; /* 1 - VIDEO_RESET */ + data |= 0 << 14; /* 1 - unused */ + data |= 1 << 13; /* 1 - VIDEO_LUT_LOAD_SIZE */ + data |= 1 << 12; /* 1 - unused / VIDEO_SYNC_ENABLE_H. Aka. 
"the flicker bit" */ + data |= 1 << 11; /* 1 - VIDEO_LUT_SHIFT_SEL (full/split load for LUTs) */ + data |= 1 << 10; /* 1 - VIDEO_CLOCK_SEL */ + data |= 2 << 8; /* 2 - VIDEO_MEM_REFRESH_SEL */ + data |= 1 << 6; /* 2 - VIDEO_REFRESH_SEL */ + data |= 0 << 5; /* 1 - VIDEO_SHIFT_SEL (1=full, 0=split load) - maybe 1 would be better */ + data |= 1 << 4; /* 1 - VIDEO_CURSOR_PIN_TYPE */ + data |= 1 << 3; /* 1 - VIDEO_LOT_LOAD_ENABLE */ + data |= 0 << 2; /* 1 - VIDEO_CURSOR_ENABLE */ + data |= 1 << 1; /* 1 - VIDEO_ENABLE_VIDEO */ + data |= 1 << 0; /* 1 - VIDEO_TIMING_ENABLE */ + + SET_REG (me, REG_VIDEO_CONFIG, data); /* Signal reload */ + SET_REG (me, REG_LUT_CONSOLE_SEL, 1); /* Start reload */ + /* FIXME: Possibly the next one only needs to be set once */ + SET_REG (me, REG_LUT_COLOUR_BASE, VAXLCGFB_LUT_OFFSET); +#if 0 + /* FIXME: Sleeping here *may* be neccessary, but it wasn't for me */ + mdelay (10); + /* FIXME: I didn't need to reset this flag to zero */ + SET_REG (me, REG_LUT_CONSOLE_SEL, 0); /* Done with reload */ +#else + SET_REG (me, REG_LUT_CONSOLE_SEL, 0); /* Start reload */ +#endif + + return 0; +} + +static struct fb_ops vaxlcgfb_fb_ops = { + .owner = THIS_MODULE, + .fb_setcolreg = vaxlcgfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_cursor = soft_cursor, +}; + + +static int __init +vaxlcgfb_init (void) +{ + unsigned char *n_lut_base; + unsigned char *n_reg_base; + unsigned char *n_fb_base; + enum vaxlcgfb_type type; + int base_fb = -1; + int i; + + printk (KERN_INFO "%s ()\n", __FUNCTION__); + + type = vaxlcgfb_detect (); + if (type == VAXLCGFB_UNKNOWN) { + printk (KERN_ERR "No VAXLCGFB found\n"); + return -ENODEV; + } + + n_reg_base = ioremap (VAXLCGFB_REG_BASE, VAXLCGFB_REG_SIZE); + if (!n_reg_base) { + printk (KERN_ERR "Could not ioremap() register memory\n"); + return -ENXIO; + } + + n_lut_base = ioremap (VAXLCGFB_LUT_BASE, VAXLCGFB_LUT_SIZE); + if (!n_lut_base) { + printk (KERN_ERR "Could 
not ioremap() LUT memory\n"); + iounmap (n_reg_base); + return -ENXIO; + } + + vaxlcgfb_init_var_info (&vaxlcgfb_var_si, type); + + for (i = 0; i < 4; i++) { + /* Iterate through all four possible FBs */ + + if (!vaxlcgfb_screen_exists (i, type)) + continue; + + vaxlcgfb_init_fix_info (&vaxlcgfb_fix_si[i], type, i); + + n_fb_base = ioremap (VAXLCGFB_FB_BASE + + i * xres (type) * yres (type), + xres (type) * yres (type)); + if (!n_fb_base) { + printk (KERN_ERR "Could not ioremap() FB(%d) memory\n", i); + continue; + } + + vaxlcgfb_fix_si[i].smem_start = (unsigned long) n_fb_base; + vaxlcgfb_priv[i].fb_base = (unsigned char *) n_fb_base; + vaxlcgfb_priv[i].reg_base = n_reg_base; + vaxlcgfb_priv[i].lut_base = n_lut_base; + vaxlcgfb_priv[i].type = type; + + /* + * Let there be consoles.. + */ + vaxlcgfb_fb_info[i].fbops = &vaxlcgfb_fb_ops; + vaxlcgfb_fb_info[i].var = vaxlcgfb_var_si; + vaxlcgfb_fb_info[i].fix = vaxlcgfb_fix_si[i]; + vaxlcgfb_fb_info[i].screen_base = n_fb_base; + vaxlcgfb_fb_info[i].flags = FBINFO_FLAG_DEFAULT; + vaxlcgfb_fb_info[i].par = &vaxlcgfb_priv[i]; + + fb_alloc_cmap (&vaxlcgfb_fb_info[i].cmap, 256, 0); + + if (register_framebuffer (&vaxlcgfb_fb_info[i]) < 0) { + iounmap (n_fb_base); + fb_dealloc_cmap (&vaxlcgfb_fb_info[i].cmap); + continue; + } + + base_fb = i; + vaxlcgfb_priv[i].online = 1; + } + +#ifdef DEBUG_HELPERS + vaxlcgfb_show_info (&vaxlcgfb_fb_info[0]); + + for (i = 0; i < 4; i++) { + if (vaxlcgfb_priv[i].online) + draw_some_bullshit (&vaxlcgfb_fb_info[i]); + } + + { + volatile unsigned char *pixel; + int x, y; + + for (i = 0; i < 4; i++) { + /* Draw a square on all FBs */ + if (vaxlcgfb_priv[i].online) { + pixel = vaxlcgfb_priv[i].fb_base; + for (y = 500; y < 550; y++) + for (x = 500; x < 550; x++) + *(pixel + (y * xres (type)) + x) = 1; + } + } + + printk (KERN_ERR "Done with pattern\n"); + mdelay (5000); + + if (base_fb != -1) { + /* Cycle colours */ + int shade; + + for (shade = 0; shade < 256; shade++) { + vaxlcgfb_setcolreg (1, 
shade << 8, + shade << 8, shade << 8, + 0, &vaxlcgfb_fb_info[base_fb]); + if (shade % 5 == 0) + mdelay (20); + + } + } + } +#endif /* DEBUG_HELPERS */ + + /* + * Display what we've found + */ + { + int base_fb = -1; + struct vaxlcgfb_phys_info *entry = NULL; + + /* Find a filled slot */ + for (i = 0; i < 4; i++) + if (vaxlcgfb_priv[i].online) + base_fb = i; + + /* Get in info structure therefor */ + for (i = 0; i < ARRAY_SIZE (vaxlcgfb_screen_info); i++) { + if (vaxlcgfb_screen_info[i].type == + vaxlcgfb_priv[base_fb].type) { + entry = &vaxlcgfb_screen_info[i]; + } + } + + /* Display infos */ + if (entry) { + printk (KERN_INFO "vaxlcgfb: Found FB: %s (= %d)\n", + entry->name, entry->type); + printk (KERN_INFO "vaxlcgfb: x = %d\n", entry->x); + printk (KERN_INFO "vaxlcgfb: y = %d\n", entry->y); + printk (KERN_INFO "vaxlcgfb: bpp = %d\n", entry->bpp); + printk (KERN_INFO "vaxlcgfb: head = %d\n", entry->heads); + } + } + + return 0; +} + +static void __exit +vaxlcgfb_exit (void) +{ + int i; + unsigned char *lut = NULL; + unsigned char *regs = NULL; + + printk (KERN_INFO "%s ()\n", __FUNCTION__); + + for (i = 0; i < 4; i++) { + if (!vaxlcgfb_priv[i].online) + continue; + + unregister_framebuffer (&vaxlcgfb_fb_info[i]); + fb_dealloc_cmap (&vaxlcgfb_fb_info[i].cmap); + iounmap (vaxlcgfb_priv[i].fb_base); + + /* Get LUT and REG base from any fb - they're all the same */ + lut = vaxlcgfb_priv[i].lut_base; + regs = vaxlcgfb_priv[i].reg_base; + } + + if (lut) + iounmap (lut); + if (regs) + iounmap (regs); + + return; +} + +MODULE_LICENSE ("GPL"); +MODULE_AUTHOR ("Jan-Benedict Glaw "); +MODULE_DESCRIPTION ("Module for the VAX LCG framebuffer option found on " + "4000/60 and 4000/VLC like machines"); + +module_init (vaxlcgfb_init); +module_exit (vaxlcgfb_exit); + diff -Nru a/fs/Kconfig b/fs/Kconfig --- a/fs/Kconfig 2005-06-17 21:48:29 +++ b/fs/Kconfig 2005-07-24 23:36:48 @@ -1308,6 +1308,14 @@ Say Y here if you want to try writing to UFS partitions. 
This is experimental, so you should back up your UFS partitions beforehand. +config ODS2_FS + tristate "VMS ODS-2 filesystem support (read-only)" + depends on EXPERIMENTAL + default n + help + Say Y if you want to read ODS-2 filesystems, which are normally used + by the VMS operating system. + endmenu menu "Network File Systems" diff -Nru a/fs/Makefile b/fs/Makefile --- a/fs/Makefile 2005-06-17 21:48:29 +++ b/fs/Makefile 2005-03-28 01:46:47 @@ -92,6 +92,7 @@ obj-$(CONFIG_XFS_FS) += xfs/ obj-$(CONFIG_AFS_FS) += afs/ obj-$(CONFIG_BEFS_FS) += befs/ +obj-$(CONFIG_ODS2_FS) += ods2/ obj-$(CONFIG_HOSTFS) += hostfs/ obj-$(CONFIG_HPPFS) += hppfs/ obj-$(CONFIG_DEBUG_FS) += debugfs/ diff -Nru a/fs/ods2/CHANGES b/fs/ods2/CHANGES --- a/fs/ods2/CHANGES 1970-01-01 01:00:00 +++ b/fs/ods2/CHANGES 2004-09-22 08:25:50 @@ -0,0 +1,46 @@ +Changes from version 0.9.4c to 0.9.4d +===================================== + - Use Linux-2.6.x style option parsing infrastructure. + +Changes from version 0.9.4b to 0.9.4c +===================================== + - Removal for the argument parsing typedefs. + +Changes from version 0.9.4a to 0.9.4b +===================================== + - Removal of all typedefs. The source now properly uses + struct xxx instead of XXX + - Still compiles, but not tested even with one filesystem image. + +Changes from version 0.9.3 to 0.9.4a +==================================== + - Quick'n'dirty port from 2.4.x to 2.6.x + - Does compile cleanly, but contains known errors and for + sure won't work. This is a pure development release! + - Porting work done by Jan-Benedict Glaw + +Changes from version 0.9.2 to version 0.9.3 +=========================================== + - Added code to handle hard sector size of 1024 and 2048 bytes. + - Fix bug when checking RMS file type and record type. + - Files of type FIXED will now be treated as STREAM files. 
+ +Changes from version 0.9.1 to version 0.9.2 +=========================================== + - Added check of hard sector size before trying to mount. + Current version of this driver only supports 512 byte sectors. + - Fix bug in ods2_llseek_variable. The fixed record size + was not removed from variable coffs giving wrong offsets. + Another bug was that the VFC value was not taken into account + when testing if the location was found. + Yet another bug was the check for the end of file for variable + record files. + - Updated all definitions of vbn and lbn to use u32. + - Updated all definitions of currec to u64. + - Updated all structures to use u8, u16 and u32 so it will + work on 64 bits architectures such as Alpha. + +Changes from version 0.9.0 to version 0.9.1 +=========================================== + - This is a special version for kernel 2.2.X + - Changed all long unsigned in structures to u32. diff -Nru a/fs/ods2/Makefile b/fs/ods2/Makefile --- a/fs/ods2/Makefile 1970-01-01 01:00:00 +++ b/fs/ods2/Makefile 2004-09-23 15:43:02 @@ -0,0 +1,8 @@ +# +# Makefile for the Linux ods2 filesystem implementation. +# + +obj-$(CONFIG_ODS2_FS) += ods2.o + +ods2-objs := super.o inode.o file.o dir.o util.o + diff -Nru a/fs/ods2/README b/fs/ods2/README --- a/fs/ods2/README 1970-01-01 01:00:00 +++ b/fs/ods2/README 2004-09-19 11:47:49 @@ -0,0 +1,42 @@ +Overview +======== + +The ODS2 driver for Linux version 0.9.2 supports reading of ODS2 formatted +file systems such as SCSI disks, CDROM's, container disks that have been +formatted using OpenVMS. +Currently only stream and variable record files are supported. + + +Limitations and workarounds +=========================== + +The nature of variable record files makes it hard for utilities like less +to find the correct end. The less utility assumes that the file size is the +true end of the last byte of data in the file but for a variable record +file the file size also includes bytes not to be displayed. 
+The major problem is that if you use less and then hit the End key less +will try to go beyond the end of the virtual position. +A simple way around this is to use cat on the file and pipe that to less. +By this method you will go to end of file when you hit the End key for less. + +Utilities like less use llseek to find their positions in the file. +Without any workaround less would end up in the wrong position in +the file. +To solve the problem the ODS2 driver keeps track of the virtual position +in the file (the position less knows about) and creates checkpoints every 64K +position. By these checkpoints function llseek can find its position by +searching from the closest position instead of reading all records from start. + +License +======= + +The ODS2 source code is released under GNU General Public License (GPL). + +Author +====== + +The ODS2 driver is written by me, Jonas Lindholm, and I was doing it for fun and +to get deeper knowledge of file systems on Linux. +It is also a challenge to get it to work and it seems that people are looking +for an ODS file system module for Linux. +Later versions will support write as well. diff -Nru a/fs/ods2/TODO b/fs/ods2/TODO --- a/fs/ods2/TODO 1970-01-01 01:00:00 +++ b/fs/ods2/TODO 2004-09-22 08:25:50 @@ -0,0 +1,3 @@ +- Check sector loading code (FS blocks vs. hard sectors) +- Put pointer/array arithmetic into function-like looking macros +- Add write support :-) diff -Nru a/fs/ods2/dir.c b/fs/ods2/dir.c --- a/fs/ods2/dir.c 1970-01-01 01:00:00 +++ b/fs/ods2/dir.c 2004-09-22 09:14:35 @@ -0,0 +1,182 @@ +/* + * linux/fs/ods2/dir.c + * + * COPYRIGHT + * This file is distributed under the terms of the GNU General Public + * License (GPL). Copies of the GPL can be obtained from: + * http://www.gnu.org/licenses/gpl.txt + * Each contributing author retains all rights to their own work. 
+ * + * Written 2003 by Jonas Lindholm + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ods2.h" + +/* + * This routine return one or more file names for a directory file. + * For an ODS2 file structure each file name can have one or more + * versions of a file, each file must be treated as a unique file. + */ +int +ods2_readdir (struct file *filp, void *dirent, filldir_t filldir) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct buffer_head *bh = NULL; + struct ods2sb *ods2p = ODS2_SB (sb); + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + loff_t pos = filp->f_pos; + u32 vbn = (ods2filep->currec >> 9); /* get current VBN-1 to use */ + u32 lbn; + char cdirname[256]; /* FIXME: large stack useage!!! */ + + memset (cdirname, ' ', sizeof (cdirname)); + + /* + * When there are no more files to return the file position in file + * is set to -1. + */ + if (pos == -1) + return 0; + + /* + * When we get called the first time for a directory file, the file + * position is set to 0. We must then return two fake entries, + * "." for the current directory and ".." for the parent directory. + */ + if (pos == 0) { + filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR); + filldir(dirent, "..", 2, 1, ods2fhp->parent, DT_DIR); + ods2filep->currec = 0; + ods2filep->curbyte = 0; + vbn = 0; + } + + /* + * As long we can translate the virtual block number, VBN, to a + * logical block number, LBN, and read the block we continue to loop. + */ + while (vbn * 512 < inode->i_size + && (lbn = vbn2lbn(sb, ods2fhp->map, vbn + 1)) > 0 + && (bh = sb_bread(sb, GETBLKNO(sb, lbn))) != NULL && + bh->b_data != NULL) { + u16 *recp = (short unsigned *)((char *)(GETBLKP(sb, lbn, bh->b_data)) + (ods2filep->currec & 511)); + + /* + * For a ODS2 directory each block contains 1 to 62 directory + * entries. 
Note that a directory entry can not span between + * two or more blocks. We should be able to use the routine + * to read variable block size but because directory file is + * so specific we do our own block decoding here. + * When there are no more directory entries in the current + * block, the record length -1 is inserted as the last + * record. + */ + while (*recp != 65535 + && *recp <= 512 + && ods2filep->currec < inode->i_size) { + struct dirdef *dire = (struct dirdef *) recp; + char dirname[dire->u1.s1.dir_b_namecount + 1]; /* FIXME: Check stack useage */ + + memcpy(dirname, &dire->u1.s1.dir_t_name, dire->u1.s1.dir_b_namecount); + dirname[dire->u1.s1.dir_b_namecount] = 0; + + if (ods2p->dollar != '$' || ods2p->flags.v_lowercase) { + char *p = dirname; + char cnt = dire->u1.s1.dir_b_namecount; + + while (*p && cnt-- > 0) { + if (*p == '$') { + *p = ods2p->dollar; + } + if (ods2p->flags.v_lowercase) { + *p = tolower (*p); + } + p++; + } + } + if (ods2filep->curbyte == 0) { + ods2filep->curbyte = ((dire->u1.s1.dir_b_namecount + 1) & ~1) + 6; + } + filp->f_pos = ods2filep->currec + ods2filep->curbyte; + + while (ods2filep->curbyte < dire->u1.s1.dir_w_size + && !(ods2p->flags.v_version != SB_M_VERSALL + && strlen(dirname) == strlen(cdirname) + && strncmp(dirname, cdirname, strlen(dirname)) == 0)) { + struct dirdef *dirv = (struct dirdef *) ((char *)dire + ods2filep->curbyte); + u32 ino = (dirv->u1.s2.u2.s3.fid_b_nmx << 16) | le16_to_cpu(dirv->u1.s2.u2.s3.fid_w_num); + /* FIXME: Check stack useage! */ + char dirnamev[dire->u1.s1.dir_b_namecount + 1 + 5 + 1]; + + if (ino != 4) { /* we must ignore 000000.DIR as it is the same as . */ + if (ods2p->flags.v_version == SB_M_VERSNONE) + sprintf(dirnamev, "%s", dirname); + else + sprintf(dirnamev, "%s%c%d", dirname, ods2p->semicolon, dirv->u1.s2.dir_w_version); + + /* + * We don't really know if the file is a directory by just checking + * the file extension but it is the best we can do. 
+ * Should the file have extension .DIR but be a regular file the mistake + * will be detected later on when the user try to walk down into + * the false directory. + */ + if (filldir(dirent, dirnamev, strlen(dirnamev), filp->f_pos, ino, + (strstr(dirnamev, (ods2p->flags.v_lowercase ? ".dir." : ".DIR")) == NULL ? DT_REG : DT_DIR))) { + /* + We come here when filldir is unable to handle more entries. + */ + + brelse(bh); + return 0; + } + if (ods2p->flags.v_version != SB_M_VERSALL) { + strcpy(cdirname, dirname); + } + } + if (ods2p->flags.v_version == SB_M_VERSALL) { + ods2filep->curbyte += 8; + filp->f_pos += 8; + } else { + ods2filep->curbyte = le16_to_cpu(dire->u1.s1.dir_w_size); + filp->f_pos += dire->u1.s1.dir_w_size; + } + } + + /* + When we come here there are no more versions for the file name. + We then reset our current byte offset and set current record offset + to the next directory entry. + */ + + ods2filep->curbyte = 0; + ods2filep->currec += le16_to_cpu(dire->u1.s1.dir_w_size) + 2; + recp = (u16 *)((char *)recp + le16_to_cpu(dire->u1.s1.dir_w_size) + 2); + } + + /* + When we come here there are no more directory entries in the current block + and we just release the buffer and increase the VBN counter. + */ + + brelse(bh); + vbn++; + ods2filep->currec = vbn * 512; + } + filp->f_pos = -1; /* this mark that we have no more files to return */ + return 0; +} + diff -Nru a/fs/ods2/file.c b/fs/ods2/file.c --- a/fs/ods2/file.c 1970-01-01 01:00:00 +++ b/fs/ods2/file.c 2004-09-22 09:14:35 @@ -0,0 +1,683 @@ +/* + * linux/fs/ods2/file.c + * + * COPYRIGHT + * This file is distributed under the terms of the GNU General Public + * License (GPL). Copies of the GPL can be obtained from: + * http://www.gnu.org/licenses/gpl.txt + * Each contributing author retains all rights to their own work. + * + * Written 2003 by Jonas Lindholm + * + * Changes: 0.9.2 - A lot of bug fixes for keeping track of + * virtual position for variable record files. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ods2.h" + +/* + FUNCTION: + + This routine take care of ioctl command for an open file. + It is possible to put a file into raw mode independing if raw mode was + selected or not during the file system mount. This is used by the rms library. + + INPUT: + + *inode pointer to inode structure for the open file. + + *filp pointer to file structure for the open file. + + cmd ODS2 specific command. + + arg argument for the command. + + OUTPUT: + + 0 if everything went ok. + + -ENOTTY for invalid cmmand. + + Other negativ values for different errors. + + IMPLICIT: + + None. + +*/ + +int +ods2_file_ioctl (struct inode *inode, struct file *filp, int unsigned cmd, + long unsigned arg) +{ + struct super_block *sb = inode->i_sb; + struct ods2sb *ods2p = ODS2_SB (sb); + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + int error = -ENOTTY; + int onoff; + + switch (cmd) { + case ODS2_IOC_FISETRAW: + if ((error = get_user(onoff, (int *)arg)) == 0) { + ods2filep->u1.s1.v_raw = (onoff == 1); + } + break; + + case ODS2_IOC_FIGETRAW: + onoff = ods2filep->u1.s1.v_raw; + error = put_user(onoff, (int *)arg); + break; + + case ODS2_IOC_SBGETRAW: + onoff = ods2p->flags.v_raw; + error = put_user(onoff, (int *)arg); + break; + } + return error; +} + +/* + FUNCTION: + + This routine update the memory structure used to keep track of the virtual + position in a variable record file. + + INPUT: + + loff virtual position. + + *ods2vari pointer to memory structure used to keep tracj of position. + + currec current record position in file. This is the offset in bytes + from the start of the file. + + OUTPUT: + + 1 if the update was successful. + + 0 if something went wrong such as memory allocaion. + + IMPLICIT: + + The only requirement is that a linked list of varp structures are at least + the number of entries allocated by macro IDXBLOCK. 
+ + */ + +int +update_virtual_file_pos (loff_t loff, struct ods2vari *ods2vari, u64 currec) +{ + struct ods2var *ods2varp; + int idxvar = IDXVAR (loff); + int idxvari = IDXVARI (loff); + int idxblock = IDXBLOCK (loff); + + if (!ods2vari->ods2varp[idxvari]) { + ods2vari->ods2varp[idxvari] = kmalloc (sizeof (struct ods2var), GFP_KERNEL); + if (ods2vari->ods2varp[idxvari]) + memset(ods2vari->ods2varp[idxvari], 0, sizeof (struct ods2var)); + else { + printk("ODS2-fs kmalloc failed for new varp (1)\n"); + return 0; + } + } + ods2varp = ods2vari->ods2varp[idxvari]; + for (; idxblock > 0; idxblock--) { + if (ods2varp->nxt == NULL) { + ods2varp->nxt = kmalloc (sizeof (struct ods2var), GFP_KERNEL); + if (ods2varp->nxt) + memset(ods2varp->nxt, 0, sizeof (struct ods2var)); + else { + printk("ODS2-fs kmalloc failed for new varp (2)\n"); + return 0; + } + } + ods2varp = ods2varp->nxt; + } + if (ods2varp && ods2varp->s1[idxvar].loff == 0) { + ods2varp->s1[idxvar].recoffs = currec; + ods2varp->s1[idxvar].loff = loff; + ods2vari->highidx = loff; + } + return 1; +} + +/* + FUNCTION: + + This routine take care of reading of variable record files. + This routine will add a LF after each record if one of the following + record attributes are set: FAT_M_FORTRANCC, FAT_M_IMPLIEDCC, FAT_M_PRINTCC. + + Note that a correct handling of all record structures should be able + to handle form feed and insertion of more than one LF after each record. + All this extra functionality must be handled outside of this driver. + + It will also handle the FAT_M_NOSPAN. This attributes indicates that no + record must span between blocks. In each block a record with length + 65535 (-1) is inserted to indicate that there are no more records in the + current block. + + INPUT: + + *filp pointer to the file. + + *buf buffer where to return data. + + buflen size of buf area. + + *loff virtual position in file where to read from. + + OUTPUT: + + The number of bytes read. + 0 if no bytes was read. 
+ + IMPLICIT: + + The ODS2 specific part of the file header must have a ODS2VARI structure + attached to it. + + +*/ + +ssize_t +ods2_read_variable(struct file *filp, char *buf, size_t buflen, loff_t *loff) +{ + struct inode *inode = filp->f_dentry->d_inode; + char *buforg = buf; + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + struct fatdef *fatp = (struct fatdef *) &(ods2fhp->fat); + struct ods2vari *ods2vari = ods2fhp->ods2vari; + u32 vbn = 0; + u16 cpylen; + + + if (*loff == 0) { + ods2filep->currec = 0; + ods2filep->curbyte = 0; + ods2filep->reclen = 0; + } + + if (ods2filep->reclen == 65535) { + brelse(ods2filep->bhp); + ods2filep->bhp = NULL; + return 0; + } + + while (1) { + + /* + We need to loop until the calculated value of currec offset plus currect byte offset from currec give + the same VBN as the last one we fetched. + There is one case when we will loop. That case is when a record start is at the last two bytes of the + block. In that case the length will be fetched from current block but all data will start on next block. + */ + + do { + vbn = (ods2filep->currec + ods2filep->curbyte) >> 9; + if (!(getfilebh(filp, vbn + 1))) { + ods2filep->reclen = 65535; + return (buf - buforg); + } + + /* + If curbyte is zero we will start on a new record. 
+ */ + + if (ods2filep->curbyte == 0) { + ods2filep->reclen = le16_to_cpu(*((u16 *)((char *)ods2filep->data + (ods2filep->currec & 511)))); + + if ((*loff >> 16) != 0) { + down(&(ods2vari->sem)); + update_virtual_file_pos(*loff, ods2vari, ods2filep->currec); + up(&(ods2vari->sem)); + } + + if ((ods2filep->reclen == 65535 && !(fatp->fat_b_rattrib & FAT_M_NOSPAN)) || + (ods2filep->currec >= inode->i_size)) { /* end of records */ + + ods2filep->reclen = 65535; + return (buf - buforg); + } + + if (ods2filep->reclen == 65535 && (fatp->fat_b_rattrib & FAT_M_NOSPAN)) { + ods2filep->currec = (vbn + 1) * 512; /* could be a new record at next block */ + } else { + ods2filep->curbyte = 2; + ods2filep->curbyte += (fatp->u0.s0.fat_v_rtype == FAT_C_VFC ? fatp->fat_b_vfcsize : 0); + } + } + } while (((ods2filep->currec + ods2filep->curbyte) >> 9) != vbn); + + cpylen = MIN(MIN((ods2filep->reclen - ods2filep->curbyte + 2), buflen), (512 - ((ods2filep->currec + ods2filep->curbyte) & 511))); + + if (cpylen > 0) { + u8 *recp = (u8 *)((char *)ods2filep->data + ((ods2filep->currec + ods2filep->curbyte) & 511)); + + memcpy(buf, recp, cpylen); + *loff += cpylen; /* loff will always be a virtual offset for a variable record file */ + buf += cpylen; + buflen -= cpylen; + ods2filep->curbyte += cpylen; + } + + if (ods2filep->curbyte - 2 == ods2filep->reclen) { + if (buflen > 0) { + if (fatp->fat_b_rattrib & FAT_M_FORTRANCC || fatp->fat_b_rattrib & FAT_M_IMPLIEDCC || fatp->fat_b_rattrib & FAT_M_PRINTCC) { + buflen--; + *buf++ = '\n'; + *loff += 1; + } + ods2filep->currec = ((ods2filep->currec + ods2filep->reclen + 1) & ~1) + 2; /* each record is always even aligned */ + ods2filep->curbyte = 0; + } + } + + if (buflen == 0) { return (buf - buforg); } + } +} + +/* + FUNCTION: + + This routine is invoked when the file type is one of STREAM, STREAMLF or STREAMCR. + For a non-RMS machine that doesn't know anything about records these three formats + are the same. 
+ For RMS the different between these formats is the following: + + STREAM: Records are delimited by FF, VT, LF, or CRLF. + STREAMLF: Records are delimited by LF. + STREAMCR: Records are delimited by CR. + + Note that we can not use generic read routines even if we treat the data as just a + stream of bytes because the way we need to translate from VBN to LBN. + + INPUT: + + *filp pointer to the file. + + *buf buffer where to return data. + + buflen size of buf area. + + *loff virtual position in file where to read from. + + OUTPUT: + + The number of bytes read. + 0 if no bytes was read. + + IMPLICIT: + + None. +*/ + +ssize_t +ods2_read_stream(struct file *filp, char *buf, size_t buflen, loff_t *loff) +{ + struct inode *inode = filp->f_dentry->d_inode; + char *buforg = buf; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + u32 vbn = 0; + u16 cpylen; + + while (*loff < inode->i_size) { + vbn = *loff >> 9; + if (!(getfilebh(filp, vbn + 1))) { + *loff = inode->i_size; + return (buf - buforg); + } + if ((cpylen = MIN(MIN(inode->i_size - *loff, buflen), 512 - (*loff & 511))) > 0) { + u8 *recp = (u8 *)((char *)ods2filep->data + (*loff & 511)); + + memcpy(buf, recp, cpylen); + *loff += cpylen; + buf += cpylen; + buflen -= cpylen; + if (buflen == 0) { + return (buf - buforg); + } + } + } + brelse(ods2filep->bhp); + ods2filep->bhp = NULL; + return (buf - buforg); +} + +/* + FUNCTION: + + This routine is called when a read request is done for any file. + The routine will invoke one of two functions. One function is for + files of STREAM types. + The other routine is for VARIABLE record files. + File of type RELATIVE or INDEXED are not supported by this module. + + Should the file system be mounted by option raw or if the file has + been set to raw mode the routine to hamdle STREAM format is invoked + for ALL file types including RELATIVE and INDEXED files. + + *filp pointer to the file. + + *buf buffer where to return data. + + buflen size of buf area. 
+ + *loff virtual position in file where to read from. + + OUTPUT: + + The number of bytes read. + 0 if no bytes was read. + + IMPLICIT: + + None. +*/ + +ssize_t ods2_read(struct file *filp, char *buf, size_t buflen, loff_t *loff) { + struct inode *inode = filp->f_dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct ods2sb *ods2p = ODS2_SB (sb); + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + struct fatdef *fatp = (struct fatdef *) &(ods2fhp->fat); + + if (ods2p->flags.v_raw || ods2filep->u1.s1.v_raw) { + return ods2_read_stream(filp, buf, buflen, loff); + } else { + switch (fatp->u0.s0.fat_v_fileorg) { + case FAT_C_SEQUANTIAL: { + switch (fatp->u0.s0.fat_v_rtype) { + case FAT_C_VFC: + case FAT_C_VARIABLE: return ods2_read_variable(filp, buf, buflen, loff); + case FAT_C_FIXED: + case FAT_C_STREAMLF: + case FAT_C_STREAMCR: + case FAT_C_STREAM: return ods2_read_stream(filp, buf, buflen, loff); + default: return 0; + } + } + default: return 0; + } + } +} + + +/* + FUNCTION: + + This routine return a valid file offset for STREAM files. + Note that the current ODS2 driver does not support an offset that + is larger then file size. + + INPUT: + + *filp pointer to the file. + + loff virtual position in file where to read from. + + seek how loff should be calculated for the file. + 0 = absolute position. + 1 = offset from current file position. + 2 = offset from end of file. + + OUTPUT: + + The new position in the file is returned. + + IMPLICIT: + + This routine will not allow the current position to be beyond + the end of file position. 
+*/ + +static loff_t +ods2_llseek_stream (struct file *filp, loff_t loff, int seek) +{ + struct inode *inode = filp->f_dentry->d_inode; + loff_t offs; + + if (seek == 0) { /* SEEK_SET */ + offs = MIN(loff, inode->i_size); + } else { + if (seek == 1) { /* SEEK_CUR */ + if (loff > 0) { + offs = MIN(filp->f_pos + loff, inode->i_size); + } else { + offs = MAX(filp->f_pos + loff, 0); + } + } else { + offs = MIN(inode->i_size + loff, inode->i_size); + } + } + filp->f_pos = offs; + //filp->f_reada = 0; FIXME + filp->f_version++; + return offs; +} + +/* + FUNCTION: + + This routine return a valid file offset for VARIABLE files. + Note that the current ODS2 driver does not support an offset that + is larger then file size. + This routine will take care of the fact that Linux doesn't know + anything about records in a file so all routines and utilities + believe the file offset is the exact position in the file. + For a variable record file each record consists not only of data + but also of the record length (2 bytes). An additional fix part + of the record can contain meta data for the record such as print + control information. + All this make it complicated to calculate the record offset into + the file from a given offset. + To avoid to be forced to read from the start of the file to find + the correct position for a given offset checkpoints are stored + together with the inode for each 64K blocks of data. + By using these checkpoints this routine can calculate the record + position for a given offset by starting reading records from the + closest checkpoint. + If the requested position is within a part of the file already + read no more than 128 blocks of data must be read to find the + position. + On the other hand if no reading has been done for the requested + position before we could ending up to read all records for the + remaining of the file but that is no other good solution to the + problem. + + INPUT: + + *filp pointer to the file. 
+ + loff virtual position in file where to read from. + + seek how loff should be calculated for the file. + 0 = absolute position. + 1 = offset from current file position. + 2 = offset from end of file. + + OUTPUT: + + The new position in the file is returned. + + IMPLICIT: + + This routine will not allow the current position to be beyond + the end of file position. +*/ + + +loff_t +ods2_llseek_variable(struct file *filp, loff_t loff, int seek) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct ods2var *ods2varp = NULL; + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + struct ods2vari *ods2vari = ods2fhp->ods2vari; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + struct fatdef *fatp = (struct fatdef *) &(ods2fhp->fat); + int idxblock = 0; + loff_t offs = 0; + loff_t coffs = 0; + loff_t currec = 0; + u32 vbn = 0; + u16 reclen = 0; + + offs = loff; + if (seek == 0) { /* SEEK_SET */ + offs = MIN(offs, inode->i_size); + } else { + if (seek == 1) { /* SEEK_CUR */ + if (offs > 0) { + offs = MIN(filp->f_pos + offs, inode->i_size); + } else { + offs = MAX(filp->f_pos + offs, 0); + } + } else { + offs = MIN(inode->i_size + offs, inode->i_size); + } + } + + /* + offs - the absolute virtual offset into the file we want to find. + coffs - offset counter. 
+ */ + + down(&(ods2vari->sem)); + if (offs > 65535) { + coffs = offs; + if ((coffs >> 16) > (ods2vari->highidx >> 16)) { + coffs = ods2vari->highidx; + } + coffs += 65536; + do { + coffs -= 65536; + idxblock = IDXBLOCK(coffs); + ods2varp = ods2vari->ods2varp[IDXVARI(coffs)]; + for (; idxblock > 0; idxblock--) { + ods2varp = ods2varp->nxt; + } + } while (coffs > 65535 && ods2varp->s1[IDXVAR(coffs)].loff > offs); + if (coffs > 65535) { + currec = ods2varp->s1[IDXVAR(coffs)].recoffs; + coffs = ods2varp->s1[IDXVAR(coffs)].loff; + } else { + coffs = 0; + } + } + + while (1) { + + do { + vbn = currec >> 9; + if (!(getfilebh(filp, vbn + 1))) { + ods2filep->reclen = 65535; + up(&(ods2vari->sem)); + filp->f_pos = coffs; + //filp->f_reada = 0; FIXME + filp->f_version++; + return offs; + } + reclen = le16_to_cpu(*((u16 *)((char *)ods2filep->data + (currec & 511)))); + + if ((coffs >> 16) != 0) { + update_virtual_file_pos(coffs, ods2vari, currec); + } + + if ((reclen == 65535 && !(fatp->fat_b_rattrib & FAT_M_NOSPAN)) || currec > inode->i_size) { /* end of records */ + ods2filep->reclen = 65535; + up(&(ods2vari->sem)); + filp->f_pos = coffs; + //filp->f_reada = 0; FIXME + filp->f_version++; + return offs; + } + if (reclen == 65535 && (fatp->fat_b_rattrib & FAT_M_NOSPAN)) { + currec = (vbn + 1) * 512; /* next block... */ + } + } while (reclen == 65535); + + if (coffs <= offs && (coffs + reclen - (fatp->u0.s0.fat_v_rtype == FAT_C_VFC ? fatp->fat_b_vfcsize : 0)) >= offs) { /* we have found our location */ + ods2filep->currec = currec; + ods2filep->curbyte = (offs - coffs) + 2 + (fatp->u0.s0.fat_v_rtype == FAT_C_VFC ? fatp->fat_b_vfcsize : 0); + ods2filep->reclen = reclen; + up(&(ods2vari->sem)); + filp->f_pos = coffs; + //filp->f_reada = 0; FIXME + filp->f_version++; + return offs; + } + coffs += (reclen - (fatp->u0.s0.fat_v_rtype == FAT_C_VFC ? 
fatp->fat_b_vfcsize : 0)); + if (fatp->fat_b_rattrib & FAT_M_FORTRANCC || fatp->fat_b_rattrib & FAT_M_IMPLIEDCC || fatp->fat_b_rattrib & FAT_M_PRINTCC) { + coffs++; /* need to add one byte for LF */ + } + currec = ((currec + reclen + 1) & ~1) + 2; /* all records are even aligned */ + } +} + + +loff_t +ods2_llseek(struct file *filp, loff_t loff, int seek) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct ods2sb *ods2p = ODS2_SB (sb); + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + struct fatdef *fatp = (struct fatdef *) &(ods2fhp->fat); + + if (ods2p->flags.v_raw || ods2filep->u1.s1.v_raw) { + return ods2_llseek_stream(filp, loff, seek); + } else { + switch (fatp->u0.s0.fat_v_fileorg) { + case FAT_C_SEQUANTIAL: { + switch (fatp->u0.s0.fat_v_rtype) { + case FAT_C_VFC: + case FAT_C_VARIABLE: return ods2_llseek_variable(filp, loff, seek); + case FAT_C_FIXED: + case FAT_C_STREAMLF: + case FAT_C_STREAMCR: + case FAT_C_STREAM: return ods2_llseek_stream(filp, loff, seek); + default: return loff; + } + } + default: return loff; + } + } +} + + +int ods2_open_release(struct inode *inode, struct file *filp) { + if (filp->private_data == NULL) { + filp->private_data = kmalloc (sizeof (struct ods2file), GFP_KERNEL); + if (filp->private_data) + memset (filp->private_data, 0, sizeof (struct ods2file)); + else { + printk("ODS2-fs kmalloc failed for open_release\n"); + return 0; + } + } else { + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + + if (ods2filep) { + brelse(ods2filep->bhp); + kfree(filp->private_data); + } + filp->private_data = NULL; + } + return 0; +} + diff -Nru a/fs/ods2/inode.c b/fs/ods2/inode.c --- a/fs/ods2/inode.c 1970-01-01 01:00:00 +++ b/fs/ods2/inode.c 2004-09-23 15:43:02 @@ -0,0 +1,328 @@ +/* + * linux/fs/ods2/inode.c + * + * COPYRIGHT + * This file is distributed under the terms of the GNU 
General Public + * License (GPL). Copies of the GPL can be obtained from: + * http://www.gnu.org/licenses/gpl.txt + * Each contributing author retains all rights to their own work. + * + * Written 2003 by Jonas Lindholm + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ods2.h" + +struct file_operations ods2_dir_operations = { + .readdir = ods2_readdir, + .open = ods2_open_release, + .release = ods2_open_release, + //.read = NULL, + //.ioctl = NULL, + //.fsync = NULL, +}; + +struct file_operations ods2_file_operations = { + .read = ods2_read, + .llseek = ods2_llseek, + .open = ods2_open_release, + .release = ods2_open_release, + .ioctl = ods2_file_ioctl, + //.readdir = NULL, + //.fsync = NULL, +}; + + +struct inode_operations ods2_dir_inode_operations = { + .lookup = ods2_lookup, + //.create = NULL, + //.link = NULL, + //.unlink = NULL, + //.symlink = NULL, + //.mkdir = NULL, + //.rmdir = NULL, + //.mknod = NULL, + //.rename = NULL, +}; + + + +struct dentry * +ods2_lookup (struct inode *dir, struct dentry *dentry, struct nameidata *nd) +{ + struct super_block *sb = dir->i_sb; + struct ods2sb *ods2p = ODS2_SB (sb); + struct buffer_head *bh; + char *vp; + u16 *rec; + struct ods2fh *ods2fhp = (struct ods2fh *) dir->u.generic_ip; + u32 vbn = 1; + u32 lbn; + int vers = 0; + char name[dentry->d_name.len + 1]; + + memcpy(name, dentry->d_name.name, dentry->d_name.len); + name[dentry->d_name.len] = 0; + + /* + We need to extract any version number and terminate the file name with file type + at the ; character because in the directory file only the file name and type + is stored as text without the ; character. The version number for the file is + stored together with each FID. 
+ */ + + vp = strrchr (name, ods2p->semicolon); + if (vp) { + *vp++ = 0; + + if (sscanf (vp, "%d", &vers) != 1) { + *--vp = ods2p->semicolon; + } else if (vers > 32767) { + printk("ODS2-fs error with version number for %s (%s)\n", name, vp); + return ERR_PTR(-EBADF); + } + } + + while ((lbn = vbn2lbn(sb, ods2fhp->map, vbn)) > 0 + && (bh = sb_bread(sb, GETBLKNO(sb, lbn))) != NULL + && bh->b_data != NULL) { + + rec = (u16 *) GETBLKP (sb, lbn, bh->b_data); + + while (*rec != 65535 && *rec != 0) { + struct dirdef *dire = (struct dirdef *) rec; + + if (dire->u1.s1.dir_b_namecount == strlen (name)) { + /* FIXME: Stack usage? */ + char dirname[dire->u1.s1.dir_b_namecount + 1]; + + memcpy (dirname, &dire->u1.s1.dir_t_name, dire->u1.s1.dir_b_namecount); + dirname[dire->u1.s1.dir_b_namecount] = 0; + if (ods2p->dollar != '$' || ods2p->flags.v_lowercase) { + char *p = dirname; + char cnt = dire->u1.s1.dir_b_namecount; + + while (*p && cnt-- > 0) { + if (*p == '$') + *p = ods2p->dollar; + if (ods2p->flags.v_lowercase) + *p = tolower (*p); + p++; + } + } + + if (strcmp (dirname, name) == 0) { + int curbyte = 0; + + while (curbyte < dire->u1.s1.dir_w_size) { + u32 ino; + struct dirdef *dirv = (struct dirdef *) ((char *)dire + ((dire->u1.s1.dir_b_namecount + 1) & ~1) + 6 + curbyte); + + if (dirv->u1.s2.dir_w_version == vers || vers == 0) { + struct inode *inode; + + ino = (dirv->u1.s2.u2.s3.fid_b_nmx << 16) | le16_to_cpu(dirv->u1.s2.u2.s3.fid_w_num); + brelse(bh); + if ((inode = iget(dir->i_sb, ino)) != NULL) { + d_add(dentry, inode); + return NULL; + } + printk (KERN_ERR "ODS2-fs error when iget for file %s\n", name); + return ERR_PTR(-EACCES); + } + curbyte += 8; + } + } + } + rec = (u16 *)((char *)rec + le16_to_cpu(dire->u1.s1.dir_w_size) + 2); + } + brelse(bh); + vbn++; + } + d_add(dentry, NULL); + return NULL; +} + + +/* + The array is used to map ODS2 protection bits to Unix protection bits. + There are two problems when doing the mapping. 
+ The first one is that ODS2 have four types of classes, system, owner, group + and world. As you know Unix has only three, owner, group and other. + We solve that by mapping owner to owner, group to group and world to other. + The system class is ignored. + The other problem is that ODS2 have four different protection bits, read, + write, execute and delete. The read, write and execute can be mapped directly + to Unix bits but the delete bit must be mapped to something else. + As write access give the user delete access on Unix we map the delete bit to + write access. + Please note that on an ODS2 disk a set bit mean deny access where on Unix a + set bit mean granted access. +*/ + +unsigned char vms2unixprot[] = { /* ODS2 prot */ + S_IROTH | S_IWOTH | S_IXOTH , /* D E W R */ + 0 | S_IWOTH | S_IXOTH , /* D E W */ + S_IROTH | S_IWOTH | S_IXOTH , /* D E R */ + 0 | S_IWOTH | S_IXOTH , /* D E */ + S_IROTH | S_IWOTH | 0 , /* D W R */ + 0 | S_IWOTH | 0 , /* D W */ + S_IROTH | S_IWOTH | 0 , /* D R */ + 0 | S_IWOTH | 0 , /* D */ + S_IROTH | S_IWOTH | S_IXOTH , /* E W R */ + 0 | S_IWOTH | S_IXOTH , /* E W */ + S_IROTH | 0 | S_IXOTH , /* E R */ + 0 | 0 | S_IXOTH , /* E */ + S_IROTH | S_IWOTH | 0 , /* W R */ + 0 | S_IWOTH | 0 , /* W */ + S_IROTH | 0 | 0 , /* R */ + 0 | 0 | 0 , /* */ +}; + +void +ods2_read_inode (struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + struct ods2sb *ods2p = ODS2_SB (sb); + struct buffer_head *bh; + u32 fhlbn; + + if ((fhlbn = ino2fhlbn (sb, inode->i_ino)) > 0 + && (bh = sb_bread(sb, GETBLKNO(sb, fhlbn))) != NULL + && bh->b_data != NULL) { + struct fh2def *fh2p = (struct fh2def *) GETBLKP(sb, fhlbn, bh->b_data); + + inode->u.generic_ip = kmalloc (sizeof (struct ods2fh), GFP_KERNEL); + if (inode->u.generic_ip) { + struct ods2fh *ods2fhp; + struct fi2def *fi2p; + struct fatdef *fatp; + + ods2fhp = (struct ods2fh *) inode->u.generic_ip; + ods2fhp->map = NULL; + ods2fhp->ods2vari = NULL; + fi2p = (struct fi2def *) ((short unsigned *)fh2p 
+ fh2p->fh2_b_idoffset); + fatp = (struct fatdef *) &(fh2p->fh2_w_recattr); + + if (verify_fh(fh2p, inode->i_ino)) { + memcpy (&ods2fhp->fat, fatp, sizeof (struct fatdef)); + ods2fhp->map = getmap (sb, fh2p); + + if (fh2p->u4.s1.fch_v_directory) { + inode->i_mode = S_IFDIR; + inode->i_op = &ods2_dir_inode_operations; + inode->i_fop = &ods2_dir_operations; + } else { + inode->i_mode = S_IFREG; + inode->i_fop = &ods2_file_operations; + } + + inode->i_uid = le16_to_cpu (fh2p->u5.s1.fh2_w_mem); + inode->i_gid = le16_to_cpu (fh2p->u5.s1.fh2_w_grp); + + vms2timespec (&inode->i_ctime, fi2p->fi2_q_credate); + vms2timespec (&inode->i_mtime, fi2p->fi2_q_revdate); + vms2timespec (&inode->i_atime, fi2p->fi2_q_revdate); + + /* + * Note that we don't use the system + * protection bits for ODS2. + */ + inode->i_mode |= vms2unixprot[(le16_to_cpu(fh2p->fh2_w_fileprot) >> 4) & 0x0f] << 6; /* owner */ + inode->i_mode |= vms2unixprot[(le16_to_cpu(fh2p->fh2_w_fileprot) >> 8) & 0x0f] << 3; /* group */ + inode->i_mode |= vms2unixprot[(le16_to_cpu(fh2p->fh2_w_fileprot) >> 12) & 0x0f]; /* world => other */ + + inode->i_blksize = 512; + inode->i_blocks = ((le16_to_cpu(fatp->u1.s1.fat_w_hiblkh) << 16) | le16_to_cpu(fatp->u1.s1.fat_w_hiblkl)); + inode->i_size = ((le16_to_cpu(fatp->u2.s1.fat_w_efblkh) << 16) | le16_to_cpu(fatp->u2.s1.fat_w_efblkl)) << 9; + if (inode->i_size > 0) + inode->i_size -= 512; + inode->i_size += le16_to_cpu(fatp->fat_w_ffbyte); + + if ((fatp->u0.s0.fat_v_rtype == FAT_C_VFC + || fatp->u0.s0.fat_v_rtype == FAT_C_VARIABLE) + && !ods2p->flags.v_raw) { + if ((ods2fhp->ods2vari = (struct ods2vari *) kmalloc(sizeof (struct ods2vari), GFP_KERNEL)) != NULL) { + memset(ods2fhp->ods2vari, 0 , sizeof (struct ods2vari)); + sema_init(&(ods2fhp->ods2vari->sem), 1); + } else { + printk(KERN_ERR "ODS2-fs kmalloc failed for vari data\n"); + } + } + + ods2fhp->parent = (fh2p->u6.s1.fid_b_nmx << 16) | le16_to_cpu(fh2p->u6.s1.fid_w_num); + //inode->i_version = ++event; FIXME + 
bforget(bh); + return; + } + printk (KERN_ERR "ODS2-fs not a valid file header\n"); + } else { + bforget (bh); + printk (KERN_ERR "ODS2-fs kmalloc failed for extension inode\n"); + kfree (inode->u.generic_ip); + } + } + printk (KERN_ERR "ODS2-fs error reading inode\n"); + make_bad_inode (inode); +} + +/* + * For a read only file system there is nothing to do for put_inode. + */ +void ods2_put_inode (struct inode *inode) { +} + +void ods2_clear_inode (struct inode *inode) { + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + + if (ods2fhp != NULL) { + struct ods2map *map = ods2fhp->map; + + while (map != NULL) { + struct ods2map *nxt = map->nxt; + kfree (map); + map = nxt; + } + ods2fhp->map = NULL; + + if (ods2fhp->ods2vari != NULL) { /* in case the file was of variable record type */ + int idx; + + for (idx = 0; idx < 128; idx++) { + struct ods2var *ods2varp = ods2fhp->ods2vari->ods2varp[idx]; + + while (ods2varp != NULL) { + struct ods2var *nxt = ods2varp->nxt; + + kfree(ods2varp); + ods2varp = nxt; + } + } + kfree (ods2fhp->ods2vari); + ods2fhp->ods2vari = NULL; + } + kfree (inode->u.generic_ip); + inode->u.generic_ip = NULL; + } +} + +/* + * This routine doesn't need to be defined for a read only filesystem + * but we do it for fun so remember to call clear_inode otherwise you + * will run out of memory... + */ +void +ods2_delete_inode (struct inode *inode) { + clear_inode (inode); +} + diff -Nru a/fs/ods2/ods2.h b/fs/ods2/ods2.h --- a/fs/ods2/ods2.h 1970-01-01 01:00:00 +++ b/fs/ods2/ods2.h 2004-09-30 21:52:18 @@ -0,0 +1,538 @@ +#ifndef _ODS2_H +#define _ODS2_H + +/* + * linux/fs/ods2/ods2.h + * + * COPYRIGHT + * This file is distributed under the terms of the GNU General Public + * License (GPL). Copies of the GPL can be obtained from: + * http://www.gnu.org/licenses/gpl.txt + * Each contributing author retains all rights to their own work. 
+ * + * Written 2003 by Jonas Lindholm + * + */ + +#include + +/* + * The following structures are defined in the book + * "VMS File System Internals". Also, you can find quite some infos about + * the struct layouts at + * http://www.pi-net.dyndns.org/conan/sys$common/syslib/sys$lib_c.tlb . + */ + +/* + * Access to the ods2 superblock from a VFS struct super_block. + */ +#define ODS2_SB(sb) (sb->s_fs_info) + +/* + * VMS' timestamps are a 64bit unsigned integer, with little-endian + * on-disk format. So *always* keep a vms_timestamp_t in LE byteorder! + */ +typedef u64 vms_timestamp_t; + +/* + * This is the home block on a ODS2 disk. + */ +struct hm2def { + u32 hm2_l_homelbn; + u32 hm2_l_alhomelbn; + u32 hm2_l_altidxlbn; + union { + u16 hm2_w_struclev; + struct { + u8 hm2_b_structlevv; + u8 hm2_b_structlevl; + } s1; + } u1; + u16 hm2_w_cluster; + u16 hm2_w_homevbn; + u16 hm2_w_alhomevbn; + u16 hm2_w_altidxvbn; + u16 hm2_w_ibmapvbn; + u32 hm2_l_ibmaplbn; + u32 hm2_l_maxfiles; + u16 hm2_w_ibmapsize; + u16 hm2_w_resfiles; + u16 hm2_w_devtype; + u16 hm2_w_rvn; + u16 hm2_w_setcount; + u16 hm2_w_volchar; + union { + u32 hm2_l_owner; + struct { + u16 hm2_w_mem; + u16 hm2_w_grp; + } s1; + } u2; + u32 hm2_l_res1; + u16 hm2_w_protect; + u16 hm2_w_fileprot; + u16 hm2_w_res2; + u16 hm2_w_checksum1; + u32 hm2_q_credate[2]; + u8 hm2_b_window; + u8 hm2_b_lru_lim; + u16 hm2_w_extend; + u32 hm2_q_retainmin[2]; + u32 hm2_q_retainmax[2]; + u32 hm2_q_revdate[2]; + u8 hm2_r_min_class[20]; + u8 hm2_r_max_class[20]; + u8 hm2_b_res3[320]; + u32 hm2_l_serialnum; + char hm2_t_structname[12]; + char hm2_t_volname[12]; + char hm2_t_ownername[12]; + char hm2_t_format[12]; + u16 hm2_w_res4; + u16 hm2_w_checksum2; +}; + +/* + * This is the Storage Control Block. + * It is the first block in file BITMAP.SYS. 
+ */ +struct scbdef { + union { + u16 scb_w_struclev; + struct { + u8 scb_b_structlevv; + u8 scb_b_structlevl; + } s1; + } u1; + u16 scb_w_cluster; + u32 scb_l_volsize; + u32 scb_l_blksize; + u32 scb_l_sectors; + u32 scb_l_tracks; + u32 scb_l_cylinders; + union { + u32 scb_l_status; + struct { + u32 scb_v_mapdirty:1; + u32 scb_v_mapalloc:1; + u32 scb_v_filalloc:1; + u32 scb_v_quodirty:1; + u32 scb_v_hdrwrite:1; + u32 scb_v_corrupt:1; + } s1; + } u2; + union { + u32 scb_l_status2; + struct { + u32 scb_v_mapdirty:1; + u32 scb_v_mapalloc:1; + u32 scb_v_filalloc:1; + u32 scb_v_quodirty:1; + u32 scb_v_hdrwrite:1; + u32 scb_v_corrupt:1; + } s1; + } u3; + u16 scb_w_writecnt; + char scb_t_volockname[12]; + u16 scb_q_mounttime[4]; // really __int64 + u16 scb_w_backrev; + u64 scb_q_genernum; + u8 scb_b_reserved[446]; + u16 scb_w_checksum; +}; + +/* + * This structure is part of the file header block and + * fives different tomes as well as the file name. + */ +struct fi2def { + char fi2_t_filename[20]; + u16 fi2_w_revision; + vms_timestamp_t fi2_q_credate; + vms_timestamp_t fi2_q_revdate; + vms_timestamp_t fi2_q_expdate; + vms_timestamp_t fi2_q_bakdate; + char fi2_filenameext[66]; + //char fi2_t_userlabel[80]; // from http://www.pi-net.dyndns.org/conan/sys$common/syslib/sys$lib_c.tlb?key=FI2DEF&title=Library%20/sys$common/syslib/sys$lib_c.tlb&referer= +}; + +/* + * This is the file header for any ODS2 file. + * It is located in file INDEXF.SYS. 
+ */ +struct fh2def { + u8 fh2_b_idoffset; + u8 fh2_b_mpoffset; + u8 fh2_b_acoffset; + u8 fh2_b_rsoffset; + u16 fh2_w_seg_num; + union { + u16 fh2_w_struclev; + struct { + u8 fh2_b_structlevv; + u8 fh2_b_structlevl; + } s1; + } u1; + union { + u16 fh2_w_fid[3]; + struct { + u16 fh2_w_fid_num; + u16 fh2_w_fid_seq; + u8 fh2_b_fid_rvn; + u8 fh2_b_fid_nmx; + } s1; + } u2; + union { + u16 fh2_w_ext_fid[3]; + struct { + u16 fid_w_ex_fidnum; + u16 fid_w_ex_fidseq; + u8 fid_b_ex_fidrvn; + u8 fid_b_ex_fidnmx; + } s1; + } u3; + u32 fh2_w_recattr[8]; + union { + struct filechar { + u32 fch_v_wascontig:1; + u32 fch_v_nobackup:1; + u32 fch_v_writeback:1; + u32 fch_v_readcheck:1; + u32 fch_v_writecheck:1; + u32 fch_v_contigb:1; + u32 fch_v_locked:1; + u32 fch_v_contig:1; + u32 fch_v_res1:3; + u32 fch_v_badacl:1; + u32 fch_v_spool:1; + u32 fch_v_directory:1; + u32 fch_v_badblock:1; + u32 fch_v_markdel:1; + u32 fch_v_nocharge:1; + u32 fch_v_erase:1; + } s1; + u32 fh2_l_filechar; + } u4; + u16 fh2_w_res1; + u8 fh2_b_map_inuse; + u8 fh2_b_acc_mode; + union { + u32 fh2_l_fileowner; + struct { + u16 fh2_w_mem; + u16 fh2_w_grp; + } s1; + } u5; + u16 fh2_w_fileprot; + union { + u16 fh2_w_backlink[3]; + struct { + u16 fid_w_num; + u16 fid_w_seq; + u8 fid_b_rvn; + u8 fid_b_nmx; + } s1; + } u6; + u8 fh2_b_journal; + u8 fh2_b_ru_active; + u16 fh2_w_res2; + u32 fh2_l_highwater; + u8 fh2_b_res3[8]; + u8 fh2_r_class_prot[20]; + u8 fh2_b_res4[402]; + u16 fh2_w_checksum; +}; + +/* + * This is the file attribute structure. + * It is part of the file header. + * It defines RMS attributes for any file. 
+ */ +#define FAT_C_UNDEFINED 0 +#define FAT_C_FIXED 1 +#define FAT_C_VARIABLE 2 +#define FAT_C_VFC 3 +#define FAT_C_STREAM 4 +#define FAT_C_STREAMLF 5 +#define FAT_C_STREAMCR 6 + +#define FAT_C_SEQUANTIAL 0 +#define FAT_C_RELATIVE 1 +#define FAT_C_INDEXED 2 +#define FAT_C_DIRECT 3 + +#define FAT_M_FORTRANCC 0x01 +#define FAT_M_IMPLIEDCC 0x02 +#define FAT_M_PRINTCC 0x04 +#define FAT_M_NOSPAN 0x08 +#define FAT_M_MSBRCW 0x10 + +struct fatdef { + union { + u8 fat_b_rtype; + struct { + u8 fat_v_rtype:4; + u8 fat_v_fileorg:4; + } s0; + } u0; + u8 fat_b_rattrib; + u8 fat_w_rsize; + union { + u32 fat_l_hiblk; + struct { + u16 fat_w_hiblkh; + u16 fat_w_hiblkl; + } s1; + } u1; + union { + u32 fat_l_efblk; + struct { + u16 fat_w_efblkh; + u16 fat_w_efblkl; + } s1; + } u2; + u16 fat_w_ffbyte; + u8 fat_b_bktsize; + u8 fat_b_vfcsize; + u16 fat_w_maxrec; + u16 fat_w_defext; + u8 fat_b_res1[6]; + u16 fat_w_notused; + u16 fat_w_versions; +}; + +/* + * This is the structure used for mapping virtual block + * number, VBN, to logical block numbers, LBN. + * One or more of this structure is part of the file header. + */ +struct fm2def { + union { + struct { + u8 fm2_b_count1; + u8 fm2_v_highlbn:6; + u8 fm2_v_format:2; + u16 fm2_w_lowlbn; + } fm1; + struct { + u16 fm2_v_count2:14; + u16 fm2_v_format:2; + u16 fm2_l_lbn2[2]; + } fm2; + struct { + u16 fm2_v_count2:14; + u16 fm2_v_format:2; + u16 fm2_w_lowcount; + u32 fm2_l_lbn3; + } fm3; + } u1; +}; + +/* + * This structure define a directory entry in a directory file. 
+ */ +#define DIR_C_FID 0 +#define DIR_C_LINKNAME 1 +struct dirdef { + union { + struct { + u16 dir_w_size; + s16 dir_w_verlimit; + union { + u8 dir_b_flags; + struct { + u8 dir_v_type:3; + u8 dir_v_res1:3; + u8 dir_v_nextrec:1; + u8 dir_v_prevrec:1; + } s4; + } u4; + u8 dir_b_namecount; + char dir_t_name; + } s1; + struct { + u16 dir_w_version; + union { + u16 dir_w_fid[3]; + struct { + u16 fid_w_num; + u16 fid_w_seq; + u8 fid_b_rvn; + u8 fid_b_nmx; + } s3; + } u2; + } s2; + } u1; +}; + +/* + * From here we have our own ODS2 specific structures + * and definitions. + */ +struct ods2map { + struct ods2map *nxt; + struct { + u32 cnt; + u32 lbn; + } s1[16]; +}; + +/* + * Each block map 64Kbyte * 16 loff's. + * The number of bytes for this structure is 4 + 16 * 16 => 260. + * For a 1GB file we need a total of 1024 blocks. If each block is + * 260 bytes the total amount of bytes is 1024 * 260 => 266240 bytes + * The linked list will contain no more than 8 blocks as the structure + * below has 128 pointers. + */ +struct ods2var { + struct ods2var *nxt; /* next block if needed */ + struct { + u64 recoffs; /* offset to start of record */ + loff_t loff; /* virtual offset to start of record */ + } s1[16]; +}; + +/* + * Each file that is of variable record type has the following structure + * attached to it. + * This is the index for one or more ODS2VAR structures. By doing index as + * much as possible it is easy to calculate what structure to use by just + * doing some shifts and bit masking. + * Note that this structure and its sub structures are protected by a + * semaphore because more than one process at the same time can use the inode + * structure to read the file contents. + * The number of bytes for this structure is 128 * 4 + 12 (or 16) => 528. + * The overhead for small files are big but 528 bytes allocated using kmalloc + * should not be to much. 
+ */ +#define IDXVAR(a) (((a) >> 16) & 0x0f) +#define IDXVARI(a) (((a) >> 20) & 0x7f) +#define IDXBLOCK(a) ((a) >> 27) +struct ods2vari { + struct ods2var *ods2varp[128]; /* pointers to ods2var blocks */ + struct semaphore sem; /* This is the semaphore used + to protect this structure */ + loff_t highidx; /* highest index so far... */ +}; + +/* + * Each open file has the following structure attached to it. + * It add the extra variables needed to handle directories and + * RMS data. + */ +struct ods2file { + struct buffer_head *bhp; + u8 *data; /* pointer to data portion in buffer */ + u64 currec; /* byte offset to current record --- from start of file */ + u16 curbyte; /* byte offset into current record */ + u16 reclen; /* length of current record */ + union { + u32 flags; + struct { + u32 v_raw:1; /* this file handler must + return data in raw mode */ + u32 v_res1:31; + } s1; + } u1; +}; + +/* + * Each inode has the following structure attached to it. + * It keep the file attributes and mapping information in memory. + */ +struct ods2fh { + struct ods2map *map; /* mapping information from VBN to LBN */ + struct ods2vari *ods2vari; /* only used for variable record files */ + struct fatdef fat; /* file attributes */ + u32 parent; /* ino of parent directory */ +}; + +/* + * The super block for an ODS2 disk has the following + * structure attached. + * It keep the home block and the inode for INDEXF.SYS;1 + * in memory. 
+ */ +#define SB_M_VERSALL 0 +#define SB_M_VERSHIGH 1 +#define SB_M_VERSNONE 2 +#define SB_M_RAW 8 +#define SB_M_LOWERCASE 16 +struct ods2sb { + struct hm2def hm2; + struct inode *indexf; /* INDEXF.SYS */ + u8 *ibitmap; /* index file header bitmap */ + struct kstatfs kstatfs; + struct { + int v_version:3; /* what to do with file versions */ + int v_raw:1; /* force all files as stream */ + int v_lowercase:1; /* force all file names to lowercase */ + int v_res:27; /* reserved */ + } flags; + char dollar; /* character used for dollar */ + char semicolon; /* character used for semicolon */ +}; + +/* + * These two macros are used to support media with a sector size of + * 1024 or 2048 bytes. + * I.e. the RRD47 CDROM drive on my Alpha server 1200 report a sector + * size of 2048 even for an ODS2 CD. + * + * FIXME: These macros need deuglification. + */ +#define GETBLKNO(a, b) ((b) >> (a->s_blocksize_bits - 9)) +#define GETBLKP(a, b, c) ((void *)&(((char *)(c))[((b) & ((a)->s_blocksize_bits == 9 ? 0 : ((a)->s_blocksize_bits == 10 ? 1 : 3))) << 9])) + +/* + * This is our private ioctl operations for a file pointer. + */ +#define ODS2_IOC_FISETRAW _IOW('f', 0x0d0, long) /* enable/disable raw file mode */ +#define ODS2_IOC_FIGETRAW _IOR('g', 0x0d0, long) /* get raw file mode */ +#define ODS2_IOC_SBGETRAW _IOR('g', 0x0d1, long) /* get raw mode for super block */ + +#define MIN(a, b) ((a) < (b)? (a): (b)) /* FIXME - deprecated! */ +#define MAX(a, b) ((a) > (b)? (a): (b)) /* FIXME - deprecated! 
*/ + +/* + * util.c + */ +extern u32 vbn2lbn(struct super_block *sb, struct ods2map *map, u32 vbn); +extern u32 ino2fhlbn(struct super_block *sb, u32 ino); +extern struct ods2map *getmap(struct super_block *sb, struct fh2def *fh2p); +extern struct buffer_head *getfilebh(struct file *filp, u32 vbn); +extern int verify_fh(struct fh2def *fh2p, u32 ino); + +/* + * inode.c + */ +extern struct dentry *ods2_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); +extern void ods2_read_inode(struct inode *inode); +extern void ods2_put_inode(struct inode *inode); +extern void ods2_clear_inode(struct inode *inode); +extern void ods2_delete_inode(struct inode *inode); + +/* + * dir.c + */ +extern int ods2_readdir(struct file *filp, void *dirent, filldir_t filldir); + +/* + * file.c + */ +extern int ods2_file_ioctl(struct inode *inode, struct file *filp, int unsigned cmd, long unsigned arg); +extern ssize_t ods2_read(struct file *filp, char *buf, size_t buflen, loff_t *loff); +extern loff_t ods2_llseek(struct file *filp, loff_t loff, int seek); +extern int ods2_open_release(struct inode *inode, struct file *filp); + +/* + * util.c + */ +extern void vms2timespec (struct timespec *dest, vms_timestamp_t src); +extern void timespec2vms (vms_timestamp_t *dest, struct timespec *src); + +/* FIXME: hacks in util.c */ +extern int get_hardsect_size (int xx); + +#endif /* _ODS2_H */ diff -Nru a/fs/ods2/super.c b/fs/ods2/super.c --- a/fs/ods2/super.c 1970-01-01 01:00:00 +++ b/fs/ods2/super.c 2004-09-24 02:18:04 @@ -0,0 +1,436 @@ +/* + * linux/fs/ods2/super.c + * + * COPYRIGHT + * This file is distributed under the terms of the GNU General Public + * License (GPL). Copies of the GPL can be obtained from: + * http://www.gnu.org/licenses/gpl.txt + * Each contributing author retains all rights to their own work. 
+ * + * Written 2003 by Jonas Lindholm + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ods2.h" + +/* + * This routine is executed when the ODS2 file system is unmounted. + * The only thing we need to do is to release file INDEXF.SYS;1 and + * deallocate memory used for index file header bitmap. + */ +static void ods2_put_super(struct super_block *sb) { + struct ods2sb *ods2p = ODS2_SB (sb); + + if (ods2p != NULL) { + iput(ods2p->indexf); /* release INDEXF.SYS;1 */ + kfree(ods2p->ibitmap); + kfree(ods2p); + } +} + +/* + * This routine is executed when the user want to get information + * about the ODS2 file system. As we are read only we can just copy + * the information we were gathering during the mount into the buffer. + */ +int ods2_statfs(struct super_block *sb, struct kstatfs *buf) { + struct ods2sb *ods2p = ODS2_SB (sb); + + memcpy(buf, &ods2p->kstatfs, sizeof(struct kstatfs)); + return 0; +} + + +static struct super_operations ods2_sops = { + .read_inode = ods2_read_inode, + .put_inode = ods2_put_inode, + .delete_inode = ods2_delete_inode, + .clear_inode = ods2_clear_inode, + .put_super = ods2_put_super, + .statfs = ods2_statfs, + //write_inode: NULL, + //remount_fs: NULL, + //write_super: NULL, +}; + + +/* + * This array is used to get the number of bits set for a nibble value. + * */ +static char unsigned nibble2bits[] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; + +/* + * This routine open and read the BITMAP.SYS;1 file. 
+ */ +int ods2_read_bitmap(struct super_block *sb) { + struct ods2sb *ods2p = ODS2_SB (sb); + struct inode *inode; + struct buffer_head *bh; + + if ((inode = iget(sb, 2)) != NULL) { /* this is BITMAP.SYS */ + struct ods2fh *ods2fhp = (struct ods2fh *) (inode->u.generic_ip); + u32 lbn; + + if ((lbn = vbn2lbn(sb, ods2fhp->map, 1)) > 0 + && (bh = sb_bread(sb, GETBLKNO(sb, lbn))) != NULL + && bh->b_data != NULL) { + struct scbdef *scb = (struct scbdef *) GETBLKP(sb, lbn, bh->b_data); + short unsigned *p; + short unsigned chksum = 0; + + for (p = (short unsigned *)scb ; p < (short unsigned *)&(scb->scb_w_checksum) ; chksum += *p++); + + if (scb->u1.s1.scb_b_structlevl == 2 + && scb->u1.s1.scb_b_structlevv >= 1 + && scb->scb_w_cluster == ods2p->hm2.hm2_w_cluster + && scb->scb_w_checksum == chksum) { + struct buffer_head *bh2; + u32 vbn = 1; + u32 bitset = 0; + + /* + * We need to loop through all bytes that make up the bitmap. + * The fastest way to count the number of bits set in the byte + * is to have a nibble table that has the number of bits for the + * values of 0 to 15. By adding the number of bits for the low + * and high nibble we can get the total amount of bits set. 
+ */ + while (vbn * 512 < inode->i_size + && (lbn = vbn2lbn(sb, ods2fhp->map, vbn + 1)) > 0 + && (bh2 = sb_bread(sb, GETBLKNO(sb, lbn))) != NULL + && bh->b_data != NULL) { + u8 *bp = (char unsigned *)(GETBLKP(sb, lbn, bh2->b_data)); + int cnt; + + for (cnt = 0; cnt < 512; cnt++, bp++) { bitset += (nibble2bits[*bp & 0x0f] + nibble2bits[*bp >> 4]); } + brelse(bh2); + vbn++; + } + bitset *= scb->scb_w_cluster; /* each bit represent 1 or more blocks (cluster factor) */ + ods2p->kstatfs.f_blocks = scb->scb_l_volsize; + ods2p->kstatfs.f_bfree = bitset; + ods2p->kstatfs.f_bavail = bitset; + brelse(bh); + iput(inode); + return 1; /* everything went ok */ + } + brelse(bh); /* invalid data in VBN 1 */ + } + iput(inode); /* could not read VBN 1 */ + } + return 0; /* unable to get inode 2 OR some other problem */ +} + +/* + * This routine allocate memory for the index file header bitmap + * and copy data from the INDEXF.SYS file. At the same time the + * number of free file headers are counted. + */ +static int +ods2_read_ibitmap (struct super_block *sb) +{ + struct ods2sb *ods2p = ODS2_SB (sb); + int idx; + struct buffer_head *bh; + + ods2p->kstatfs.f_ffree = 0; + + ods2p->ibitmap = kmalloc (ods2p->hm2.hm2_w_ibmapsize << 9, GFP_KERNEL); + if (ods2p->ibitmap) { + memset (ods2p->ibitmap, 0, ods2p->hm2.hm2_w_ibmapsize << 9); + for (idx = 0; idx < ods2p->hm2.hm2_w_ibmapsize; idx++) { + bh = sb_bread (sb, GETBLKNO (sb, ods2p->hm2.hm2_l_ibmaplbn + idx)); + if (bh && bh->b_data) { + u8 *bp = GETBLKP (sb, ods2p->hm2.hm2_l_ibmaplbn + idx, bh->b_data); + int cnt; + + memcpy (ods2p->ibitmap + (idx << 9), GETBLKP (sb, ods2p->hm2.hm2_l_ibmaplbn + idx, bh->b_data), 512); + for (cnt = 0; cnt < 512; cnt++, bp++) + ods2p->kstatfs.f_ffree += (nibble2bits[(*bp & 0x0f) ^ 0xf] + nibble2bits[(*bp >> 4) ^ 0xf]); + bforget(bh); + } + } + + return 0; + } + + printk("ODS2-fs error when allocating memory for index file header bitmap\n"); + return -ENOMEM; +} + +/* + * Parse options that can be supplied 
(mount -o xxxxxxx) + */ +enum { + Opt_ods2_dollar, + Opt_ods2_semicolon, + Opt_ods2_version_all, + Opt_ods2_version_highest, + Opt_ods2_version_none, + Opt_ods2_lowercase, + Opt_ods2_raw, + Opt_ods2_err, +}; +static match_table_t ods2_tokens = { + { .token = Opt_ods2_dollar, .pattern = "dollar=%s", }, + { .token = Opt_ods2_semicolon, .pattern = "semicolon=%s", }, + { .token = Opt_ods2_version_all, .pattern = "version=all", }, + { .token = Opt_ods2_version_highest, .pattern = "version=highest", }, + { .token = Opt_ods2_version_none, .pattern = "version=none", }, + { .token = Opt_ods2_lowercase, .pattern = "lowercase", }, + { .token = Opt_ods2_raw, .pattern = "raw", }, + { .token = Opt_ods2_err, .pattern = NULL, }, +}; + +static int +ods2_set_defaults_and_user_options (struct super_block *sb, char *options) +{ + struct ods2sb *ods2sb = ODS2_SB (sb); + substring_t args[MAX_OPT_ARGS]; + int token; + char *p; + char *string_opt; + + /* + * Set default values first, ... + */ + ods2sb->flags.v_raw = 0; + ods2sb->flags.v_lowercase = 0; + ods2sb->flags.v_version = SB_M_VERSALL; + ods2sb->dollar = '$'; + ods2sb->semicolon = ';'; + + /* + * ...then try to parse user-supplied options + */ + if (!options) + return 0; + + while ((p = strsep (&options, ",")) != NULL) { + if (!*p) + continue; + + token = match_token (p, ods2_tokens, args); + switch (token) { + case Opt_ods2_dollar: + string_opt = match_strdup (&args[0]); + if (!string_opt) + return -ENOMEM; + ods2sb->dollar = string_opt[0]; + kfree (string_opt); + break; + + case Opt_ods2_semicolon: + string_opt = match_strdup (&args[0]); + if (!string_opt) + return -ENOMEM; + ods2sb->semicolon = string_opt[0]; + kfree (string_opt); + break; + + case Opt_ods2_version_all: + ods2sb->flags.v_version = SB_M_VERSALL; + break; + + case Opt_ods2_version_highest: + ods2sb->flags.v_version = SB_M_VERSHIGH; + break; + + case Opt_ods2_version_none: + ods2sb->flags.v_version = SB_M_VERSNONE; + break; + + case Opt_ods2_lowercase: + 
ods2sb->flags.v_lowercase = 1; + break; + + case Opt_ods2_raw: + ods2sb->flags.v_raw = 1; + break; + + case Opt_ods2_err: + default: + printk (KERN_ERR "Unrecognized ODS-2 option or " + "missing value \"%s\"\n", p); + return -EINVAL; + } + } + + return 0; +} + +/* + * This is the routine that is invoked when an ODS2 file system is mounted. + */ +static int +ods2_fill_super (struct super_block *sb, void *data, int silent) +{ + struct buffer_head *bh; + struct ods2sb *ods2p; + int ret; + + /* This should be something like (from ext2): + * + * int blocksize; + * blocksize = sb_min_blocksize(sb, BLOCK_SIZE); + * // See what the current blocksize for the device is, and + * // use that as the blocksize. Otherwise (or if the blocksize + * // is smaller than the default) use the default. + * // This is important for devices that have a hardware + * // sectorsize that is larger than the default. + * blocksize = sb_min_blocksize(sb, BLOCK_SIZE); + * if (!blocksize) { + * printk ("EXT2-fs: unable to set blocksize\n"); + * goto failed_sbi; + * } + * + * // If the superblock doesn't start on a hardware sector boundary, + * // calculate the offset. + * if (blocksize != BLOCK_SIZE) { + * logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize; + * offset = (sb_block*BLOCK_SIZE) % blocksize; + * } else { + * logic_sb_block = sb_block; + * } + * + * if (!(bh = sb_bread(sb, logic_sb_block))) { + * ... 
+ */ + sb_set_blocksize(sb, get_hardsect_size(sb->s_dev)); + if ((bh = sb_bread(sb, GETBLKNO(sb, 1))) != NULL && bh->b_data != NULL) { + u16 *p; + u16 chksum1 = 0; + u16 chksum2 = 0; + + ODS2_SB (sb) = kmalloc (sizeof (struct ods2sb), GFP_KERNEL); + if (!ODS2_SB (sb)) { + printk("ODS2-fs kmalloc failed for sb generic\n"); + return -ENOMEM; + } + + ods2p = ODS2_SB (sb); + memcpy(&ods2p->hm2, GETBLKP(sb, 1, bh->b_data), sizeof (struct hm2def)); + brelse(bh); + + ret = ods2_set_defaults_and_user_options (sb, data); + if (ret) { + kfree (ODS2_SB (sb)); + return ret; + } + + for (p = (u16 *)&(ods2p->hm2) ; p < (u16 *)&(ods2p->hm2.hm2_w_checksum1) ; chksum1 += *p++); + for (p = (u16 *)&(ods2p->hm2) ; p < (u16 *)&(ods2p->hm2.hm2_w_checksum2) ; chksum2 += *p++); + + /* + * This is the way to check for a valid home block. + */ + if (ods2p->hm2.hm2_l_homelbn != 0 + && ods2p->hm2.hm2_l_alhomelbn != 0 + && ods2p->hm2.hm2_l_altidxlbn != 0 + && ods2p->hm2.hm2_w_cluster != 0 + && ods2p->hm2.u1.s1.hm2_b_structlevl == 2 + && ods2p->hm2.u1.s1.hm2_b_structlevv >= 1 + && ods2p->hm2.hm2_w_homevbn != 0 + && ods2p->hm2.hm2_l_ibmaplbn != 0 + && ods2p->hm2.hm2_l_maxfiles > ods2p->hm2.hm2_w_resfiles + && ods2p->hm2.hm2_w_resfiles >= 5 + && chksum1 == ods2p->hm2.hm2_w_checksum1 + && chksum2 == ods2p->hm2.hm2_w_checksum2) { + sb->s_op = &ods2_sops; + + ods2p->indexf = iget(sb, 1); /* read INDEXF.SYS. */ + + sb->s_root = d_alloc_root(iget(sb, 4)); /* this is 000000.DIR;1 */ + + /* + * We need to be able to read the index file header bitmap. + */ + if (ods2_read_ibitmap (sb) == 0) { + /* + * We need to be able to read BITMAP.SYS as + * it contains the bitmap for allocated + * blocks. Without this file we need to + * rebuild it by reading ALL file mapping + * pointers for ALL files and create the + * file. That will be in a later release so + * it's a FIXME. 
+ */ + if (ods2_read_bitmap(sb)) { + char format[13]; + char volname[13]; + char volowner[13]; + + /* + * We need to fill in statfs structure + * used when any user want to get + * information about the mounted ODS2 + * file system. Some of the information + * is static and other is found in + * BITMAP.SYS. + */ + ods2p->kstatfs.f_type = 0x3253444f; /* 2SDO */ + ods2p->kstatfs.f_bsize = 512; + ods2p->kstatfs.f_files = ods2p->hm2.hm2_l_maxfiles; + ods2p->kstatfs.f_namelen = 80; + + memcpy(format, ods2p->hm2.hm2_t_format, 12); + format[12] = 0; + memcpy(volname, ods2p->hm2.hm2_t_volname, 12); + volname[12] = 0; + memcpy(volowner, ods2p->hm2.hm2_t_ownername, 12); + volowner[12] = 0; + printk (KERN_NOTICE "ODS2-fs This is a valid ODS2 file system with format /%s/ and volume name /%s/ and owner /%s/\n", format, volname, volowner); + return 0; + } + kfree (ods2p->ibitmap); + } + } + kfree (ODS2_SB (sb)); + } + return -EINVAL; +} + +static struct super_block * +ods2_read_super (struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) { + return get_sb_bdev (fs_type, flags, dev_name, data, ods2_fill_super); +} + +static struct file_system_type ods2_fs_type = { + .owner = THIS_MODULE, + .name = "ods2", + .get_sb = ods2_read_super, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; + +static int __init +init_ods2_fs (void) +{ + return register_filesystem (&ods2_fs_type); +} + +static void __exit +exit_ods2_fs (void) +{ + unregister_filesystem (&ods2_fs_type); +} + +module_init (init_ods2_fs); +module_exit (exit_ods2_fs); + +MODULE_AUTHOR ("Jonas Lindholm "); +MODULE_DESCRIPTION ("ODS-2 filesystem driver"); +MODULE_LICENSE ("GPL"); + diff -Nru a/fs/ods2/util.c b/fs/ods2/util.c --- a/fs/ods2/util.c 1970-01-01 01:00:00 +++ b/fs/ods2/util.c 2004-09-30 21:52:18 @@ -0,0 +1,254 @@ +/* + * linux/fs/ods2/util.c + * + * COPYRIGHT + * This file is distributed under the terms of the GNU General Public + * License (GPL). 
Copies of the GPL can be obtained from: + * ftp://prep.ai.mit.edu/pub/gnu/GPL + * Each contributing author retains all rights to their own work. + * + * Written 2003 by Jonas Lindholm + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ods2.h" + +#define DODGY_GCC /* Old compiler, FIXME */ + +u32 +vbn2lbn (struct super_block *sb, struct ods2map *map, u32 vbn) +{ + int idx = 0; + u32 curvbn = 1; /* VBN is 1 based - not 0 */ + + while (map && map->s1[idx].cnt > 0 && curvbn < vbn && curvbn + map->s1[idx].cnt <= vbn) { + curvbn += map->s1[idx].cnt; + if (++idx > 15) { + map = map->nxt; + idx = 0; + } + } + if (map && map->s1[idx].cnt > 0) { + return map->s1[idx].lbn + (vbn - curvbn); + } + return 0; +} + +u32 +ino2fhlbn (struct super_block *sb, u32 ino) +{ + struct ods2sb *ods2p = ODS2_SB (sb); + + if (ino < 17) { /* the first 16 file headers are located at known locations in INDEXF.SYS */ + return le16_to_cpu(ods2p->hm2.hm2_w_ibmapsize) + le32_to_cpu(ods2p->hm2.hm2_l_ibmaplbn) + ino - 1; + } else { + struct ods2fh *ods2fhp = (struct ods2fh *) ods2p->indexf->u.generic_ip; + + return vbn2lbn(sb, ods2fhp->map, le16_to_cpu(ods2p->hm2.hm2_w_cluster) * 4 + le16_to_cpu(ods2p->hm2.hm2_w_ibmapsize) + ino); + } + + return 0; +} + +/* + * This function retreives all file mapping pointers and create a linked list so + * VBN's can be translated to LBN's. + * Note that this routine read ALL mapping pointers thus creating a catedral window + * for the file. Should there be extension headers they are all read directly not + * using iget to fetch them. 
+ */ +struct ods2map * +getmap (struct super_block *sb, struct fh2def *fh2p) { + struct fm2def *fm2p = (struct fm2def *) ((short unsigned *)fh2p + fh2p->fh2_b_mpoffset); + struct ods2map *map; + struct ods2map *mapfst; + struct buffer_head *bh = NULL; + int idx = 0; + u8 mapinuse = 0; + + mapfst = map = kmalloc (sizeof (struct ods2map), GFP_KERNEL); + if (!map) { + printk("ODS2-fs kmalloc failed for getmap (1)\n"); + return NULL; + } + memset (map, 0, sizeof (struct ods2map)); + + do { + mapinuse = fh2p->fh2_b_map_inuse; + while (fm2p < (struct fm2def *)((short unsigned *)fh2p + fh2p->fh2_b_acoffset) && mapinuse > 0) { + u32 cnt = 0; + u32 lbn = 0; + u16 size = 0; + + switch (fm2p->u1.fm1.fm2_v_format) { + /* FIXME: Use defines */ + case 0: size = 1; break; + case 1: cnt = fm2p->u1.fm1.fm2_b_count1; lbn = (fm2p->u1.fm1.fm2_v_highlbn << 16) | fm2p->u1.fm1.fm2_w_lowlbn; size = 2; break; + case 2: cnt = fm2p->u1.fm2.fm2_v_count2; lbn = (le16_to_cpu(fm2p->u1.fm2.fm2_l_lbn2[1]) << 16) | le16_to_cpu(fm2p->u1.fm2.fm2_l_lbn2[0]); size = 3; break; + case 3: cnt = (fm2p->u1.fm3.fm2_v_count2 << 16) | le16_to_cpu(fm2p->u1.fm3.fm2_w_lowcount); lbn = le32_to_cpu(fm2p->u1.fm3.fm2_l_lbn3); size = 4; break; + } + if (fm2p->u1.fm1.fm2_v_format > 0) { + if (idx > 15) { + map->nxt = kmalloc (sizeof (struct ods2map), GFP_KERNEL); + if (map->nxt) { + map = map->nxt; + memset (map, 0, sizeof (struct ods2map)); + idx = 0; + } else { + printk("ODS2-fs kmalloc failed for getmap (2)\n"); + return map; + } + } + map->s1[idx].cnt = cnt + 1; /* the count is always N + 1 mapped blocks */ + map->s1[idx].lbn = lbn; + idx++; + } + mapinuse -= size; + fm2p = (struct fm2def *) ((short unsigned *) (fm2p) + size); + } + + /* + * If there is an extension header we need to read all of them because + * they could have additional mapping information. + * + * Note that we can not use iget to fetch the extension header because + * it is not a valid inode for an ODS2 file. 
Only primary file header + * can be used as an inode. + */ + if (fh2p->u3.s1.fid_w_ex_fidnum != 0) { + u32 lbn; + + if ((lbn = ino2fhlbn(sb, le16_to_cpu(fh2p->u3.s1.fid_w_ex_fidnum) | (fh2p->u3.s1.fid_b_ex_fidnmx << 16))) != 0) { + fh2p = NULL; + brelse(bh); + if ((bh = sb_bread(sb, GETBLKNO (sb, lbn))) != NULL && bh->b_data != NULL) { + fh2p = (struct fh2def *) (GETBLKP(sb, lbn, bh->b_data)); + fm2p = (struct fm2def *) ((short unsigned *) fh2p + fh2p->fh2_b_mpoffset); + } + } + } else { + fh2p = NULL; + } + } while (fh2p != NULL); + brelse(bh); + return mapfst; +} + + +struct buffer_head *getfilebh(struct file *filp, u32 vbn) { + struct inode *inode = filp->f_dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct ods2fh *ods2fhp = (struct ods2fh *) inode->u.generic_ip; + struct ods2file *ods2filep = (struct ods2file *) filp->private_data; + + if ((vbn - 1) * 512 < inode->i_size) { + u32 lbn; + + if ((lbn = vbn2lbn(sb, ods2fhp->map, vbn)) > 0) { + if (!ods2filep->bhp || GETBLKNO(sb, lbn) != ods2filep->bhp->b_blocknr) { + brelse(ods2filep->bhp); + ods2filep->bhp = NULL; + if ((ods2filep->bhp = sb_bread(sb, GETBLKNO(sb, lbn))) != NULL) { + if (ods2filep->bhp->b_data != NULL) { + ods2filep->data = GETBLKP(sb, lbn, ods2filep->bhp->b_data); + return ods2filep->bhp; + } + } + } else { + ods2filep->data = GETBLKP(sb, lbn, ods2filep->bhp->b_data); + return ods2filep->bhp; + } + } + } + + return NULL; +} + +int +verify_fh (struct fh2def *fh2p, u32 ino) +{ + u16 *p = (short unsigned *) fh2p; + u16 chksum = 0; + + for (; p < (short unsigned *)&(fh2p->fh2_w_checksum) ; chksum += le16_to_cpu(*p++)); + if (fh2p->fh2_b_idoffset <= fh2p->fh2_b_mpoffset && + fh2p->fh2_b_mpoffset <= fh2p->fh2_b_acoffset && + fh2p->fh2_b_acoffset <= fh2p->fh2_b_rsoffset && + fh2p->u1.s1.fh2_b_structlevl == 2 && fh2p->u1.s1.fh2_b_structlevv >= 1 && + fh2p->u2.s1.fh2_w_fid_num != 0 && + ((fh2p->u2.s1.fh2_b_fid_nmx << 16) | le16_to_cpu(fh2p->u2.s1.fh2_w_fid_num)) == ino && + 
fh2p->fh2_b_map_inuse <= (fh2p->fh2_b_acoffset - fh2p->fh2_b_mpoffset) && + le16_to_cpu(fh2p->fh2_w_checksum) == chksum) { + + return 1; /* it is a valid file header */ + } + return 0; +} + + +#ifdef DODGY_GCC +/* + * To not emit __udivdi3 and __umoddi3, we'll for now implement our own + * div64() and use it. Once we've got a "capable" toolchain, this may + * again go away, so this is a FIXME item. + */ +static u64 +div64 (u64 a, u32 b0) +{ + u32 a1, a2; + u32 res; + + a1 = ((u32 *) &a)[0]; + a2 = ((u32 *) &a)[1]; + res = a1/b0 + (u64) a2 * (u64) (0xffffffff/b0) + a2 / b0 + (a2 * (0xffffffff % b0)) / b0; + + return res; +} +#endif /* DODGY_GCC */ + + +/* + * Ok, I give up :-) for some reason unknown to me the addition of 2 seconds + * is needed to get the correct time. + * It works for a file created 1-jan-1971 and for a file created 1-jan-2038 + * as well as for files created 1992 and 2003. + * + */ +void +vms2timespec (struct timespec *dest, vms_timestamp_t src) +{ + /* time_t = (vms-350669880092000)/10000000 + 2 */ +#ifdef DODGY_GCC + dest->tv_sec = div64 ((le64_to_cpu (src) - 35066988009200000ull), 10000000) + 2; + dest->tv_nsec = le64_to_cpu (src) - div64 (src, 10000000); +#else + dest->tv_sec = (le64_to_cpu (src) - 35066988009200000ull) / 10000000 + 2; + dest->tv_nsec = (le64_to_cpu (src) - 35066988009200000ull) % 10000000; +#endif +} + +void +timespec2vms (vms_timestamp_t *dest, struct timespec *src) +{ + vms_timestamp_t temp; + + /* vms = 10000000 * (time_t - 2) + 35066988009200000 */ + temp = (src->tv_sec - 2) * 10000000 + 35066988009200000ull; + temp += src->tv_nsec / 100; + *dest = cpu_to_le64 (temp); +} + +int +get_hardsect_size (int xx) +{ + printk (KERN_ERR "%s: Not implemented, returning 512!\n", __FUNCTION__); + return 512; +} + diff -Nru a/include/asm-vax/a.out.h b/include/asm-vax/a.out.h --- a/include/asm-vax/a.out.h 1970-01-01 01:00:00 +++ b/include/asm-vax/a.out.h 2005-04-27 00:53:31 @@ -0,0 +1,35 @@ +#ifndef _VAX_A_OUT_H +#define _VAX_A_OUT_H + 
+/* Stolen from the i386 port */ + +/* + * This was TASK_SIZE and is TASK_SIZE once again. Don't play + * games with it, otherwise argc stuff breaks. + */ +#define STACK_TOP TASK_SIZE /* = 0x7fffffff */ + +/* + * This is needed to override a conflicting definition in ../linux/a.out.h + * It should really be: + * + * #define SEGMENT_SIZE PAGE_SIZE + */ +#define page_size PAGE_SIZE + +struct exec { + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#endif /* _VAX_A_OUT_H */ diff -Nru a/include/asm-vax/atomic.h b/include/asm-vax/atomic.h --- a/include/asm-vax/atomic.h 1970-01-01 01:00:00 +++ b/include/asm-vax/atomic.h 2005-04-27 01:09:11 @@ -0,0 +1,255 @@ +#ifndef _VAX_ATOMIC_H +#define _VAX_ATOMIC_H + +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +/* + * Worry about SMP VAXes later. Much later... + * + * Still, should try and use interlocked instructions here. + * When we do, we'll have to make atomic_t a short int, since + * ADAWI only works on WORDs and that's the only interlocked + * arithmetic primitive we have. + */ + +#ifdef CONFIG_SMP +#error "SMP configuration aren't supported right now..." +#endif + +typedef struct { volatile int counter; } atomic_t; + +#define ATOMIC_INIT(i) ( (atomic_t) {i} ) + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. 
Note that the guaranteed + * useful range of an atomic_t is only 24 bits. + */ +#define atomic_read(v) ((v)->counter) + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. + */ +#define atomic_set(v, i) (((v)->counter) = i) + +/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. Note that the guaranteed useful range + * of an atomic_t is only 24 bits -- Not sure thats true on the vax. + */ +static __inline__ void atomic_add(int i, atomic_t *v) +{ + __asm__ __volatile__( + " addl2 %1, %0 \n" + : "=m" (v->counter) + : "g" (i), + "m" (v->counter)); +} + +/** + * atomic_sub - subtract the atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. Not sure thats + * true for VAX. + */ +static __inline__ void atomic_sub(int i, atomic_t *v) +{ + __asm__ __volatile__( + " subl2 %1, %0 \n" + : "=m" (v->counter) + : "g" (i), + "m" (v->counter)); +} + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. + */ +#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. (i386) Not sure + * thats true on a VAX. 
+ */ +static __inline__ void atomic_inc(atomic_t *v) +{ + __asm__ __volatile__( + " incl %0 \n" + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. + */ +static __inline__ void atomic_dec(volatile atomic_t *v) +{ + __asm__ __volatile__( + " decl %0 \n" + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. + */ +static __inline__ int atomic_dec_and_test(volatile atomic_t *v) +{ + unsigned long c; + + __asm__ __volatile__( + " decl %0 \n" + " movl %0, %1 \n" + : "=m" (v->counter), + "=g" (c) + : "m" (v->counter) + : "memory"); + + return c == 0; +} + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. + */ +static __inline__ int atomic_inc_and_test(atomic_t *v) +{ + unsigned long c; + + __asm__ __volatile__( + " incl %0 \n" + " movl %0, %1 \n" + : "=m" (v->counter), + "=g" (c) + : "m" (v->counter) + : "memory"); + + return c == 0; +} + +/** + * atomic_add_negative - add and test if negative + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. Note that the guaranteed + * useful range of an atomic_t is only 24 bits. 
+ */ +static __inline__ int atomic_add_negative(int i, atomic_t *v) +{ + int retval = 0; + __asm__ __volatile__( + " addl2 %2,%1 \n" + " bgeq 1f \n" /* >= 0, return FALSE */ + " incl %0 \n" /* < 0, return TRUE */ + "1: \n" + : "+g" (retval), "+m" (v->counter) + : "g" (i), "m" (v->counter) : "memory" ); + + return retval; +} + + +/* + * These are x86-specific, used by some header files + * But we may find a use for them too. + */ +#define atomic_clear_mask(mask, v) \ + __asm__ __volatile__("bicl2 %1,%0" : : "m" (*v), "ir" (mask)) + +#define atomic_set_mask(mask, v) \ + __asm__ __volatile__("bisl2 %1,%0" : : "m" (*v), "ir" (mask)); + +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() + + +/* + * These functions are used in semaphore.h + */ +static __inline__ long atomic_add_return(int i, atomic_t *v) +{ + long temp, result; + + __asm__ __volatile__( + "1: movl %1, %0 \n" + " addl2 %3, %0 \n" + " movl %0, %2 \n" + " movl %0, %1 \n" + : "=&r" (temp), + "=m" (v->counter), + "=&r" (result) + : "Ir" (i), + "m" (v->counter)); + + return result; +} + +static __inline__ long atomic_sub_return(int i, atomic_t *v) +{ + long temp, result; + __asm__ __volatile__( + "1: movl %1, %0 \n" + " subl2 %3, %0 \n" + " movl %0, %2 \n" + " movl %0, %1 \n" + : "=&r" (temp), + "=m" (v->counter), + "=&r" (result) + : "Ir" (i), + "m" (v->counter)); + + return result; +} + +#define atomic_dec_return(v) atomic_sub_return(1,(v)) +#define atomic_inc_return(v) atomic_add_return(1,(v)) + +#endif /* _VAX_ATOMIC_H */ diff -Nru a/include/asm-vax/bitops.h b/include/asm-vax/bitops.h --- a/include/asm-vax/bitops.h 1970-01-01 01:00:00 +++ b/include/asm-vax/bitops.h 2005-05-09 23:22:23 @@ -0,0 +1,541 @@ +#ifndef _VAX_BITOPS_H +#define _VAX_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. 
+ */ + +#include +#include + +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + * + * VAX port atp Jan 1999 + * Updates for 2.4.3+ atp Mar 2002 + */ + +/* This will come handy to access given addresses */ +#define ADDR (* (volatile long *) addr) + +/* + * I'd like to use interlocked variable length bitfield instructions + * here, but they only seem to exist in branch and {set/clear} flavours. + * There appears to be no complement bit equivalent. + * We are ignoring SMP for the moment anyway... + * atp Jan 99. + * + * We use bbss rather than bisl2 for the variable bit field + * if the bit is in a register, and nr>31 then we get a reserveed + * operand fault. Add an "i" to the instructions for interlocked + * operation (note: check subsetting rules?) + * + * Mar 2002 atp. Further reading of asm-i386/bitops.h reveals that the + * bit number is constrained by "Ir" to be an integer in the + * range 0-31. So we could use bisl2 after all. + */ + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void set_bit(int nr, volatile void *addr) +{ + __asm__ __volatile__( + " bbss %1, %0, 1f \n" + "1: \n" + : "=m" (ADDR) + : "ir" (nr)); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. 
+ * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __set_bit(int nr, volatile void *addr) +{ + __asm__( + " bbss %1, %0, 2f \n" + "2: \n" + : "=m" (ADDR) + : "ir" (nr)); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static __inline__ void clear_bit(int nr, volatile void *addr) +{ + __asm__ __volatile__( + " bbcc %1,%0, 1f \n" + "1: \n" + : "=m" (ADDR) + : "ir" (nr)); +} + +/** + * __clear_bit - Set a bit in memory + * @nr: the bit to clear + * @addr: the address to start counting from + * + * Unlike clear_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __clear_bit(int nr, volatile void *addr) +{ + __asm__( + " bbcc %1, %0, 2f \n" + "2: \n" + : "=m" (ADDR) + : "ir" (nr)); +} + +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. 
+ */ +static __inline__ void __change_bit(int nr, volatile void *addr) +{ + __asm__ __volatile__( + " bbcs %1, %0, 3f \n" + " bbsc %1, %0, 3f \n" + "3: \n" + : "=m" (ADDR) + : "ir" (nr)); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void change_bit(int nr, volatile void *addr) +{ + __asm__ __volatile__( + " bbcs %1, %0, 3f \n" + " bbsc %1, %0, 3f \n" + "3: \n" + : "=m" (ADDR) + : "ir" (nr)); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_set_bit(int nr, volatile void *addr) +{ + int oldbit; + + /* There are interlocked versions of bbss and bbcs we could use... */ + __asm__ __volatile__( + " clrl %0 \n" + " bbcs %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "r" (nr) + : "memory"); + + return oldbit; +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_set_bit(int nr, volatile void *addr) +{ + int oldbit; + + __asm__( + " clrl %0 \n" + " bbcs %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit), + "=m" (ADDR) + : "ir" (nr)); + + return oldbit; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. 
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__(
+		" clrl %0 \n"
+		" bbcc %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr)
+		: "memory");
+
+	return oldbit;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
+{
+	int oldbit;
+
+	__asm__(
+		" clrl %0 \n"
+		" bbcc %2, %1, 1f \n"
+		" incl %0 \n"
+		"1: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr));
+
+	return oldbit;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *) addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_change_bit(int nr, volatile void *addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__(
+		" clrl %0 \n"
+		" bbcs %2, %1, 4f \n"
+		" incl %0 \n"
+		" bbsc %2, %1, 4f \n"
+		"4: \n"
+		: "=&r" (oldbit),
+		  "=m" (ADDR)
+		: "ir" (nr)
+		: "memory");
+
+	return oldbit;
+}
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static int test_bit(int nr, const volatile void * addr);
+#endif
+
+/*
+ * This routine doesn't need to be atomic.
+ */ +static __inline__ int constant_test_bit(int nr, const volatile void *addr) +{ + return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; +} + +static __inline__ int variable_test_bit(int nr, const volatile void *addr) +{ + int oldbit; + + __asm__ __volatile__( + " clrl %0 \n" + " bbc %2, %1, 1f \n" + " incl %0 \n" + "1: \n" + : "=&r" (oldbit) + : "m" (ADDR), + "ir" (nr)); + + return oldbit; +} + +#define test_bit(nr, addr) \ + (__builtin_constant_p(nr) \ + ? constant_test_bit((nr), (addr)) \ + : variable_test_bit((nr), (addr))) + +/* + * FIXME: if we were keen we could do the ffs/ffz in a nice tight + * assembly loop using ffc/ffs and a sliding 32bit field window. + * + * For now, we use the alpha/ppc port method, with 32bit chunks. + */ + +/** + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + * (VAX) + * We could check the Z condition code bit if we wanted to check against + * the ~OUL case, but this interface is designed for intel. Nuff sed. + */ +static __inline__ unsigned long ffz(unsigned long word) +{ + __asm__( + " ffc $0, $32, %1, %0 \n" + : "=rm" (word) + : "rm" (word)); + + return word; +} + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __inline__ unsigned long __ffs(unsigned long word) +{ + __asm__( + " ffs $0, $32, %1, %0 \n" + : "=rm" (word) + : "rm" (word)); + + return word; +} + +/* + * fls: find last bit set. + */ +#define fls(x) generic_fls(x) + + +#ifdef __KERNEL__ + /* + * Every architecture must define this function. It's the fastest + * way of searching a 168-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is set. 
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+}
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+	int r;
+
+	__asm__(
+		" ffs $0, $32, %1, %0 \n"
+		" bnequ 1f \n"
+		" movl $-1, %0 \n"
+		"1: \n"
+		: "=r" (r)
+		: "mr" (x));
+
+	return r + 1;
+}
+#endif /* __KERNEL__ */
+
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+extern __inline__ unsigned long
+find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
+{
+	unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
+	unsigned int result = offset & ~31UL;
+	unsigned int tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *p++;
+		tmp |= ~0UL >> (32-offset);
+		if (size < 32)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size & ~31UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
+
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+/*
+ * This implementation of find_next_bit() is also stolen
+ * from the Alpha port...
+ */ +static __inline__ unsigned long +find_next_bit(const void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *(p++); + tmp &= ~0UL << offset; + if (size < 32) + goto found_first; + if (tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size & ~31UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp &= ~0UL >> (32 - size); + if (!tmp) + return result + size; +found_middle: + return result + __ffs(tmp); +} + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifdef __KERNEL__ +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. + */ +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +/* Bitmap functions for the ext2/ext3 filesystems. */ +#define ext2_set_bit __test_and_set_bit +#define ext2_set_bit_atomic(l,n,a) test_and_set_bit((n), (a)) +#define ext2_clear_bit __test_and_clear_bit +#define ext2_clear_bit_atomic(l, n, a) test_and_clear_bit((n), (a)) +#define ext2_test_bit test_bit +#define ext2_find_first_zero_bit find_first_zero_bit +#define ext2_find_next_zero_bit find_next_zero_bit + +/* Bitmap functions for the Minix filesystem. 
*/ +#define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr)) +#define minix_set_bit(nr, addr) __set_bit((nr), (addr)) +#define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr)) +#define minix_test_bit(nr, addr) test_bit((nr), (addr)) +#define minix_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) +#endif /* __KERNEL__ */ + +#endif /* _VAX_BITOPS_H */ diff -Nru a/include/asm-vax/bug.h b/include/asm-vax/bug.h --- a/include/asm-vax/bug.h 1970-01-01 01:00:00 +++ b/include/asm-vax/bug.h 2003-09-10 10:58:59 @@ -0,0 +1,22 @@ +#ifndef _ASMVAX_BUG_H +#define _ASMVAX_BUG_H + +#define BUG() do { \ + printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + __asm__ __volatile__("bugw $0"); \ +} while (0) + +#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0) + +#define WARN_ON(condition) do { \ + if (unlikely((condition)!=0)) { \ + printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ + dump_stack(); \ + } \ +} while (0) + +#define PAGE_BUG(page) do { \ + BUG(); \ +} while (0) + +#endif /* _ASMVAX_BUG_H */ diff -Nru a/include/asm-vax/bugs.h b/include/asm-vax/bugs.h --- a/include/asm-vax/bugs.h 1970-01-01 01:00:00 +++ b/include/asm-vax/bugs.h 2002-05-20 02:33:38 @@ -0,0 +1,18 @@ +/* + * bugs + */ + +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Needs: + * void check_bugs(void); + */ + +/* + * I don't know of any vax bugs. there must be some. 
+ */ + +static void check_bugs(void) +{ +} diff -Nru a/include/asm-vax/bus/qbus.h b/include/asm-vax/bus/qbus.h --- a/include/asm-vax/bus/qbus.h 1970-01-01 01:00:00 +++ b/include/asm-vax/bus/qbus.h 2004-04-27 01:29:52 @@ -0,0 +1,94 @@ + +#ifndef __VAX_BUS_QBUS_H +#define __VAX_BUS_QBUS_H + +#include +#include +#include + +#define QBUS_NUM_VECTORS 128 + +/* Traditionally, CSR addresses for QBUS devices are expressed as + 760000 (octal) + CSR_OFFSET_WITHIN_BUS_IOSPACE */ + +#define QBUS_OCTAL_CSR(csr_offset) ((csr_offset) + 0760000) + +/* Driver model support */ +extern struct bus_type qbus_bus_type; + +struct qbus_device { + unsigned int csr; + struct device dev; +}; + +struct qbus_driver { + int (*probe)(struct qbus_device *qbus_dev); + struct device_driver drv; +}; + +#define QBUS_DEV(_d) container_of((_d), struct qbus_device, dev) +#define QBUS_DRV(_d) container_of((_d), struct qbus_driver, drv) + +int qbus_register_device(struct qbus_device *); +int qbus_register_driver(struct qbus_driver *); +void qbus_unregister_driver(struct qbus_driver *); + +/* In each of these bus ops, the first argument is a reference + to the bus adapter device */ +struct qbus_ops { + struct vax_dmamap * (*dma_map)(struct device *busdev, void *start, unsigned int len); + void (*dma_unmap)(struct device *busdev, struct vax_dmamap *map); + void (*dma_dumpmap)(struct device *busdev); + int (*vector_to_irq)(struct device *busdev, unsigned int vector); + + int (*request_irq)(struct device *busdev, unsigned int vector, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char * devname, void *dev_id); + unsigned int (*reserve_vector)(struct device *busdev, unsigned int vector); + unsigned int (*alloc_vector)(struct device *busdev); + void (*free_vector)(struct device *busdev, unsigned int vector); + void * (*ioremap)(struct device *busdev, unsigned int bus_addr, unsigned int size); +}; + + +void qbus_dumpmap(struct device *busdev); +struct vax_dmamap 
*qbus_alloc_mapregs(struct device *busdev, void *start, unsigned int len); +void qbus_unmap(struct device *busdev, struct vax_dmamap *mapping); + +/* What system IRQ vector corresponds to the QBUS vector? */ + +int qbus_vector_to_irq(struct device *busdev, unsigned int vector); + +/* Wrapper around request_irq() */ + +int qbus_request_irq(struct device *busdev, unsigned int qbus_vector, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char * devname, + void *dev_id); + +/* Mark a specific QBUS vector as unavailable for dynamic allocation. + Returns 0 if was previously available, 1 if previously reserved */ + +unsigned int qbus_reserve_vector(struct device *busdev, unsigned int vector); + +/* Return an available interrupt vector */ + +unsigned int qbus_alloc_vector(struct device *busdev); + +/* Mark a QBUS interrupt vector as available */ + +void qbus_free_vector(struct device *busdev, unsigned int vector); + + +/* Wraps ioremap() so that device drivers don't have to know where + the QBUS I/O space is located in physical memory */ + +void *qbus_ioremap(struct device *busdev, unsigned int bus_addr, unsigned int size); + +/* Define this for symmetry */ +#define qbus_iounmap(addr) iounmap(addr) + + +#endif /* __VAX_BUS_QBUS_H */ + diff -Nru a/include/asm-vax/bus/vsbus.h b/include/asm-vax/bus/vsbus.h --- a/include/asm-vax/bus/vsbus.h 1970-01-01 01:00:00 +++ b/include/asm-vax/bus/vsbus.h 2005-02-28 23:46:19 @@ -0,0 +1,79 @@ + +#ifndef __VAX_BUS_VSBUS_H +#define __VAX_BUS_VSBUS_H + +#include +#include +#include + +struct vsbus_registers { + unsigned long vs_hltcod; + unsigned long vc_410msr; + unsigned long vc_410cear; /* VS2K */ + unsigned char vc_intmsk; /* Interrupt mask register */ + unsigned char vc_vdcorg; /* Mono display origin */ + unsigned char vc_vdcsel; /* Video interrupt select */ + unsigned char vc_intreq; /* Interrupt request register */ +#define vc_intclr vc_intreq + unsigned short vc_diagdsp; + unsigned short pad4; 
+ unsigned long vc_parctl; +#define vc_bwf0 vc_parctl + unsigned short pad5; + unsigned short pad6; + unsigned short vc_diagtimu; + unsigned short vc_diagtme; +#define vc_diagtimm vc_diagtme +}; + +#define VSA_CLOCK_BASE 0x200b0000 +#define VSA_BASE_REGS 0x20080000 +#define VSA_KA55_BASE_REGS 0x25c00000 + +/* Driver model support */ +extern struct bus_type vsbus_bus_type; + +struct vsbus_device { + unsigned int phys_base; + unsigned int vsbus_irq; + struct device dev; +}; + +struct vsbus_driver { + int (*probe)(struct vsbus_device *vsbus_dev); + void (*remove)(struct vsbus_device *vsbus_dev); + struct device_driver drv; +}; + +#define VSBUS_DEV(_d) container_of((_d), struct vsbus_device, dev) +#define VSBUS_DRV(_d) container_of((_d), struct vsbus_driver, drv) + +int vsbus_register_device(struct vsbus_device *); +int vsbus_register_driver(struct vsbus_driver *); +void vsbus_unregister_driver(struct vsbus_driver *); + +/* End of device model support */ + +#define VSBUS_NR_IRQS 8 + +unsigned int vsbus_irqindex_to_irq(unsigned int); + +int vsbus_request_irq(unsigned int vsbus_irqindex, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char *devname, void *dev_id); +void vsbus_free_irq(unsigned int vsbus_irqindex); + +/* Used at boot time to create device structs for the devices that are + always there */ +void vsbus_add_fixed_device(struct device *parent, char *name, + unsigned int phys_base, unsigned int irqindex); + +/* To keep things a bit simpler, and because all the machines fit this + requirement, we only allow one VSBUS adapter to exist in the system, + so this function can only be called once. The hardware-specific + VSBUS adapter drivers call this at init time. 
*/ + +int init_vsbus_adapter(unsigned int *vectors, unsigned long registers); + +#endif /* __VAX_BUS_VSBUS_H */ + diff -Nru a/include/asm-vax/byteorder.h b/include/asm-vax/byteorder.h --- a/include/asm-vax/byteorder.h 1970-01-01 01:00:00 +++ b/include/asm-vax/byteorder.h 2005-04-20 15:46:19 @@ -0,0 +1,55 @@ +#ifndef _VAX_BYTEORDER_H +#define _VAX_BYTEORDER_H + +/* FIXME: arch byte swapping in assembly */ +#include +#include +#define VAX_USE_ARCH_SWAB32 1 + +#ifdef __GNUC__ +/* + * ragge has a 1 insn shorter sequence which will probably replace this one + * later, depending on the relative instruction costs. + */ +#if VAX_USE_ARCH_SWAB32 +static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +{ + __u32 t1; + + __asm__ volatile ( /* Input is aabbccdd in x (%1) */ + " rotl $8, %1, %%r1 \n" /* r1 = bbccddaa */ + " bicl3 $0xff00ff00, %%r1, %0 \n" /* %0 = 00cc00aa */ + " rotl $-8, %1, %%r1 \n" /* r1 = ddaabbcc */ + " bicl2 $0xff00ff, %%r1 \n" /* r1 = dd00bb00 */ + " bisl2 %%r1, %0 \n" /* %0 = ddccbbaa */ + : "=&g"(t1) + : "r"(x) + : "r1"); + + return t1; +} +#endif + +/* + * According to the resident expert, this is as fast as assembly + */ +static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) +{ + __u16 __x = x; + return (__u16)(__x << 8 | __x >> 8); +} +#if VAX_USE_ARCH_SWAB32 +#define __arch__swab32(x) ___arch__swab32(x) +#endif +#define __arch__swab16(x) ___arch__swab16(x) + +#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#endif /* __GNUC__ */ + +#include + +#endif /* _VAX_BYTEORDER_H */ diff -Nru a/include/asm-vax/cache.h b/include/asm-vax/cache.h --- a/include/asm-vax/cache.h 1970-01-01 01:00:00 +++ b/include/asm-vax/cache.h 2003-09-17 23:59:07 @@ -0,0 +1,12 @@ +#ifndef _ASM_VAX_CACHE_H +#define _ASM_VAX_CACHE_H + +/* + * FIXME: find the real values! 
+ */ + +/* bytes per L1 cache line */ +#define L1_CACHE_SHIFT 5 /* FIXME: a guess */ +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif /* _ASM_VAX_CACHE_H */ diff -Nru a/include/asm-vax/cacheflush.h b/include/asm-vax/cacheflush.h --- a/include/asm-vax/cacheflush.h 1970-01-01 01:00:00 +++ b/include/asm-vax/cacheflush.h 2005-07-31 17:09:16 @@ -0,0 +1,52 @@ +#ifndef _VAX_CACHEFLUSH_H +#define _VAX_CACHEFLUSH_H + +#include + +/* + * cacheflush.h. Definitions for cache structures/routines. + * Copyright atp Jan 2001 + */ + + +/* FIXME: double check this. VAX hw ref guide pg 274 */ +/* Also see flush cache arch document by D. Mosberger */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_range(mm, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_page_to_ram(page) do { } while (0) +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(page) do { } while (0) +#define flush_dcache_mmap_unlock(page) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +/* Flushing the instruction cache is all-or-nothing on VAX. */ +#define flush_icache_range(start, end) flush_icache() +#define flush_icache_user_range(vma, pg, start, end) flush_icache() +#define flush_icache_page(vma, pg) flush_icache() + +static inline void flush_icache(void) +{ + /* + * Push a PC/PSL onto the stack so it looks like we got + * an interrupt, and then REI. 
+ */ + __asm__ ( + " movpsl -(%sp) \n" + " pushab 1f \n" + " rei \n" + "1: \n"); +} + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + flush_icache_user_range(vma, page, vaddr, len); \ + } while (0) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* _VAX_CACHEFLUSH_H */ diff -Nru a/include/asm-vax/call_std.h b/include/asm-vax/call_std.h --- a/include/asm-vax/call_std.h 1970-01-01 01:00:00 +++ b/include/asm-vax/call_std.h 2004-06-05 10:37:55 @@ -0,0 +1,37 @@ +#ifndef _ASM_VAX_CALL_STD_H +#define _ASM_VAX_CALL_STD_H + +/* + * VAX calling standard structures. Useful Descriptor formats are in + * asm/descriptor.h + * Copyright atp Nov 1998 + */ + +struct vax_arglist { + unsigned char argc; + unsigned char mbz[3]; + unsigned int argv[1]; /* Up to argc arguments - this is really + only a header... */ +}; + +/* + * Stack frame used by CALLG or CALLS instr. + * (OpenVMS Calling standard (AA-QSBBB-TE) pg. 
2-2 + */ +struct vax_call_frame { + unsigned int cond; + unsigned int mbz1:5; + unsigned int psw:10; + unsigned int mbz2:1; + unsigned int save_mask:12; + unsigned int mbz3:1; + unsigned int calls:1; + unsigned int align:2; + struct vax_arglist *saved_ap; + struct vax_call_frame *saved_fp; + unsigned int saved_pc; + unsigned int saved_reg[0]; /* Regs specified by bits in save_mask, + low regs at lower addrs */ +}; + +#endif /* _ASM_VAX_CALL_STD_H */ diff -Nru a/include/asm-vax/checksum.h b/include/asm-vax/checksum.h --- a/include/asm-vax/checksum.h 1970-01-01 01:00:00 +++ b/include/asm-vax/checksum.h 2005-03-10 01:35:35 @@ -0,0 +1,161 @@ +#ifndef _VAX_CHECKSUM_H +#define _VAX_CHECKSUM_H + +#include + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function may be called with any length and any buffer alignment + */ +extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); + + +/* + * the same as csum_partial, but copies from user space. + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +extern inline unsigned int +csum_partial_copy_from_user(const char *src, char *dst, + int len, unsigned int sum, int *errp) +{ + if (copy_from_user(dst, src, len)) { + *errp = -EFAULT; + memset(dst, 0, len); + return sum; + } + return csum_partial(dst, len, sum); +} + +extern inline unsigned int +csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum) +{ + memcpy(dst,src,len); + return csum_partial(dst, len, sum); +} + + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
+ */ + + +/* These are only used here in this function */ +#define checksum_Asm __asm __volatile +#define checksum_ADDL checksum_Asm("addl2 (%2)+,%0" : "=r" (sum) : "0" (sum), "r" (w)) +#define checksum_ADWC checksum_Asm("adwc (%2)+,%0" : "=r" (sum) : "0" (sum), "r" (w)) +#define checksum_ADDC checksum_Asm("adwc $0,%0" : "=r" (sum) : "0" (sum)) + +static inline unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) +{ + volatile register unsigned char *w; + register int len = 0; + register unsigned int sum = 0; + int i; + len=ihl; + w=iph; + + + for (i=0; i>16; + + +} + +#undef checksum_Asm +#undef checksum_ADDL +#undef checksum_ADWC +#undef checksum_ADDC + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +/* + extern unsigned short int csum_tcpudp_magic(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, + unsigned int sum); + + unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, + unsigned short len, unsigned short proto, + unsigned int sum) +*/ + +/* + * Fold a partial checksum without adding pseudo headers + */ + +extern inline unsigned short csum_fold(unsigned int sum) +{ + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return ~sum; +} + + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, + unsigned int sum) +{ + + /* in X86 this is addl %1, %0 + adcl %2, %0 + adcl %3, %0 + adcl $0, %0 */ + + __asm__("addl2 %1, %0\n" + "adwc %2, %0\n" + "adwc %3, %0\n" + "adwc $0, %0\n" + : "=r" (sum) : "g" (daddr), "g" (saddr), "g"((ntohs(len)<<16)+proto*256), "0" (sum)); + return sum; +} +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static 
inline unsigned short int csum_tcpudp_magic(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, + unsigned int sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern inline unsigned short +ip_compute_csum(unsigned char * buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#endif /* VAX_CHECKSUM_H */ diff -Nru a/include/asm-vax/clock.h b/include/asm-vax/clock.h --- a/include/asm-vax/clock.h 1970-01-01 01:00:00 +++ b/include/asm-vax/clock.h 2002-05-20 03:09:49 @@ -0,0 +1,25 @@ +#ifndef _VAX_CLOCK_H_ +#define _VAX_CLOCK_H_ + +/* + * VAX/Linux clock.h + * Definitions for VAX clocks (interval counter and time-of-day) + */ + +/* structure of ICCS register (Interval Clock Control and Status) */ + +#define ICCS_ERROR (0x80000000) /* counter overflowed, INTERRUPT + already set, write 1 to clear */ +#define ICCS_INTERRUPT (0x00000080) /* Set when ICR overflows, write 1 + to clear */ +#define ICCS_TRANSFER (0x00000040) /* Copy NICR to ICR */ +#define ICCS_SINGLESTEP (0x00000020) /* increment Interval Count Register + (ICR) by one, only if RUN is 0 */ +#define ICCS_INTENABLE (0x00000010) /* enable clock interrupts */ +#define ICCS_RUN (0x00000001) /* enable counter */ + +/* prototypes for clock handling functions in arch/vax/clock.c */ +void generic_clock_init(void); +void ka4x_clock_init(void); + +#endif /* _VAX_CLOCK_H_ */ diff -Nru a/include/asm-vax/cputime.h b/include/asm-vax/cputime.h --- a/include/asm-vax/cputime.h 1970-01-01 01:00:00 +++ b/include/asm-vax/cputime.h 2005-03-28 02:33:28 @@ -0,0 +1,7 @@ +#ifndef __VAX_CPUTIME_H +#define __VAX_CPUTIME_H + +#include + +#endif /* __VAX_CPUTIME_H */ + diff -Nru a/include/asm-vax/current.h b/include/asm-vax/current.h --- a/include/asm-vax/current.h 1970-01-01 01:00:00 +++ b/include/asm-vax/current.h 2003-07-10 01:10:17 @@ -0,0 +1,24 @@ +#ifndef 
_VAX_CURRENT_H +#define _VAX_CURRENT_H + +#include + +struct task_struct; + +/* Later, we might arrange for current to live in R11 + permanently. For now, I'm going to compute it from the + current kernel stack pointer. Note that we may be called + while on the interrupt stack, so we need to use MFPR to + read the KSP, rather than just read SP. We need to make + sure that KSP is valid early enough during boot or this + will screw up royally. */ + + +static inline struct task_struct * get_current(void) +{ + return current_thread_info()->task; +} + +#define current get_current() + +#endif /* _VAX_CURRENT_H */ diff -Nru a/include/asm-vax/delay.h b/include/asm-vax/delay.h --- a/include/asm-vax/delay.h 1970-01-01 01:00:00 +++ b/include/asm-vax/delay.h 2004-06-14 14:09:08 @@ -0,0 +1,54 @@ +#ifndef _VAX_DELAY_H +#define _VAX_DELAY_H + +#include /* for HZ */ + +extern unsigned long loops_per_sec; + +/* + * Copyright (C) 1993 Linus Torvalds + * + * Delay routines, using a pre-computed "loops_per_second" value. + * VAX port Copyright 1998 atp. + */ + +extern __inline__ void +__delay(unsigned long loops) +{ + __asm__ __volatile__( + "1: sobgtr %0, 1b \n" + : "=r"(loops) + : "0"(loops)); +} + +/* + * division by multiplication: you don't have to worry about + * loss of precision. + * + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. 
This is a "reasonable" implementation, though (and the + * first constant multiplications gets optimized away if the delay is + * a constant) + */ +extern __inline__ void +__udelay(unsigned long usecs, unsigned long lpj) +{ + struct { + unsigned long lo; + unsigned long hi; + } prod; + + usecs *= 0x000010c6UL * HZ; /* (2**32 / 1000000) * HZ */ + + __asm__( + " emul %1, %2, $0, %0 \n" + : "=g"(prod) + : "g"(usecs), "g"(lpj)); + + __delay(prod.hi); +} + +#define udelay(usecs) __udelay(usecs, loops_per_jiffy) + +#endif /* _VAX_DELAY_H */ diff -Nru a/include/asm-vax/descriptor.h b/include/asm-vax/descriptor.h --- a/include/asm-vax/descriptor.h 1970-01-01 01:00:00 +++ b/include/asm-vax/descriptor.h 2004-06-02 21:48:12 @@ -0,0 +1,50 @@ +#ifndef _VAX_DESCRIPTOR_H +#define _VAX_DESCRIPTOR_H + +/* + * Copyright atp Nov 1998. + * VAX calling standard descriptor data structure + * ref: VAX C (AA-L370D-TE) page 13-14 + * + * Written: 11/11/98 atp + */ + +/* descriptor class codes (table 13-4) subset useful for C/UNIX */ +#define DSC_K_CLASS_N 0 /* none (for symmettry) */ +#define DSC_K_CLASS_S 1 /* scalar string */ +#define DSC_K_CLASS_D 2 /* dynamic string descriptor */ +#define DSC_K_CLASS_A 4 /* array */ +#define DSC_K_CLASS_P 5 /* procedure */ +#define DSC_K_CLASS_PI 6 /* procedure incarnation (a what?) */ +#define DSC_K_CLASS_J 7 /* label */ +#define DSC_K_CLASS_JI 8 /* label incarnation (a what?) 
*/ +#define DSC_K_CLASS_SD 9 /* decimal scalar string */ +#define DSC_K_CLASS_NCA 10 /* non contiguous array */ +#define DSC_K_CLASS_VS 11 /* varying string */ +#define DSC_K_CLASS_VSA 12 /* varying string array */ +#define DSC_K_CLASS_UBS 13 /* unaligned bit string */ +#define DSC_K_CLASS_UBA 14 /* unaligned bit array */ +#define DSC_K_CLASS_SB 15 /* string with bounds descriptor */ +#define DSC_K_CLASS_UBSB 16 /* unaligned bit string w/ bounds descriptor */ +#define DSC_K_CLASS_BFA 191 /* basic file array */ + +/* descriptor atomic data types */ +#define DSC_K_DTYPE_BU 2 /* byte unsigned */ +#define DSC_K_DTYPE_WU 3 /* word unsigned */ +#define DSC_K_DTYPE_LU 4 /* longword unsigned */ +#define DSC_K_DTYPE_B 6 /* byte */ +#define DSC_K_DTYPE_W 7 /* word */ +#define DSC_K_DTYPE_L 8 /* longword */ +#define DSC_K_DTYPE_F 10 /* F floating */ +#define DSC_K_DTYPE_D 11 /* D floating */ +#define DSC_K_DTYPE_G 27 /* G floating */ + +/* generalised descriptor structure */ +struct dsc_descriptor { + u_short length; + u_char dtype; + u_char class; + void *pointer; +}; + +#endif /* _VAX_DESCRIPTOR_H */ diff -Nru a/include/asm-vax/diag_led.h b/include/asm-vax/diag_led.h --- a/include/asm-vax/diag_led.h 1970-01-01 01:00:00 +++ b/include/asm-vax/diag_led.h 2005-08-07 00:14:34 @@ -0,0 +1,35 @@ +#ifndef _VAX_ASM_DIAG_LED_H +#define _VAX_ASM_DIAG_LED_H + +/* + * If anybody wants to use this driver, please fix it for your VAX. Should + * be fairly easy with this help: + * + * $ grep DIAGDISP ../../vms-defs/lib.hex + * literal IO420$AW_DIAGDISP = 0x20080010; ! DIAGNOSTIC DISPLAY REG WRITE ONLY + * literal IO43$AW_DIAGDISP = 0x20080010; + * literal IO4A$AW_DIAGDISP = 0x20080010; + * literal IO440$AW_DIAGDISP = 0x20080010; + * literal IO46$AW_DIAGDISP = 0x20080010; + * literal IO49$AW_DIAGDISP = 0x25800004; + * literal IO1303$AW_DIAGDISP = 0x25800004; + * + * If I don't get feedback that this driver is generally useful, it may be + * removed after dz11 hacking is finished. 
--jbglaw + */ + +#define DIAG_LED_KA42_BASE 0x20080010 +#define DIAG_LED_KA43_BASE 0x20080010 +#define DIAG_LED_KA46_BASE 0x20080010 +#define DIAG_LED_KA48_BASE 0x20080010 +#define DIAG_LED_KA49_BASE 0x25800004 +#define DIAG_LED_KA52_BASE 0x20140030 +#define DIAG_LED_KA670_BASE 0x20140030 +#define DIAG_LED_VXT_BASE 0x200c1000 + +extern int diag_led_set_state (unsigned char state); +extern unsigned char diag_led_get_state (void); +extern int diag_led_on (int led_num /* 0..7 */); +extern int diag_led_off (int led_num /* 0..7 */); + +#endif /* _VAX_ASM_DIAG_LED_H */ diff -Nru a/include/asm-vax/div64.h b/include/asm-vax/div64.h --- a/include/asm-vax/div64.h 1970-01-01 01:00:00 +++ b/include/asm-vax/div64.h 2003-09-24 01:45:29 @@ -0,0 +1 @@ +#include diff -Nru a/include/asm-vax/dma-mapping.h b/include/asm-vax/dma-mapping.h --- a/include/asm-vax/dma-mapping.h 1970-01-01 01:00:00 +++ b/include/asm-vax/dma-mapping.h 2004-10-01 00:36:23 @@ -0,0 +1,133 @@ +/* + * + * FIXME: implement a sensible dma_ API for VAX + * + */ + +#ifndef _ASM_VAX_DMA_MAPPING_H +#define _ASM_VAX_DMA_MAPPING_H + +/* need struct page definitions */ +#include + +#include + +static inline int +dma_supported(struct device *dev, u64 mask) +{ + BUG(); + return 0; +} + +static inline int +dma_set_mask(struct device *dev, u64 dma_mask) +{ + BUG(); + return 0; +} + +static inline void * +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + int flag) +{ + BUG(); + return NULL; +} + +static inline void +dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle) +{ + BUG(); +} + +static inline dma_addr_t +dma_map_single(struct device *dev, void *cpu_addr, size_t size, + enum dma_data_direction direction) +{ + BUG(); + return 0; +} + +static inline void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, + 
unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + BUG(); + return 0; +} + +static inline void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + BUG(); + return 0; +} + +static inline void +dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + BUG(); +} + +/* Now for the API extensions over the pci_ one */ + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#define dma_is_consistent(d) (1) + +static inline int +dma_get_cache_alignment(void) +{ + BUG(); + return 0; +} + +static inline void +dma_sync_single_range(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_cache_sync(void *vaddr, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +#endif + diff -Nru a/include/asm-vax/dma.h b/include/asm-vax/dma.h --- a/include/asm-vax/dma.h 1970-01-01 01:00:00 +++ b/include/asm-vax/dma.h 2002-07-19 01:48:41 @@ -0,0 +1,23 @@ +#ifndef _VAX_DMA_H +#define _VAX_DMA_H 1 + + +/* This structure is used to keep track of bug map register + allocations. You get one from a bus driver's alloc_mapreg + function and release it by calling the bus driver's unmap + function. 
*/ + +struct vax_dmamap { + unsigned int reg; + unsigned int pagelets; + void *virtaddr; + unsigned int busaddr; +}; + + +/* Some peripherals are limited by their PC/ISA designs/heritage. */ + +/* max dma address is 16mb used in mm/init.c*/ +#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) + +#endif /* _VAX_DMA_H */ diff -Nru a/include/asm-vax/dz11.h b/include/asm-vax/dz11.h --- a/include/asm-vax/dz11.h 1970-01-01 01:00:00 +++ b/include/asm-vax/dz11.h 2003-10-14 02:36:38 @@ -0,0 +1,138 @@ +#ifndef _VAX_DZ11_H_ +#define _VAX_DZ11_H_ + +/* + * $Id: dz11.h,v 1.6 2003/10/14 00:36:38 kenn Exp $ + * + * Copyright 2000, Kenn Humborg + * + * Definitions for DZ11-compatible RS232 I/O chips. If all this + * works, this will obsolete all the DC7085 stuff. + * + * Jan 2001 atp - updated to 2.4. + * + * Reference: DZQ11 Asynchronous Multiplexer Users Guide + * EK-DZQ11-UG-002 + * August 1997 + * + */ + +#include +#include +#include + +struct dz11_regs { + u_short csr; /* READ/WRITE: Control & status register */ + u_short padding1; + union { + u_short rbuf; /* READ ONLY: receiver buffer */ + u_short lpr; /* WRITE ONLY: line parameter register */ + } rbuf_lpr; + u_short padding2; + ushort tcr; /* READ/WRITE: transmitter control register */ + u_short padding3; + union { + u_short msr; /* READ ONLY: modem status register */ + u_short tdr; /* WRITE ONLY: transmit data register */ + } msr_tdr; + u_short padding4; +}; + +/* This gets set to non-NULL once the I/O page has been mapped */ +extern volatile struct dz11_regs *dz11_addr; + +/* Definitions of bits in CSR */ +#define DZ11_CSR_MAINT 0x0008 /* read/write */ +#define DZ11_CSR_CLR 0x0010 /* read/write */ +#define DZ11_CSR_MSE 0x0020 /* read/write */ +#define DZ11_CSR_RIE 0x0040 /* read/write */ +#define DZ11_CSR_RDONE 0x0080 /* read only */ +#define DZ11_CSR_TLINEA 0x0100 /* read only */ +#define DZ11_CSR_TLINEB 0x0200 /* read only */ +#define DZ11_CSR_SAE 0x1000 /* read/write */ +#define DZ11_CSR_SA 0x2000 /* read only */ +#define 
DZ11_CSR_TIE 0x4000 /* read/write */ +#define DZ11_CSR_TRDY 0x8000 /* read only */ + +/* Definitions of bits in RBUF - all bits read only */ +#define DZ11_RBUF_DATA_MASK 0x00ff +#define DZ11_RBUF_RXLINEA 0x0100 +#define DZ11_RBUF_RXLINEB 0x0200 +#define DZ11_RBUF_PARERR 0x1000 +#define DZ11_RBUF_FRAMERR 0x2000 +#define DZ11_RBUF_OVRNERR 0x4000 +#define DZ11_RBUF_DATAVALID 0x8000 + +/* Definitions of bits in LPR - all bits write only */ +#define DZ11_LPR_LINEA 0x0001 +#define DZ11_LPR_LINEB 0x0002 +#define DZ11_LPR_CHARLGTHA 0x0008 +#define DZ11_LPR_CHARLGTHB 0x0010 +#define DZ11_LPR_STOPCODE 0x0020 +#define DZ11_LPR_PARENAB 0x0040 +#define DZ11_LPR_ODDPAR 0x0080 +#define DZ11_LPR_SPEEDCODEA 0x0100 +#define DZ11_LPR_SPEEDCODEB 0x0200 +#define DZ11_LPR_SPEEDCODEC 0x0400 +#define DZ11_LPR_SPEEDCODED 0x0800 +#define DZ11_LPR_RXENAB 0x1000 + +/* Line number definitions for LPR register */ +#define DZ11_LPR_LINE0 0 +#define DZ11_LPR_LINE1 1 +#define DZ11_LPR_LINE2 2 +#define DZ11_LPR_LINE3 3 + +/* Char length definitions for LPR register */ +#define DZ11_CHARLGTH_5 0x0000 +#define DZ11_CHARLGTH_6 0x0008 +#define DZ11_CHARLGTH_7 0x0010 +#define DZ11_CHARLGTH_8 0x0018 + +/* Speed code definitions for LPR register */ +#define DZ11_SPEED_50 0x0000 +#define DZ11_SPEED_75 0x0100 +#define DZ11_SPEED_110 0x0200 +#define DZ11_SPEED_134_5 0x0300 +#define DZ11_SPEED_150 0x0400 +#define DZ11_SPEED_300 0x0500 +#define DZ11_SPEED_600 0x0600 +#define DZ11_SPEED_1200 0x0700 +#define DZ11_SPEED_1800 0x0800 +#define DZ11_SPEED_2000 0x0900 +#define DZ11_SPEED_2400 0x0a00 +#define DZ11_SPEED_3600 0x0b00 +#define DZ11_SPEED_4800 0x0c00 +#define DZ11_SPEED_7200 0x0d00 +#define DZ11_SPEED_9600 0x0e00 +#define DZ11_SPEED_19800 0x0f00 + +/* Definitions of bits in TCR - all bits read/write */ +#define DZ11_TCR_LINEENAB0 0x0001 +#define DZ11_TCR_LINEENAB1 0x0002 +#define DZ11_TCR_LINEENAB2 0x0004 +#define DZ11_TCR_LINEENAB3 0x0008 +#define DZ11_TCR_DTR0 0x0100 +#define DZ11_TCR_DTR1 0x0200 
+#define DZ11_TCR_DTR2 0x0400 +#define DZ11_TCR_DTR3 0x0800 + +/* Definitions of bits in MSR - all bits read only */ +#define DZ11_MSR_RI0 0x0001 +#define DZ11_MSR_RI1 0x0002 +#define DZ11_MSR_RI2 0x0004 +#define DZ11_MSR_RI3 0x0008 +#define DZ11_MSR_CD0 0x0100 +#define DZ11_MSR_CD1 0x0200 +#define DZ11_MSR_CD2 0x0400 +#define DZ11_MSR_CD3 0x0800 + +/* Definitions of bits in TDR - all bits write only */ +#define DZ11_TDR_DATA_MASK 0x00ff +#define DZ11_TDR_BRK0 0x0100 +#define DZ11_TDR_BRK1 0x0200 +#define DZ11_TDR_BRK2 0x0400 +#define DZ11_TDR_BRK3 0x0800 + + +#endif /* _VAX_DZ11_H_ */ diff -Nru a/include/asm-vax/elf.h b/include/asm-vax/elf.h --- a/include/asm-vax/elf.h 1970-01-01 01:00:00 +++ b/include/asm-vax/elf.h 2003-10-10 23:04:50 @@ -0,0 +1,118 @@ +#ifndef __ASM_VAX_ELF_H +#define __ASM_VAX_ELF_H + +/* + * ELF register definitions.. + * + * added atp Jan 2001. + * + */ + +#include +#include + +#include + +#define R_VAX_32 1 +#define R_VAX_PC32 4 +#define R_VAX_GOT32 7 +#define R_VAX_PLT32 13 + +typedef unsigned long elf_greg_t; + +/* 15 general registers plus pc */ +#define ELF_NGREG 16 +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; +/* FIXME: fpregs. */ +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[2]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_VAX) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_VAX + +/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx + contains a pointer to a function which might be registered using `atexit'. + This provides a mean for the dynamic linker to call DT_FINI functions for + shared libraries that have been loaded before the code runs. + + A value of 0 tells we have no such handler. + + Alpha uses this as an atexit flag too, and so shall we. 
+ + */ +#define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0 + + +#define USE_ELF_CORE_DUMP +/* hmm, pagelets.... */ +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +/* FIXME: A more sensible solution, given our need to keep virtual + * address space reasonably contiguous would be good. atp jan 2001 */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* Wow, the "main" arch needs arch dependent functions too.. :) */ + +/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is + now struct_user_regs, they are different) + * - atp at present (jan 2001) struct pt_regs and struct user_regs_struct + * (see ptrace.h and user.h) are both the same. So this is a null op really. + * However I'm keeping it here for when we need to modify it. + */ + +/*#define ELF_CORE_COPY_REGS(pr_reg, regs) \ + pr_reg[0] = regs->r0; \ + pr_reg[1] = regs->r1; \ + pr_reg[2] = regs->r2; \ + pr_reg[3] = regs->r3; \ + pr_reg[4] = regs->r4; \ + pr_reg[5] = regs->r5; \ + pr_reg[6] = regs->r6; \ + pr_reg[7] = regs->r7; \ + pr_reg[8] = regs->r8; \ + pr_reg[9] = regs->r9; \ + pr_reg[10] = regs->r10; \ + pr_reg[11] = regs->r11; \ + pr_reg[12] = regs->ap; \ + pr_reg[13] = regs->fp; \ + pr_reg[14] = regs->sp; \ + pr_reg[15] = regs->pc; \ + pr_reg[16] = regs->psl; +*/ +/* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. */ + +/* FIXME: This needs an addition to the vax mv structure, + * if there is any point in testing for subsetting etc..*/ +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. 
This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (system_utsname.machine) + +#ifdef __KERNEL__ +#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) +#endif + +#endif diff -Nru a/include/asm-vax/errno.h b/include/asm-vax/errno.h --- a/include/asm-vax/errno.h 1970-01-01 01:00:00 +++ b/include/asm-vax/errno.h 2005-08-14 16:10:55 @@ -0,0 +1,6 @@ +#ifndef _VAX_ERRNO_H +#define _VAX_ERRNO_H + +#include + +#endif /* _VAX_ERRNO_H */ diff -Nru a/include/asm-vax/fcntl.h b/include/asm-vax/fcntl.h --- a/include/asm-vax/fcntl.h 1970-01-01 01:00:00 +++ b/include/asm-vax/fcntl.h 2004-09-03 00:41:50 @@ -0,0 +1,90 @@ +#ifndef _VAX_FCNTL_H +#define _VAX_FCNTL_H + +/* atp 1998: FIXME: double check all this later */ + +/* open/fcntl - O_SYNC is only implemented on blocks devices and on files + located on an ext2 file system */ +#define O_ACCMODE 0003 +#define O_RDONLY 00 +#define O_WRONLY 01 +#define O_RDWR 02 +#define O_CREAT 0100 /* not fcntl */ +#define O_EXCL 0200 /* not fcntl */ +#define O_NOCTTY 0400 /* not fcntl */ +#define O_TRUNC 01000 /* not fcntl */ +#define O_APPEND 02000 +#define O_NONBLOCK 04000 +#define O_NDELAY O_NONBLOCK +#define O_SYNC 010000 +#define FASYNC 020000 /* fcntl, for BSD compatibility */ +#define O_DIRECT 040000 /* direct disk access hint - currently ignored */ +#define O_LARGEFILE 0100000 +#define O_DIRECTORY 0200000 /* must be a directory */ +#define O_NOFOLLOW 0400000 /* don't follow links */ +#define O_NOATIME 01000000 + +#define F_DUPFD 0 /* dup */ +#define F_GETFD 1 /* get f_flags */ +#define F_SETFD 2 /* set f_flags */ +#define F_GETFL 3 /* more flags (cloexec) */ +#define F_SETFL 4 +#define F_GETLK 5 +#define F_SETLK 6 +#define F_SETLKW 7 + +#define F_SETOWN 8 /* for sockets. */ +#define F_GETOWN 9 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. 
*/ +#define F_GETSIG 11 /* for sockets. */ + +#define F_GETLK64 12 /* using 'struct flock64' */ +#define F_SETLK64 13 +#define F_SETLKW64 14 + +/* for F_[GET|SET]FL */ +#define FD_CLOEXEC 1 /* actually anything with low bit set goes */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 0 +#define F_WRLCK 1 +#define F_UNLCK 2 + +/* for old implementation of bsd flock () */ +#define F_EXLCK 4 /* or 3 */ +#define F_SHLCK 8 /* or 4 */ + +/* for leases */ +#define F_INPROGRESS 16 + +/* operations for bsd flock(), also used by the kernel implementation */ +#define LOCK_SH 1 /* shared lock */ +#define LOCK_EX 2 /* exclusive lock */ +#define LOCK_NB 4 /* or'd with one of the above to prevent + blocking */ +#define LOCK_UN 8 /* remove lock */ + +#define LOCK_MAND 32 /* This is a mandatory flock */ +#define LOCK_READ 64 /* ... Which allows concurrent read operations */ +#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */ +#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */ + +struct flock { + short l_type; + short l_whence; + off_t l_start; + off_t l_len; + pid_t l_pid; +}; + +struct flock64 { + short l_type; + short l_whence; + loff_t l_start; + loff_t l_len; + pid_t l_pid; +}; + +#define F_LINUX_SPECIFIC_BASE 1024 + +#endif /* _VAX_FCNTL_H */ diff -Nru a/include/asm-vax/hardirq.h b/include/asm-vax/hardirq.h --- a/include/asm-vax/hardirq.h 1970-01-01 01:00:00 +++ b/include/asm-vax/hardirq.h 2005-07-31 20:12:44 @@ -0,0 +1,34 @@ +#ifndef _ASM_VAX_HARDIRQ_H +#define _ASM_VAX_HARDIRQ_H + +/* FIXME: copied from arm port (or any except i386/alpha ) */ +/* stubs for irq scb code */ + +#include +#include +#include + +/* assembly code in softirq.h is sensitive to the offsets of these fields */ +typedef struct { + unsigned int __softirq_pending; + unsigned int __local_irq_count; + unsigned int __local_bh_count; + unsigned int __syscall_count; + struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ +} ____cacheline_aligned 
irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +/* Enough space to represent out 384 IRQs. */ +#define HARDIRQ_BITS 10 /* was: 15 -- jbglaw@lug-owl.de, 20050731 */ + +/* + * The hardirq mask has to be large enough to have space + * for potentially all IRQ sources in the system nesting + * on a single CPU: + */ +#if (1 << HARDIRQ_BITS) < NR_IRQS +# error "HARDIRQ_BITS is too low!" +#endif + +#endif /* _ASM_VAX_HARDIRQ_H */ diff -Nru a/include/asm-vax/hw_irq.h b/include/asm-vax/hw_irq.h --- a/include/asm-vax/hw_irq.h 1970-01-01 01:00:00 +++ b/include/asm-vax/hw_irq.h 2002-05-20 02:33:39 @@ -0,0 +1,15 @@ +/* + * atp March 2002 for linux/irq.h + * + * FIXME: This may be a very good fit for our interrupt regime. + * Better than that of linux 2.2 + */ +#ifndef _ASMVAX_HW_IRQ_H +#define _ASMVAX_HW_IRQ_H + +/* This may not be apropriate for all machines, we'll see ... */ +static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) +{ +} + +#endif /* _ASMVAX_HW_IRQ_H */ diff -Nru a/include/asm-vax/io.h b/include/asm-vax/io.h --- a/include/asm-vax/io.h 1970-01-01 01:00:00 +++ b/include/asm-vax/io.h 2005-07-31 18:12:23 @@ -0,0 +1,52 @@ +#ifndef __VAX_IO_H +#define __VAX_IO_H +/* atp Jan 2001, junked old io.h. + * wasnt sure about old one, so it was easiest to + * start afresh. + */ + + +#ifdef __KERNEL__ + +#include +#include +#include /* virt_to_phys stuff */ + + +/* FIXME: What is the proper value for this ? */ + +#define IO_SPACE_LIMIT 0xffffffff + +#define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x))) +#define __io_phys(x) ((unsigned long)(x) & ~PAGE_OFFSET) + +/* + * readX/writeX() are used to access memory mapped devices. On some + * architectures the memory mapped IO stuff needs to be accessed + * differently. 
+ */ + +#define readb(addr) (*(volatile unsigned char *) __io_virt(addr)) +#define readw(addr) (*(volatile unsigned short *) __io_virt(addr)) +#define readl(addr) (*(volatile unsigned int *) __io_virt(addr)) + +#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b)) +#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b)) +#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b)) + +#define memset_io(a,b,c) memset(__io_virt(a),(b),(c)) +#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c)) +#define memcpy_toio(a,b,c) memcpy(__io_virt(a),(b),(c)) + +#define inb_p(addr) readb(addr) +#define inb(addr) readb(addr) + +#define outb(x, addr) ((void) writeb(x, addr)) +#define outb_p(x, addr) outb(x, addr) + +#define xlate_dev_mem_ptr(p) __va(p) +#define xlate_dev_kmem_ptr(p) (p) + +#endif /* __KERNEL__ */ + +#endif /* __VAX_IO_H */ diff -Nru a/include/asm-vax/ioctl.h b/include/asm-vax/ioctl.h --- a/include/asm-vax/ioctl.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ioctl.h 2002-05-20 02:33:39 @@ -0,0 +1,62 @@ +#ifndef _VAX_IOCTL_H +#define _VAX_IOCTL_H + + +/* + * this was copied from the alpha as it's a bit cleaner there. + * -- Cort + * If it's good enough for him, it's good enough for us! + * -- Andy + * + */ + +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 13 +#define _IOC_DIRBITS 3 + +#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) +#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) +#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) +#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +/* + * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. + * And this turns out useful to catch old ioctl numbers in header + * files for us. 
+ */ +#define _IOC_NONE 1U +#define _IOC_READ 2U +#define _IOC_WRITE 4U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +/* used to create numbers */ +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +/* used to decode them.. */ +#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) +#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) +#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) +#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) + +/* various drivers, such as the pcmcia stuff, need these... */ +#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) +#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) +#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) +#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) +#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) + +#endif /* _VAX_IOCTL_H */ diff -Nru a/include/asm-vax/ioctls.h b/include/asm-vax/ioctls.h --- a/include/asm-vax/ioctls.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ioctls.h 2003-01-07 01:26:06 @@ -0,0 +1,84 @@ +#ifndef __ASMVAX_IOCTLS_H__ +#define __ASMVAX_IOCTLS_H__ + +/* from asm-i386 (atp 1998) */ +#include + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TCGETS 0x5401 +#define TCSETS 0x5402 +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 +#define TCGETA 0x5405 +#define TCSETA 0x5406 +#define TCSETAW 0x5407 +#define TCSETAF 0x5408 +#define TCSBRK 0x5409 +#define TCXONC 0x540A +#define TCFLSH 0x540B +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E +#define TIOCGPGRP 0x540F +#define TIOCSPGRP 0x5410 +#define TIOCOUTQ 0x5411 +#define TIOCSTI 0x5412 +#define TIOCGWINSZ 0x5413 +#define 
TIOCSWINSZ 0x5414 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define FIONREAD 0x541B +#define TIOCINQ FIONREAD +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define FIONBIO 0x5421 +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ +#define FIOCLEX 0x5451 +#define FIOASYNC 0x5452 +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 + +/* Used for packet mode */ +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 + +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + +#endif /* ASMVAX_IOCTLS_H 
*/ diff -Nru a/include/asm-vax/ioprobe.h b/include/asm-vax/ioprobe.h --- a/include/asm-vax/ioprobe.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ioprobe.h 2004-06-02 21:53:34 @@ -0,0 +1,16 @@ +#ifndef __VAX_IOPROBE_H +#define __VAX_IOPROBE_H + +/* + * Functions for checking if addresses in IO space exist. Used + * to probe for devices. + * + * Inspired by NetBSD/vax. + */ + +/* Returns 1 if address is valid, 0 otherwise */ +extern int iospace_probeb(void *virt_addr); +extern int iospace_probew(void *virt_addr); +extern int iospace_probel(void *virt_addr); + +#endif /* __VAX_IOPROBE_H */ diff -Nru a/include/asm-vax/ipc.h b/include/asm-vax/ipc.h --- a/include/asm-vax/ipc.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ipc.h 2004-06-02 21:56:13 @@ -0,0 +1,27 @@ +#ifndef _VAX_IPC_H +#define _VAX_IPC_H + +/* + * $Id: ipc.h,v 1.5 2004/06/02 19:56:13 jbglaw Exp $ + * + * VAX version + * + * Derived from "include/asm-s390/ipc.h" + */ + +#define SEMOP 1 +#define SEMGET 2 +#define SEMCTL 3 +#define MSGSND 11 +#define MSGRCV 12 +#define MSGGET 13 +#define MSGCTL 14 +#define SHMAT 21 +#define SHMDT 22 +#define SHMGET 23 +#define SHMCTL 24 + +/* Used by the DIPC package, try and avoid reusing it */ +#define DIPC 25 + +#endif /* _VAX_IPC_H */ diff -Nru a/include/asm-vax/ipcbuf.h b/include/asm-vax/ipcbuf.h --- a/include/asm-vax/ipcbuf.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ipcbuf.h 2002-05-20 02:33:39 @@ -0,0 +1,29 @@ +#ifndef __VAX_IPCBUF_H__ +#define __VAX_IPCBUF_H__ + +/* + * The ipc64_perm structure for VAX architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 32-bit mode_t and seq + * - 2 miscellaneous 32-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned short __pad1; + unsigned short seq; + unsigned short __pad2; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* __VAX_IPCBUF_H__ */ diff -Nru a/include/asm-vax/irq.h b/include/asm-vax/irq.h --- a/include/asm-vax/irq.h 1970-01-01 01:00:00 +++ b/include/asm-vax/irq.h 2003-10-13 02:41:15 @@ -0,0 +1,58 @@ +#ifndef _ASM_VAX_IRQ_H +#define _ASM_VAX_IRQ_H + + +#ifndef irq_canonicalize +#define irq_canonicalize(i) (i) +#endif + +#ifndef NR_IRQS +#define NR_IRQS 384 +#endif + +/* + * Use this value to indicate lack of interrupt + * capability + */ +#ifndef NO_IRQ +#define NO_IRQ 1023 +#endif + +/* atp Jul 01 increased this to three pages - my M38 needs it for some reason */ +#define INT_STACK_SIZE 12288 + +#ifndef __ASSEMBLY__ + +#include /* for NR_CPUS */ +#include /* for struct psl_fields */ +#include /* for struct pt_regs */ + +extern void disable_irq(unsigned int); +extern void enable_irq(unsigned int); + +extern unsigned char interrupt_stack[NR_CPUS][INT_STACK_SIZE] ; + +/* Called at init to make a guard page at the bottom of the + interrupt stack */ +void guard_int_stack(void); + +/* Most exceptions and interrupts leave PC and PSL saved on + the stack. */ + +struct excep_pc_psl { + void *pc; + struct psl_fields psl; +}; + +/* This function registers the handler for an exception. 
*/ + +int register_excep_handler(unsigned int vec_num, + char *exception_name, + void (*handler)(struct pt_regs *, void *), + unsigned int excep_info_size, + unsigned int use_interrupt_stack); + +#endif /* !ASSEMBLY */ + +#endif + diff -Nru a/include/asm-vax/ka43.h b/include/asm-vax/ka43.h --- a/include/asm-vax/ka43.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ka43.h 2005-04-20 20:58:57 @@ -0,0 +1,139 @@ +#ifndef _VAX_KA43_H +#define _VAX_KA43_H + +/* + * $Id: ka43.h,v 1.7 2005/04/20 18:58:57 kenn Exp $ + * + * Definitions for KA43 CPU (VAXstation 3100m76). + * + * Taken from NetBSD + * + * atp. jun 01 machine check stuff lifted from NetBSD. Thanks ragge!. + * + */ + +/* Fixed addresses in CPU's physical memory map */ + +#define KA43_CH2_BASE 0x10000000 /* 2nd level cache data area */ +#define KA43_CH2_END 0x1FFFFFFF +#define KA43_CH2_SIZE 0x10000000 + +#define KA43_CPU_BASE 0x20080000 /* so called "CPU registers" */ +#define KA43_CPU_END 0x200800FF +#define KA43_CPU_SIZE 0x100 + +#define KA43_NWA_BASE 0x20090000 /* Network Address ROM */ +#define KA43_NWA_END 0x2009007F +#define KA43_NWA_SIZE 0x80 + +#define KA43_SER_BASE 0x200A0000 /* Serial line controller */ +#define KA43_SER_END 0x200A000F +#define KA43_SER_SIZE 0x10 + +#define KA43_WAT_BASE 0x200B0000 /* TOY clock and NV-RAM */ +#define KA43_WAT_END 0x200B00FF +#define KA43_WAT_SIZE 0x100 + +#define KA43_SC1_BASE 0x200C0080 /* 1st SCSI Controller Chip */ +#define KA43_SC1_END 0x200C009F +#define KA43_SC1_SIZE 0x20 + +#define KA43_SC2_BASE 0x200C0180 /* 2nd SCSI Controller Chip */ +#define KA43_SC2_END 0x200C019F +#define KA43_SC2_SIZE 0x20 + +#define KA43_SCS_BASE 0x200C0000 /* area occupied by SCSI 1+2 */ +#define KA43_SCS_END 0x200C01FF +#define KA43_SCS_SIZE 0x200 + +#define KA43_LAN_BASE 0x200E0000 /* LANCE chip registers */ +#define KA43_LAN_END 0x200E0007 +#define KA43_LAN_SIZE 0x08 + +#define KA43_CUR_BASE 0x200F0000 /* Monochrome video cursor chip */ +#define KA43_CUR_END 0x200F003C +#define 
KA43_CUR_SIZE 0x40 + +#define KA43_DMA_BASE 0x202D0000 /* 128KB Data Buffer */ +#define KA43_DMA_END 0x202EFFFF +#define KA43_DMA_SIZE 0x20000 + +#define KA43_CT2_BASE 0x21000000 /* 2nd level cache tag area */ +#define KA43_CT2_END 0x2101FFFF +#define KA43_CT2_SIZE 0x20000 +#define KA43_CH2_CREG 0x21100000 /* 2nd level cache control register */ + +#define KA43_VME_BASE 0x30000000 +#define KA43_VME_END 0x3003FFFF +#define KA43_VME_SIZE 0x40000 + +#define KA43_DIAGMEM 0x28000000 + +/* Cache defines Primary Cachce */ +#define KA43_PCS_ENABLE 0x00000002 /* Enable primary cache */ +#define KA43_PCS_FLUSH 0x00000004 /* Flush cache */ +#define KA43_PCS_REFRESH 0x00000008 /* Enable refresh */ + +#define KA43_PCTAG_TAG 0x1FFFF800 /* bits 11-29 */ +#define KA43_PCTAG_PARITY 0x40000000 +#define KA43_PCTAG_VALID 0x80000000 + +/* Cache define Secondary Cache */ +#define KA43_SESR_CENB 0x00000001 /* Cache Enable */ +#define KA43_SESR_SERR 0x00000002 +#define KA43_SESR_LERR 0x00000004 +#define KA43_SESR_CERR 0x00000008 +#define KA43_SESR_DIRTY 0x00000010 +#define KA43_SESR_MISS 0x00000020 +#define KA43_SESR_DPE 0x00000040 /* Dal Parity Error */ +#define KA43_SESR_TPE 0x00000080 /* Tag Parity Error */ +#define KA43_SESR_WSB 0x00010000 +#define KA43_SESR_CIEA 0x7FFC0000 + + +#define PR_PCTAG 124 +#define PR_PCIDX 125 +#define PC_PCERR 126 +#define PR_PCSTS 127 + +/* Bits in ka43_cpu_regs.parctl */ +#define KA43_PCTL_DPEN 0x00000001 /* DMA parity enable (bit 0) */ +#define KA43_PCTL_CPEN 0x00000002 /* CPU Parity enable (bit 1) */ +#define KA43_PCTL_DMA 0x01000000 /* LANCE DMA control (bit 24) */ + +#ifndef __ASSEMBLY__ + +struct ka43_cpu_regs { + unsigned long hltcod; /* Halt Code Register */ + unsigned long pad2; + unsigned long pad3; + unsigned char intreg[4]; /* Four 1-byte registers */ + unsigned short diagdsp; /* Diagnostic display register */ + unsigned short pad4; + unsigned long parctl; /* Parity Control Register */ + unsigned short pad5; + unsigned short pad6; + unsigned 
short pad7; + unsigned short diagtme; /* Diagnostic time register */ +}; + +struct ka43_mcframe { /* Format of KA43 machine check frame: */ + int mc43_bcnt; /* byte count, always 24 (0x18) */ + int mc43_code; /* machine check type code and restart bit */ + int mc43_addr; /* most recent (faulting?) virtual address */ + int mc43_viba; /* contents of VIBA register */ + int mc43_sisr; /* ICCS bit 6 and SISR bits 15:0 */ + int mc43_istate; /* internal state */ + int mc43_sc; /* shift count register */ + int mc43_pc; /* trapped PC */ + int mc43_psl; /* trapped PSL */ +}; + +#define KA43_MC_RESTART 0x00008000 /* Restart possible*/ +#define KA43_PSL_FPDONE 0x00010000 /* First Part Done */ + +#endif /* __ASSEMBLY */ + +extern void ka43_diagmem_remap(unsigned long int address, unsigned long int size); + +#endif /* _VAX_KA43_H */ diff -Nru a/include/asm-vax/ka46.h b/include/asm-vax/ka46.h --- a/include/asm-vax/ka46.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ka46.h 2004-06-02 22:00:42 @@ -0,0 +1,32 @@ +#ifndef _KA46_H +#define _KA46_H + +/* + * $Id: ka46.h,v 1.5 2004/06/02 20:00:42 jbglaw Exp $ + * + * definitions for KA46 CPU + */ + +#define KA46_CCR 0x23000000 + +#define KA46_CCR_CENA 0x00000001 +#define KA46_CCR_SPECIO 0x00000010 + +#define KA46_BWF0 0x20080014 +#define KA46_BWF0_FEN 0x01000000 + +/* memory addresses of interest */ +#define KA46_INVFLT 0x20200000 +#define KA46_INVFLTSZ 32768 +#define KA46_CCR 0x23000000 +#define KA46_TAGST 0x2d000000 +#define KA46_TAGSZ 32768 +#define KA46_DMAMAP 0x20080008 + +/* IPR bits definitions */ +#define PCSTS_FLUSH 4 +#define PCSTS_ENABLE 2 +#define PCTAG_PARITY 0x80000000 +#define PCTAG_VALID 1 + +#endif /* _KA46_H */ diff -Nru a/include/asm-vax/ka48.h b/include/asm-vax/ka48.h --- a/include/asm-vax/ka48.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ka48.h 2004-02-26 12:30:09 @@ -0,0 +1,36 @@ +#ifndef __KA48_H__ +#define __KA48_H__ + +/* + * $Id: ka48.h,v 1.1 2004/02/26 11:30:09 jbglaw Exp $ + */ + +#define KA48_CCR 0x23000000 
+#define KA48_CCR_CENA 0x00000001 +#define KA48_CCR_SPECIO 0x00000010 + +#define KA48_BWF0 0x20080014 +#define KA48_BWF0_FEN 0x01000000 + +/* Memory addresses of interest */ +#define KA48_INVFLT 0x20200000 +#define KA48_INVFLTSZ 16384 +#define KA48_CCR 0x23000000 +#define KA48_TAGST 0x2d000000 +#define KA48_TAGSZ 32768 +#define KA48_DMAMAP 0x20080008 + +/* IPR bits definitions */ +#define PCSTS_FLUSH 4 +#define PCSTS_ENABLE 2 +#define PCTAG_PARITY 0x80000000 +#define PCTAG_VALID 1 + +/* From OpenVMS $IO440DEF & $KA440DEF */ +#define KA48_PARCTL 0x20080014 +#define KA48_PARCTL_CPEN 0x00000001 /* CPU Parity Eanble? */ +#define KA48_PARCTL_NPEN 0x00000100 /* ?? Parity Enable */ +#define KA48_PARCTL_INVENA 0x01000000 /* Invalid ? Enable */ +#define KA48_PARCTL_AGS 0x02000000 /* ??? */ + +#endif /* __KA48_H__ */ diff -Nru a/include/asm-vax/kmap_types.h b/include/asm-vax/kmap_types.h --- a/include/asm-vax/kmap_types.h 1970-01-01 01:00:00 +++ b/include/asm-vax/kmap_types.h 2004-06-02 22:02:23 @@ -0,0 +1,21 @@ +#ifndef _VAX_KMAP_TYPES_H +#define _VAX_KMAP_TYPES_H + +enum km_type { + KM_BOUNCE_READ, + KM_SKB_SUNRPC_DATA, + KM_SKB_DATA_SOFTIRQ, + KM_USER0, + KM_USER1, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, + KM_PTE0, + KM_PTE1, + KM_IRQ0, + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, + KM_TYPE_NR, +}; + +#endif /* _VAX_KMAP_TYPES_H */ diff -Nru a/include/asm-vax/linkage.h b/include/asm-vax/linkage.h --- a/include/asm-vax/linkage.h 1970-01-01 01:00:00 +++ b/include/asm-vax/linkage.h 2005-05-11 00:47:30 @@ -0,0 +1,7 @@ +#ifndef _VAX_LINKAGE_H +#define _VAX_LINKAGE_H + +#define __ALIGN .balign 2 +#define __ALIGN_STR ".balign 2" + +#endif /* _VAX_LINKAGE_H */ diff -Nru a/include/asm-vax/local.h b/include/asm-vax/local.h --- a/include/asm-vax/local.h 1970-01-01 01:00:00 +++ b/include/asm-vax/local.h 2003-10-01 22:15:41 @@ -0,0 +1,6 @@ +#ifndef __VAX_LOCAL_H +#define __VAX_LOCAL_H + +#include + +#endif /* __VAX_LOCAL_H */ diff -Nru a/include/asm-vax/mc146818rtc.h 
b/include/asm-vax/mc146818rtc.h --- a/include/asm-vax/mc146818rtc.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mc146818rtc.h 2004-06-02 22:04:31 @@ -0,0 +1,49 @@ +#ifndef _ASM_MC146818RTC_H +#define _ASM_MC146818RTC_H + +/* + * mc146818.h. macros for /dev/rtc (see drivers/char/rtc.c + * and functions in arch/vax/kernel/time.c + * + * Copyright atp Mar 2002. + * + * Adapted from asm-mips/mc146818rtc.h for decstations. + * + * There are two types of "hardware clock" for the VAX family of + * systems. These are the TODR (time of day register) used on the + * big vaxes, and the standard CMOS clock, which is based on the + * familiar dallas chip, used in the desktop vaxes (KA41, 42, etc.. + * + * This file only addresses the desktop vax CMOS clock. + */ +#include +#include /* machine vector */ + +#ifndef RTC_PORT +#define RTC_PORT(x) ((x)) +#endif + + +/* access macros for the clock page. */ +/* -- we should have these in the mv too, perhaps with a + * single rtc_ops structure like the mips, which the + * mv entry can point at. However at the mo, we only + * have a real implementation for the ka4x cmos clock. 
+ */ +unsigned char ka4x_clock_read(unsigned long addr); +void ka4x_clock_write(unsigned char val, unsigned long addr); + +#define CMOS_READ(addr) ({ \ + ka4x_clock_read(addr); \ +}) + +#define CMOS_WRITE(val, addr) ({ \ + ka4x_clock_write(val, addr); \ +}) + +#define RTC_ALWAYS_BCD 0 + +/* for the time being, unless there is a vsbus int for it */ +#define RTC_IRQ 0 + +#endif /* _ASM_MC146818RTC_H */ diff -Nru a/include/asm-vax/mm/mmu_context.h b/include/asm-vax/mm/mmu_context.h --- a/include/asm-vax/mm/mmu_context.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/mmu_context.h 2003-09-24 01:45:55 @@ -0,0 +1,72 @@ +#ifndef _ASM_VAX_MMU_CONTEXT_H +#define _ASM_VAX_MMU_CONTEXT_H +/* atp Jan 2001 */ + +#include + +/* mmu_contexts are part of process control block */ + +#define init_new_context(tsk,mm) 0 + +#define destroy_context(mm) flush_tlb_mm(mm) + +static inline void set_vaxmm_regs_p0(pgd_t *pgdp) +{ + __mtpr(pgdp->br, PR_P0BR); + __mtpr( (pgdp->lr * 8), PR_P0LR); +} + +static inline void set_vaxmm_regs_p1(pgd_t *pgdp) +{ + __mtpr(pgdp->br, PR_P1BR); + __mtpr( (pgdp->lr * 8), PR_P1LR); +} + +static inline void set_vaxmm_regs(pgd_t *pgdp) +{ + __mtpr((pgdp[0]).br, PR_P0BR); + __mtpr( ((pgdp[0]).lr * 8), PR_P0LR); + __mtpr((pgdp[1]).br, PR_P1BR); + __mtpr( ((pgdp[1]).lr * 8), PR_P1LR); +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +/* + * switch_mm implementation + * copy the thread P0/P1 registers into the corresponding CPU registers + * - not sure if this is complete - D.A. 
May 2001 + */ + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + int cpu = smp_processor_id(); + if (prev!=next) { + clear_bit(cpu, &prev->cpu_vm_mask); + + tsk->thread.pcb.p0br = (next->pgd[0]).br; + tsk->thread.pcb.p0lr = (next->pgd[0]).lr * 8 /*| 0x04000000*/; + tsk->thread.pcb.p1br = (next->pgd[1]).br; + tsk->thread.pcb.p1lr = (next->pgd[1]).lr * 8; + + set_vaxmm_regs(next->pgd); + + flush_tlb_all(); + } + set_bit(cpu, &next->cpu_vm_mask); + +} + +extern inline void activate_mm(struct mm_struct *prev, + struct mm_struct *next) +{ + switch_mm(prev, next, current); +} + +#define deactivate_mm(tsk,mm) do { } while (0) + +#endif diff -Nru a/include/asm-vax/mm/page.h b/include/asm-vax/mm/page.h --- a/include/asm-vax/mm/page.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/page.h 2003-08-03 14:45:08 @@ -0,0 +1,86 @@ +#ifndef _VAX_MM_PAGE_H +#define _VAX_MM_PAGE_H + +/* PAGE_SIZE definitions */ +#include + +/* This was based on the alpha version */ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#define STRICT_MM_TYPECHECKS + +/* Pure 2^n version of get_order */ +extern __inline__ int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + + +/* The hardware pushes this info on the stack when an access violation + occurs */ + +struct accvio_info { + unsigned int reason; + unsigned int addr; + unsigned int pc; + unsigned int psl; +}; + +#define ACCVIO_LENGTH 1 /* Attempt to read past end of region + as defined by P0LR, P1LR or S0LR */ +#define ACCVIO_PTE_READ 2 /* Unable to access PTE */ +#define ACCVIO_WRITE 4 /* Attempted access was a 
write */ + +#endif /* !ASSEMBLY */ + +/* To align the pointer to the next or previous page boundary. + The pointer is unchanged if it is already on a page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) +#define PAGE_ALIGN_PREV(addr) ((addr)&PAGE_MASK) + + +/* This handles the memory map. (i hope) + * taking the lead from the alpha port the VAX PAGE_OFFSET is + * identified as being the start of kernel S0 (KSEG) space */ + +#define __PAGE_OFFSET (0x80000000) +#define PAGE_OFFSET __PAGE_OFFSET +#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) +#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) + +/* Find this SPTE for addr and extract the PFN from that. This is safe + to use for _any_ S0 address */ +#define MAP_NR(addr) (((GET_HWSPTE_VIRT(addr))->hwpte & PAGELET_PFN_MASK) >> 3) + +#define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) +#define pfn_valid(pfn) ((pfn) < max_mapnr) + +#define pfn_to_page(pfn) (mem_map + (pfn)) +#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) + +#define virt_addr_valid(kaddr) pfn_valid(MAP_NR(kaddr)) + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#endif /* __KERNEL__ */ + +#endif /* _VAX_PAGE_H */ + diff -Nru a/include/asm-vax/mm/pagelet.h b/include/asm-vax/mm/pagelet.h --- a/include/asm-vax/mm/pagelet.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pagelet.h 2004-03-01 01:37:45 @@ -0,0 +1,115 @@ +#ifndef __VAX_MM_PAGELET_H +#define __VAX_MM_PAGELET_H + +/* This file contains the pagelet code that translates between + * the upper layers idea of the PAGE_SIZE (4k) and the Hardware's + * idea of PAGE_SIZE (512bytes). + * + * Copyright atp 1998-2002. + * Jan 2001 atp Integrated with 2.4 tree + * Mar 2002 atp Updates to deal with pmd_populate/pgd_populate in 2.4.3 + */ + + +/* + * RAW Linux MM layer requirements that affect us. 
+ * 2 or 3 level page table based hardware MMU (asm-i386/pgtable.h + * asm-alpha/pgtable.h) + * 4096/8192 bytes/page (linux/mm/slab.c) + * Each page table occupies one page of memory, and each PTE refers to + * one page of memory. + * + * The VAX MMU does not work like this. The VAX Memory map is divided + * into 4 segments, of which 3 are accessible. + * P0 0x00000000 - 0x3fffffff "Process space" + * P1 0x40000000 - 0x7fffffff "Process stack space" + * S0 0x80000000 - 0xbfffffff "System Space" + * S1 0xc0000000 - 0xffffffff "Unreachable/Reserved" + * + * We call this a pgd. + * + * Note that each user "pgd" has a common value for the 3rd and + * 4th pgd entries, for S0, the kernel address space, and + * an invalid one for S1. This disjoint address space is + * similar to that on the S/390. + * FIXME: Note We need to store the user pgd somewhere associated with + * the task structure (mm_struct?). + * + * For each of the two user segments, a base and length register is + * also maintained in the process control block. (pcb.h). + * + * The hardware page size is 512 bytes. Thats a bit small. We need 4096. + * + */ + + +/* PAGE_SHIFT determines the 'logical' page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1UL << PAGE_SHIFT) +/* our page PFN is in the LSB - this is the same as the pagelet case? */ +#define PAGE_PFN_MASK (0x1FFFFF) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +/* PAGELET_SHIFT determines the hardware page size */ +#define PAGELET_SHIFT 9 +#define PAGELET_SIZE (1UL << PAGELET_SHIFT) + +/* our page PFN is in the LSB */ +#define PAGELET_PFN_MASK (0x1FFFFF) +#define PAGELET_MASK (~(PAGELET_SIZE-1)) +#define PAGELET_ALIGN(addr) (((addr)+PAGELET_SIZE-1)&PAGELET_MASK) +#define PAGELET_ALIGN_PREV(addr) ((addr)&PAGELET_MASK) + +/* type definitions. 
These need to be in page.h as linux/mm.h + * expects them here */ + +#ifndef __ASSEMBLY__ +/* definition of pte_t */ +typedef struct pagelet_pagecluster pte_t; + +typedef pte_t * pte_addr_t; + +/* definition of pmd_t, an entry in a Page Middle Directory. Each entry + is a pointer to a page of process PTEs */ +typedef struct { + pte_t *pte_page; +} pmd_t; + +/* + * this struct contains the base and length registers + * needed for each part of the pgd + * -- This is similar to the approach on the S390 using segments + * as a pseudo pgd. + * note, the length register here is 1/8th of the real (processor) + * length register + */ + +struct pgd_descriptor { + unsigned long br; + unsigned long lr; + pmd_t *pmd; /* first four pages of the task PTE slot are the pmds + * Our pmds hold 2048 entries and are 2 pages long */ + unsigned long slot; /* the base address of this slot */ + unsigned long segment; /* The segment index - used in pgd_clear */ + struct pgd_descriptor *next; /* Links for PGD entry free list */ +}; + +typedef struct pgd_descriptor pgd_t; + +#define pte_val(x) ((x).pte) +#define __pte(x) ((pte_t) { (x) } ) + +/* hwpte_t */ +typedef struct { unsigned long hwpte; } hwpte_t; +#define hwpte_val(x) ((x).hwpte) +#define __hwpte(x) ((hwpte_t) { (x) } ) + +/* and pgprot_t */ +typedef struct { unsigned long pgprot; } pgprot_t; +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#endif /* !ASSEMBLY */ + + +#endif /* __VAX_MM_PAGELET_H */ diff -Nru a/include/asm-vax/mm/pagelet_pgd.h b/include/asm-vax/mm/pagelet_pgd.h --- a/include/asm-vax/mm/pagelet_pgd.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pagelet_pgd.h 2005-03-28 18:28:23 @@ -0,0 +1,79 @@ +#ifndef _VAX_MM_PAGELET_PGD_H +#define _VAX_MM_PAGELET_PGD_H +/* + * pagelet_pgd.h + * + * Defines the page directory in our fake 2 level paging scheme + * Copyright atp Jan 2001. 
+ */ + + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +/* In the case of the VAX we pretend we have a pgd within which each + * descriptor can map (0x40000000 bytes). */ + +#define PGDIR_SHIFT 30 /* 0x1e */ +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* There are 4 ptrs in each pgd. p0, p1, s0 (and the unreachable s1) */ +#define PTRS_PER_PGD 4 +/* There are 2 user ptrs in each pgd. p0 and p1 */ +#define USER_PTRS_PER_PGD (2) +/* + * pgd entries used up by user/kernel: + */ +#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) /* == 2 */ +#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) /* == 2 */ +#define __USER_PGD_PTRS ((__PAGE_OFFSET >> PGDIR_SHIFT) & 0xf) +#define __KERNEL_PGD_PTRS (PTRS_PER_PGD-__USER_PGD_PTRS) +/* p0 region is first */ +#define FIRST_USER_PGD_NR 0 + + + +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd - pmd is %p.\n", __FILE__, __LINE__, (e).pmd) + + +/* This is the kernel pgd */ +extern pgd_t swapper_pg_dir[ (PTRS_PER_PGD) ]; + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + * All the actual stuff is done by the pmd_xxx functions + */ +static inline int pgd_none(pgd_t pgd) { return !pgd.pmd; } +static inline int pgd_bad(pgd_t pgd) { return !pgd.br; } +static inline int pgd_present(pgd_t pgd) { return (pgd.pmd != 0); } + +extern void pgd_clear(pgd_t * pgdp); + +/* to set the page-dir (p0br/p0lr) (p1br/p1lr) see arch/vax/mm/pgtable.c */ +extern void set_page_dir(struct task_struct *task, pgd_t *pgdir); +#define SET_PAGE_DIR( tsk, pgdir) \ + set_page_dir( (tsk), (pgdir)) + + + +/* hmm, our pgd_t is 8 bytes long. 
FIXME: check pgd_index gives the right answer */ +/* to find an entry in a page-table-directory */ +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +/* + * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] + * + * this macro returns the index of the entry in the pgd page which would + * control the given virtual address + */ +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's + */ +#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) + +#endif diff -Nru a/include/asm-vax/mm/pagelet_pgprot.h b/include/asm-vax/mm/pagelet_pgprot.h --- a/include/asm-vax/mm/pagelet_pgprot.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pagelet_pgprot.h 2005-03-28 18:36:59 @@ -0,0 +1,93 @@ +/* + * pagelet_pgprot.h - page protection bits from pgtable.h + * + * Copyright atp Jan 2001 + */ + +/* + * VAX processor Protection bits. (From Table 4.3) + * - we are in same boat as i386 as regards PROT_EXEC + * - for some reason the page protection bits are all in the MSB + */ + +#define _PAGE_VALID (1<<31) /* bit 31 == mapping valid */ +#define _PAGE_MODIFY (1<<26) /* hardware modify bit. */ + +/* hardware page protection bits 27:30 */ +#define _PAGE_KW (0x02<<27) /* 0010 */ +#define _PAGE_KR (0x03<<27) /* 0011 */ +#define _PAGE_UW (0x04<<27) /* 0100 */ +/* leaving out exec and super mode page protection modes */ +#define _PAGE_URKW (0x0e<<27) /* 1110 */ +#define _PAGE_UR (0x0f<<27) /* 1111 */ + +#define _PAGE_SR (0x0b<<27) /* 1011 */ + +/* this is a bit of a swizz */ +/* as _PAGE_KW | _PAGE_RO -> PAGE_SR (and implied KR) + * and _PAGE_UW | _PAGE_RO -> PAGE_UR + * Since we dont use super or exec modes, this works + * + * -- However, since kenn wants to run VMS binaries, we may need + * to rethink this. + */ + +#define _PAGE_RO _PAGE_SR + +/* Linux Specific - modify is h/w maintained. 
accessed is in the "owner" bit + * region */ + +#define _PAGE_ACCESSED (1<<23) +#define _PAGE_DIRTY _PAGE_MODIFY + +/* taking the lead from axp (include/asm-alpha/pgtable.h) */ +/* UW implies KW. Likewise UR implies KR. unfortunately mixing r/w is + * not as simple as oring some bits together */ + +/* As I read the H/W ref manual (p208) uw implies kw. uw imples ur. + * ur implies kr + * 1) "Each modes access can be read/write, read only, or none" + * 2) "if any level has read access, then all more privileged level has too". + * 3) "if any level has write access, then all more privileged level has too". + */ + + +#define _PFN_MASK 0x001FFFFF +#define _PAGE_TABLE (_PAGE_VALID | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_UW ) +#define _KERNPG_TABLE (_PAGE_VALID | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_KW ) + +#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) + +#define PAGE_PROT_MASK 0x78000000 + +#define PAGE_NONE __pgprot(_PAGE_VALID | _PAGE_ACCESSED) +#define PAGE_SHARED __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_UW ) +#define PAGE_COPY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_UR ) + +/* These are here in case we need to swap from _PAGE_RO to doing it properly */ +#define PAGE_READONLY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_UR ) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_KW | _PAGE_DIRTY | _PAGE_ACCESSED) +#define PAGE_KRO __pgprot(_PAGE_VALID | _PAGE_KR | _PAGE_DIRTY | _PAGE_ACCESSED) + +/* + * The VAX, like the i386 can't do page protection for execute, and considers that the same are read. + * Also, write permissions imply read permissions. This is the closest we can get.. 
+ * FIXME: we can do better than this + */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED diff -Nru a/include/asm-vax/mm/pagelet_pmd.h b/include/asm-vax/mm/pagelet_pmd.h --- a/include/asm-vax/mm/pagelet_pmd.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pagelet_pmd.h 2003-08-27 01:16:40 @@ -0,0 +1,70 @@ +/* + * pagelet_pmd.h + * + * Defines the page mid level directory in our fake 3 level paging scheme. + * + * Copyright atp Jan 2001. + * atp Jul 2001. Go to a fake 3 level. + * atp Feb 2002. Add in pmd_populate needed for 2.4.3 changes to mm. + */ + + +/* PMD_SHIFT determines the size of the area a second-level page table entry can map */ +/* 1 page of ptes maps 128x4096 bytes = 512kb. + * Each "pmd" here is infact a 2 page = 8kb region at the start of the + * process page table region. It makes the accounting a lot easier. + */ +#define PMD_SHIFT 19 +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* + * entries per page directory level: the VAX is single level, so + * we don't really have any PMD directory physically, or real pgd for + * that matter. Its just an 8kb region. + */ +#define PTRS_PER_PMD 2048 + +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd entry %p.\n", __FILE__, __LINE__, (e).pte_page) + + +/* pmd_xxx functions */ +/* These are really operating on the first two pages of a balance slot */ + +/* + * we dont want linux mucking about with our pmd pages. It will get it + * wrong. pmd_alloc and pmd_free do the business there. + * + * Changes for 2.4.3 and above. pmd_alloc is no more. we have pgd + * and pmd_populate now. 
-- Change to a pmd that is a two page block of memory.
+ * + * The (pte_t) pte that linux expects is actually + * (hwpte_t) pagecluster_struct.pte + * With some knowledge of the other ptes in some of the accessor + * routines. + * + * Copyright atp Jan 2001. + */ + +/* This is the definition of the pagelet_t. + * Note that the first hwpte is the one that linux sees. + * The first hwpte is used for all tests except + * the dirty test, which has to be applied to all */ + +typedef struct pagelet_pagecluster { + unsigned long pte; + unsigned long pte1; + unsigned long pte2; + unsigned long pte3; + unsigned long pte4; + unsigned long pte5; + unsigned long pte6; + unsigned long pte7; +} pagecluster_t; + +/* each ptr is 32 bytes in size */ +#define PTRS_PER_PTE 128 + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) + + +/* number of bits that fit into a memory pointer */ +#define BITS_PER_PTR (8*sizeof(unsigned long)) +/* number of bits that fit into a pte_t (and pagecluster_t) */ +#define BITS_PER_PTE (32*8*4) +#define BYTES_PER_PTE_T (8*4) + +/* sizeof(void*)==1<>PAGE_SHIFT)<pte = pte_val(pte); + ptep->pte1 = pte_val(pte)+1; + ptep->pte2 = pte_val(pte)+2; + ptep->pte3 = pte_val(pte)+3; + ptep->pte4 = pte_val(pte)+4; + ptep->pte5 = pte_val(pte)+5; + ptep->pte6 = pte_val(pte)+6; + ptep->pte7 = pte_val(pte)+7; +} + +static inline void print_pte(pte_t *ptep) +{ + printk(KERN_DEBUG "%8p: %8lx %8lx %8lx %8lx %8lx %8lx %8lx %8lx\n", ptep, ptep->pte,ptep->pte1,ptep->pte2,ptep->pte3,ptep->pte4,ptep->pte5,ptep->pte6,ptep->pte7); +} +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +/* Caution, mk_pte only operates on a single unsigned long + * The nominal "master hwpte" or first element in a pagecluster + * - for examples of usage of mk_pte see linux/mm/memory.c + * You must use set_pte to actually manipulate the pte's. + * + * See asm-i386/pgtable-3level.h for background. 
+ */ + +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +#define pte_pfn(x) ((unsigned long)(((pte_val(x) & PAGE_PFN_MASK) \ + >> (PAGE_SHIFT - PAGELET_SHIFT)))) + +/* Create a PTE from a PFN and a page protection. Note that the PFN + passed in is a PAGE-sized PFN, not a PAGELET-sized PFN */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + pte_t pte; + pte_val(pte) = (pfn << (PAGE_SHIFT - PAGELET_SHIFT)) | pgprot_val(pgprot); + return pte; +} + +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), pgprot) + + +/* + * This requires that pte_set is called after pte_modify + * -- There is (7/2/2001) one reference to this in mm/mprotect.c + */ +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) &= _PAGE_CHG_MASK; + pte_val(pte) |= pgprot_val(newprot); + return pte; +} + +/* Clear a pagecluster */ +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + ptep->pte = 0; + ptep->pte1 = 0; + ptep->pte2 = 0; + ptep->pte3 = 0; + ptep->pte4 = 0; + ptep->pte5 = 0; + ptep->pte6 = 0; + ptep->pte7 = 0; +} + +#define PTE_INIT(x) pte_clear(x) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ + +/* We need to check each one for the hardware dirty bit */ +static inline int pte_dirty(pte_t pte) +{ + return (( (pte).pte | (pte).pte1 | (pte).pte2 | (pte).pte3 | \ + (pte).pte4 | (pte).pte5 | (pte).pte6 | (pte).pte7) & _PAGE_DIRTY); + +} + + +/* who needs that + * extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); } + * extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID); } + * extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; } + * extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; } + * extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; } + * extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= _PAGE_INVALID; return pte; } + */ + +/* + * these manipulate various bits in each hwpte. + */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + (pte).pte |= _PAGE_RO; + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + (pte).pte &= ~(_PAGE_RO); + if ( !((pte).pte & _PAGE_UW) ) { + (pte).pte |= _PAGE_KW; + } + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + (pte).pte &= ~(_PAGE_DIRTY); + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + (pte).pte |= _PAGE_DIRTY; + return pte; +} + +/* used in arch/vax/mm/pgalloc.c */ +static inline pte_t pte_mkinvalid(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_VALID; + return pte; +} + +/* software only - only bother with first pagelet pte in the pagecluster */ +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_ACCESSED; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + return pte; +} + +static inline int pte_read(pte_t pte) +{ + /* If the page protection is non-zero, page is always kernel readable */ + return (pte_val(pte) & PAGE_PROT_MASK); +} + +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_RO); +} + +/* 
software accessed bit. only bother to test the first one */ +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +static inline int pte_none(pte_t pte) { return (!pte_val(pte)); } +static inline int pte_present(pte_t pte) { return (pte_val(pte) & _PAGE_VALID); } + +extern pte_t * pte_offset(pmd_t * dir, unsigned long address); + +/* These variants on pte_offset are for i386 where pages of PTEs might + be in highmem and thus have to be re-mapped < 4GB before accessing. + The pte_unmap() functions undo whatever has to be done by the _map() + functions */ +#define pte_offset_kernel(dir, address) pte_offset(dir, address) +#define pte_offset_map(dir, address) pte_offset(dir, address) +#define pte_offset_map_nested(dir, address) pte_offset(dir, address) +#define pte_unmap(pte) /* nothing to undo */ +#define pte_unmap_nested(pte) /* nothing to undo */ + + + +/* items to manipulate a hwpte (for the S0 tables ) */ + +static inline void set_hwpte(hwpte_t *ptep, hwpte_t pte) +{ + *ptep = pte; +} + +static inline hwpte_t mk_hwpte(void *page, pgprot_t pgprot) +{ + hwpte_t hwpte; + hwpte_val(hwpte) = (__pa(page) >> PAGELET_SHIFT) | pgprot_val(pgprot); + return hwpte; +} + +static inline int hwpte_none(hwpte_t pte) { return !hwpte_val(pte); } +static inline int hwpte_present(hwpte_t pte) { return hwpte_val(pte) & _PAGE_VALID; } + +static inline hwpte_t hwpte_mkinvalid(hwpte_t pte) +{ + hwpte_val(pte) &= ~_PAGE_VALID; + return pte; +} + +/* Support for non-linear page mappings. Linux implements this by using + one bit in a non-VALID PTE to distinguish a page in swap from a page + in a file mapping. All remaining PTE bits that are not used by the + hardware (for non-VALID PTEs, i.e. non-resident pages) are used to + encode the file offset. VAX has bits 25-23, 21-0 available. We'll + use bit 25 as the SWAP-vs-mappedfile selector and bits 23, 21-0 as + file offset bits. 
*/ + +#define _PAGE_FILE 0x02000000 +#define PTE_FILE_MAX_BITS 23 + +#define pte_to_pgoffX(p) \ + (((pte >> 1) & 0x00800000) + ((p).pte & 0x003fffff)) + +static inline unsigned long pte_to_pgoff(pte_t pte) { + return ((pte_val(pte) & 0x00800000) >> 1) + (pte_val(pte) & 0x003fffff); +} + +static inline pte_t pgoff_to_pte(unsigned long pgoff) { + pte_t pte; + + /* This only sets the first hwpte in the resulting PTE structure, + but that's OK, since set_pte() will be used to actually store + this in a real hardware-visible page table entry */ + pte_val(pte) = ((pgoff << 1) & 0x00800000) + (pgoff & 0x003fffff); + return pte; +} + +static inline int pte_file(pte_t pte) { + return (pte_val(pte) & _PAGE_FILE); +} + +#endif diff -Nru a/include/asm-vax/mm/pgalloc.h b/include/asm-vax/mm/pgalloc.h --- a/include/asm-vax/mm/pgalloc.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pgalloc.h 2003-02-16 01:13:17 @@ -0,0 +1,115 @@ +#ifndef __ASM_VAX_MM_PGALLOC_H +#define __ASM_VAX_MM_PGALLOC_H + +/* Copyright atp 1998-2002. pgalloc.h for VAX architecture. */ +/* + * Fixmes: + * 1) the pte_alloc/freeing stuff. Check Constraints here + * (ptes allocated for the kernel to map S0 space need to + * be physically contiguous. Entries in P0 and P1 need to + * be virtually contiguous in S0 space. + * + * 2) get_pgd_slow is incredibly wasteful. We should pack the + * pgds into a single page, and add pages as needed. The quicklists + * structure can be hijacked for this. Or at least one per pagelet... + */ +/* + * (c) Copyright Dave Airlie 2001 - airlied@linux.ie + * -- re-write for fixed sized processes + * + * atp Jun 2001 remove fixed size processes, use 3 level page table and pte slots. + * atp Jun-Jul 2001 - complete rewrite. + * atp Aug 2001 - swapping and vmalloc need pmd_alloc_kernel + * atp Feb 2002 - Update to track mm changes in 2.4.x (x>2) + * + * each 'pgd' spans an address range of 0x40000000 bytes. 
+ * each page of 'ptes' spans an address range of 0x80000 bytes + * So, there are 0x800 pages of 'ptes' per pgd. Keeping track of which page + * is mapped where, requires a pmd with 0x800 entries. + */ + +#include + +extern struct pgd_cache { + pgd_t *head; /* These are special recyclable slots */ + unsigned long slots_used; + unsigned long size; +} pgd_free_list; + +/* + * Allocate and free page tables. The xxx_kernel() versions are + * used to allocate a kernel page table - this turns on ASN bits + * if any in other archs. On VAX, we cannot manipulate the S0 page + * table after init, so they are BUG()-ed out. + */ + +static inline void pgd_free(pgd_t *pgd) +{ + pgd->next = pgd_free_list.head; + pgd_free_list.head = pgd; + pgd_free_list.size++; +} + +/* renamed from get_pmd_slow to pmd_alloc_one, mar 2002 */ +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pmd_t *ret; + ret = (pmd_t *)__get_free_pages(GFP_KERNEL,1); + if (ret) { + clear_page(ret); + clear_page(ret + (PAGE_SIZE/sizeof(pmd_t))); + } + return ret; +} + +static inline void pmd_free(pmd_t *pmd) +{ + free_pages((unsigned long)pmd, 1); +} + + +static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) +{ + struct page *page; + + /* Is this an attempt to allocate a page outside the maximum memory + limits? */ + if ((address >= TASK_WSMAX) && (address < (0x80000000-TASK_STKMAX))) { + /* The address is in the no-go zone. Returning NULL here + will get treated as ENOMEM by callers, which is a + reasonable way to fail this, I think */ + return NULL; + } + + page = alloc_page(GFP_KERNEL); + if (page) { + clear_page(page_address(page)); + } + return page; +} + +static inline void pte_free(struct page *pte_page) +{ + __free_page(pte_page); +} + +/* We don't need the generic TLB shootdown stuff yet. 
Might need it for + SMP later */ +#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd) +#define __pte_free_tlb(tlb, pte_page) pte_free(pte_page) + +/* in arch/vax/mm/pgalloc.c */ +pgd_t *pgd_alloc(struct mm_struct *mm); +void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd); +void pmd_populate(struct mm_struct *mm, pmd_t * pmd, struct page *pte); +void pmd_clear(pmd_t *pmd); + +/* Due to limitation of VAX memory management, dynamic allocation and + freeing of system PTEs is impossible. Nothing should be trying to + allocate or free these. */ +#define pte_free_kernel(pte) BUG() +#define pmd_populate_kernel(mm, pmd, pte) BUG() +#define pte_alloc_one_kernel(mm, address) ({ BUG(); (pte_t *)0; }) +#define pmd_page_kernel(pmd) BUG() + +#endif /* __ASM_VAX_PGALLOC_H */ diff -Nru a/include/asm-vax/mm/pgtable.h b/include/asm-vax/mm/pgtable.h --- a/include/asm-vax/mm/pgtable.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pgtable.h 2005-07-31 18:35:59 @@ -0,0 +1,194 @@ +/* (c) 2001 Vax Porting Project, atp, kenn, airlied */ + +/* FIXME: this is a mess its confusing and badly documented + * - needs cleaning up atp jul 2001 */ +#ifndef _VAX_MM_PGTABLE_H +#define _VAX_MM_PGTABLE_H + +#ifndef __ASSEMBLY__ +#include +#include +#include +#endif /* !__ASSEMBLY__ */ + +#include + +/* FIXME: we should really use */ +#include + +/* the pagelet stuff */ +#include + +/* TASK address space sizing, for sizing SPT and so forth */ +#include + +/* + * See Documentation/vax/memory.txt + * for up to date memory layout + */ + +#define KERNEL_START_PHYS 0x00100000 +#define KERNEL_START_VIRT (KERNEL_START_PHYS + PAGE_OFFSET) +#define FIRST_USER_ADDRESS 0 + +/* + * How many extra entries in the system page table for I/O space + * and vmalloc mappings? The SPT_MAX_xxx macros define the max + * space (in kB) that we'll allocate for each purpose. The + * SPT_ENTRIES_xxx macros are then calculated accordingly. + * + * The IOMAP limit isn't necessarily a hard limit. 
Once we run out + * of IOMAP entries in the SPT, we could use get_free_page() to + * alloc a real page of RAM and hijack its SPTE. + */ + +#define SPT_MAX_IOMAP (16 * 1024) +#define SPT_MAX_VMALLOC 4096 +/* entries is (1024 * 1024) >> PAGELET_SIZE */ +#define SPT_HWPTES_IOMAP (SPT_MAX_IOMAP<<1) +#define SPT_PTES_IOMAP (SPT_MAX_IOMAP >> 2) + /*/>> (PAGE_SHIFT-10)) */ +/* FIXME: (PAGE_SHIFT-10) is hardwired here to 2. asm bug in head.S */ +#define SPT_HWPTES_VMALLOC (SPT_MAX_VMALLOC << 1) +#define SPT_PTES_VMALLOC (SPT_MAX_VMALLOC >> 2) + +#define SPT_BASE ((unsigned long)( (swapper_pg_dir[2]).br )) +/* SPT_LEN can be an lvalue, and is the length in longwords */ +#define SPT_LEN swapper_pg_dir[2].lr +/* SPT_SIZE is the size in BYTES */ +#define SPT_SIZE ((unsigned long)( (swapper_pg_dir[2]).lr ) << 2) + +/* + * Macros to get page table addresses + offsets. + * + * If they are 4K PTEs then set_pte needs to be used on the results, + */ + +/* macro to get linear page table entry for a physical address */ +#define GET_HWSPTE_PHYS(x) ((hwpte_t *) (SPT_BASE + ( ((x) >> PAGELET_SHIFT) << SIZEOF_PTR_LOG2) )) + +/* this is like it is for a reason - we need to wipe out the lower bits, the old + * calculation using page_shift-sizeof_pte_log2 gave the wrong answer sometimes */ +#define GET_SPTE_PHYS(x) ((pte_t *)(SPT_BASE + ( ((x) >> PAGE_SHIFT) << SIZEOF_PTE_LOG2))) + +/* macro to get linear page table entry for a virtual address + (only works for addresses in S0 space) */ +#define GET_HWSPTE_VIRT(x) GET_HWSPTE_PHYS(((unsigned long)x) - PAGE_OFFSET) +#define GET_SPTE_VIRT(x) GET_SPTE_PHYS(((unsigned long)x) - PAGE_OFFSET) +/* macro to get the virtual address represented by an SPTE, given the + address of the SPTE */ +#define SPTE_TO_VIRT(p) ((void *)((((unsigned long)p - (unsigned long)swapper_pg_dir[2].br) << (PAGE_SHIFT-SIZEOF_PTE_LOG2)) + PAGE_OFFSET)) + +#ifndef __ASSEMBLY__ +/* Other architectures put a virtual hole between the end of + mapped physical memory and lowest 
address that vmalloc() will + hand out. This isn't really practical on the VAX, since the + system page table must be contiguous, so virtual holes in S0 + space waste precious SPTEs. +*/ + +/* the previous definition of VMALLOC START relied on the + * VAX phy memory being an exact 4k multiple, + * my VAX has 7f1f hw-pages so isn't aligned on 4K + * workout the VMALLOC_START from the vmallocmap_base and the + * system base register.- + */ + +/* VMALLOC_OFFSET is the gap between the end of mapping of physical + * ram and the start of VMALLOC ?? */ +#define VMALLOC_OFFSET (SPT_MAX_IOMAP * 1024) +#define VMALLOC_START (PAGE_OFFSET+((vmallocmap_base-swapper_pg_dir[2].br)<<(PAGELET_SHIFT-2))) +#define VMALLOC_VMADDR(x) ((unsigned long)(x)) +#define VMALLOC_END (VMALLOC_START + (SPT_MAX_VMALLOC * 1024)) + +/* Start of task page table area - the variables this is based on + * are defined in asm-vax/mm/task.h */ + +/* address in S0 space of where the process page table area starts and ends.*/ +#define TASKPTE_START PTE_TASK_ALIGN(VMALLOC_END) +#define TASKPTE_END (TASKPTE_START+(PTE_TASK_SLOTSIZE * TASK_MAXUPRC)) +/* the number of hwptes to map this space */ +#define SPT_HWPTES_TASKPTE (((PTE_TASK_SLOTSIZE)>>PAGELET_SHIFT)*TASK_MAXUPRC) +#define SPT_PTES_TASKPTE (SPT_HWPTES_TASKPTE >> 3) + +/* find a slot in the pagetable area for pgd (x), x is 0->TASK_MAXUPRC-1 */ +#define GET_TASKSLOT(x) (TASKPTE_START+((x) * PTE_TASK_SLOTSIZE)) + + +/* page table for 0-4MB for everybody */ +/* This is a c reference to the start of the system page table + * (see arch/vax/boot/head.S). The spt is initialised to cover physical + * memory by early boot code, based on VMB supplied information. 
Further + * expansion happens later in the boot sequence in paging_init */ +extern pte_t *pg0; + +/* Number of SPTEs in system page table */ +extern unsigned int spt_size; + +/* external pointer to vmallocmap_base in head.S */ +extern unsigned long vmallocmap_base; + +/* zero page used for uninitialized stuff */ +/* extern unsigned long empty_zero_page[1024]; */ + +/* + * BAD_PAGETABLE is used when we need a bogus page-table, while + * BAD_PAGE is used for a bogus page. + */ +extern pte_t __bad_page(void); +extern pte_t * __bad_pagetable(void); + + +#define BAD_PAGETABLE __bad_pagetable() +#define BAD_PAGE __bad_page() + +/* + * * The VAX doesn't have any external MMU info: the kernel page + * * tables contain all the necessary information. + * */ +#define update_mmu_cache(vma,address,pte) do { } while (0) + + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern unsigned long empty_zero_page[1024] __attribute__ ((__aligned__(PAGE_SIZE))); +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +/* Encode and de-code a swap entry */ +#define __swp_type(x) (((x).val >> 1) & 0x3f) +#define __swp_offset(x) ((x).val >> 8) +#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) +#define __pte_to_swp_entry(x) ((swp_entry_t) { pte_val(x) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +/* Memory sizing. You'll need to #include to get + * the declaration of boot_rpb. */ +#define max_hwpfn (boot_rpb.l_pfncnt) +extern unsigned long max_pfn; + +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +/* generic stuff */ +#include + +#endif /*! 
assembly */ + +/* Needs to be defined here and not in linux/mm.h, as it is arch dependent + * This is used on sparc processors to implement memory holes */ +#define PageSkip(page) 0 +#define kern_addr_valid(addr) 1 + +/* + * No page table caches to initialise or prune + */ +#define pgtable_cache_init() do { } while (0) +#define check_pgt_cache() do { } while (0) + +#define io_remap_page_range(vma, vaddr, paddr, size, prot) \ + remap_pfn_range (vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range (vma, vaddr, pfn, size, prot) + +#endif diff -Nru a/include/asm-vax/mm/pgtable_pagelet.h b/include/asm-vax/mm/pgtable_pagelet.h --- a/include/asm-vax/mm/pgtable_pagelet.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/pgtable_pagelet.h 2002-05-20 02:33:39 @@ -0,0 +1,28 @@ +#ifndef __VAX_MM_PGTABLELET_H +#define __VAX_MM_PGTABLELET_H + +/* This file contains the pagelet code that translates between + * the upper layers' idea of the PAGE_SIZE (4k) and the Hardware's + * idea of PAGE_SIZE (512bytes). + * + * Copyright atp 1998-2001. Integrated with 2.4 tree Jan 2001 + * + * Meta file that includes all the bits we need. + */ + + +/* Page protection bits */ +#include + +#ifndef __ASSEMBLY__ +/* pgd definitions */ +#include + +/* pmd definitions */ +#include + +/* pte definitions */ +#include +#endif /* !assembly */ + +#endif /* __VAX_MM_PGTABLELET_H */ diff -Nru a/include/asm-vax/mm/task.h b/include/asm-vax/mm/task.h --- a/include/asm-vax/mm/task.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/task.h 2005-10-03 14:01:29 @@ -0,0 +1,92 @@ +#ifndef __VAX_MM_TASK_H +#define __VAX_MM_TASK_H +/* task.h - task memory map defines */ +/* atp July 2001. */ +/* These are all used to size the relevant structures in the system + * page table, in paging_init (arch/vax/mm/init.c) + * + * reminder: The vax memory map is not sparse. Every hole in the address + * space uses page table entries, and wastes memory. 
In addition, + because the page tables need to be contiguous, in S0 virtual + memory, we have to allocate contiguous system page table entries, + which, in turn have to be contiguous in physical ram. So we fix + at boot the amount of Virtual Address space that each task has + available, and the maximum number of tasks that can be run. + Keep these values as small as you can, or you will waste lots + of memory on useless pagetables. Documentation/vax/memory.txt. + * + * Mar 2002. Update to 2.4.3 memory management. Thought of a better way + * of working back to the pgd. Removed PGD_SPECIAL botch. + */ + +/* currently allocate 32mb of virtual memory */ +/* These defines cover the process memory map, and are in bytes */ + +/* Please remember to make them a multiple of PAGE_SIZE, or it's going to + * get weird here */ + +/* TASK_WSMAX is the max virtual address space in P0 */ +/* TASK_WSMAX must not be larger than 1Gb, it is the sum of the + * TXT section - which defines the largest program that can be run, + * and the MMAP section, which describes how much virtual address space + * that program has available to it + */ + +/* TASK_TXTMAX is the maximum program size */ +#define TASK_TXTMAX (6*1024*1024) + +/* TASK_MMAPMAX is the max space in P0 for the mmap() function , + contiguous with TASK_TXTMAX - It's basically the amount of memory you + give a process to play with */ +#define TASK_MMAPMAX (58*1024*1024) + +/* TASK_STKMAX is the max space for the stack in P1 */ +#define TASK_STKMAX (8*1024*1024) + +#define TASK_WSMAX (TASK_TXTMAX+TASK_MMAPMAX) + +/* TASK_MAXUPRC is the maximum number of user processes on the system + * Think of this like balsetcnt on VMS. + * -- this should also set/be set by the linux max task variable + */ +#define TASK_MAXUPRC (64) + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
+ */ + +#define TASK_UNMAPPED_BASE TASK_TXTMAX + +/* calculations based on the above for the SPT */ +/* NPTE_TASK = the number of HWPTE's needed to map a process */ +#define N_HWPTE_TASK_P0 ((TASK_WSMAX)>>PAGELET_SHIFT) +#define N_HWPTE_TASK_P1 ((TASK_STKMAX)>>PAGELET_SHIFT) +/* There are 4 4096 byte pages in the pmd. = 4x1024 hwptes. */ +#define N_HWPTE_TASK_PMD ((4*1024)) +#define N_HWPTE_TASK (N_HWPTE_TASK_P0+N_HWPTE_TASK_P1+N_HWPTE_TASK_PMD) + +/* The alignment we want - at present double page for pte_alloc/offset to work ok */ +#define PTE_TASK_MASK (~(8191)) +#define PTE_TASK_ALIGN(x) (((x)+8191)&PTE_TASK_MASK) + +/* size in bytes of an aligned task pte region */ +#define PTE_TASK_SLOTSIZE PTE_TASK_ALIGN(N_HWPTE_TASK<<2) + +/* The number of pagelets, or SPTEs needed to hold this number of HWPTEs */ +#define SPTE_MAX_TASKPTE ((N_HWPTE_TASK>>(PAGELET_SHIFT-2))+1) + +/* The offsets into page table area from the start of this slot, in bytes */ +#define P0PTE_OFFSET (N_HWPTE_TASK_PMD<<2) +/* this is the end of the slot, as p1 counts backwards */ +#define P1PTE_OFFSET ((N_HWPTE_TASK_P0+N_HWPTE_TASK_P1+N_HWPTE_TASK_PMD)<<2) +#define P0PMD_OFFSET (0) +#define P1PMD_OFFSET (PAGE_SIZE*2) + +/* + * User space process size: 2GB (default). + * This is a bit bogus - a linux thing. + */ +#define TASK_SIZE (PAGE_OFFSET) + +#endif /* __VAX_MM_TASK_H */ diff -Nru a/include/asm-vax/mm/virtmap.h b/include/asm-vax/mm/virtmap.h --- a/include/asm-vax/mm/virtmap.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mm/virtmap.h 2002-07-19 01:50:40 @@ -0,0 +1,44 @@ +#ifndef _VAX_ASM_VIRT_H +#define _VAX_ASM_VIRT_H + + +#define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x))) +#define __io_phys(x) ((unsigned long)(x) & ~PAGE_OFFSET) + +/* + * Change virtual addresses to physical addresses and vv. 
+ * These are pretty trivial + */ +extern inline unsigned long virt_to_phys(volatile void * address) +{ + return __io_phys(address); +} + +extern inline void * phys_to_virt(unsigned long address) +{ + return __io_virt(address); +} + +extern void *ioremap(unsigned long offset, unsigned long size); + +/* + * This one maps high address device memory and turns off caching for that area. + * it's useful if some control registers are in such an area and write combining + * or read caching is not desirable. + * + * I don't think this needs to be treated differently on a VAX. IO space + * is handled slightly different wrt caching and accesses, and I think that + * will take care of everything for us. + */ +extern inline void * ioremap_nocache (unsigned long offset, unsigned long size) +{ + return ioremap(offset, size ); +} + +extern void iounmap(void *addr); + + +/* Needed for BIO layer. Convert a struct page * to a physical address */ +#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT) + +#endif /* _VAX_ASM_VIRT_H */ diff -Nru a/include/asm-vax/mman.h b/include/asm-vax/mman.h --- a/include/asm-vax/mman.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mman.h 2003-10-05 03:46:37 @@ -0,0 +1,43 @@ +#ifndef __VAX_MMAN_H__ +#define __VAX_MMAN_H__ + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x4 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_SHARED 0x01 /* Share changes */ +#define MAP_PRIVATE 0x02 /* Changes are private */ +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x10 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x20 /* don't use a file */ + +#define MAP_GROWSDOWN 0x00100 
/* stack-like segment */ +#define MAP_DENYWRITE 0x00800 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */ +#define MAP_LOCKED 0x02000 /* pages are locked */ +#define MAP_NORESERVE 0x04000 /* don't check for reservations */ +#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x40000 /* do not block on IO */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_INVALIDATE 2 /* invalidate the caches */ +#define MS_SYNC 4 /* synchronous memory sync */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#define MADV_NORMAL 0x0 /* default page-in behavior */ +#define MADV_RANDOM 0x1 /* page-in minimum required */ +#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ +#define MADV_WILLNEED 0x3 /* pre-fault pages */ +#define MADV_DONTNEED 0x4 /* discard these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#endif /* __VAX_MMAN_H__ */ diff -Nru a/include/asm-vax/mmu.h b/include/asm-vax/mmu.h --- a/include/asm-vax/mmu.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mmu.h 2004-06-02 22:05:42 @@ -0,0 +1,7 @@ +#ifndef _VAX_MMU_H +#define _VAX_MMU_H + +/* Default "unsigned long" context */ +typedef unsigned long mm_context_t; + +#endif /* _VAX_MMU_H */ diff -Nru a/include/asm-vax/mmu_context.h b/include/asm-vax/mmu_context.h --- a/include/asm-vax/mmu_context.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mmu_context.h 2002-12-02 00:54:43 @@ -0,0 +1,6 @@ +#ifndef __VAX_TMP_MMUCONTEXT_H +#define __VAX_TMP_MMUCONTEXT_H + +#include + +#endif /* __VAX_TMP_MMUCONTEXT_H */ diff -Nru a/include/asm-vax/module.h b/include/asm-vax/module.h --- a/include/asm-vax/module.h 1970-01-01 01:00:00 +++ b/include/asm-vax/module.h 2003-09-17 23:57:45 @@ -0,0 +1,22 @@ +#ifndef _ASM_VAX_MODULE_H +#define _ASM_VAX_MODULE_H +/* + * This file contains the vax architecture specific module code. 
+ */ + +#define module_map(x) vmalloc(x) +#define module_unmap(x) vfree(x) +#define module_arch_init(x) (0) +#define arch_init_modules(x) do { } while (0) + +/* Maybe we need stuff here, but I don't think so. */ +struct mod_arch_specific +{ +}; + +/* VAX is a 32bit ELF target */ +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr + +#endif /* _ASM_VAX_MODULE_H */ diff -Nru a/include/asm-vax/msgbuf.h b/include/asm-vax/msgbuf.h --- a/include/asm-vax/msgbuf.h 1970-01-01 01:00:00 +++ b/include/asm-vax/msgbuf.h 2002-05-20 02:33:39 @@ -0,0 +1,31 @@ +#ifndef __VAX_MSGBUF_H +#define __VAX_MSGBUF_H + +/* + * The msqid64_ds structure for VAX architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned long __unused1; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned long __unused2; + __kernel_time_t msg_ctime; /* last change time */ + unsigned long __unused3; + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* _VAX_MSGBUF_H */ diff -Nru a/include/asm-vax/mtpr.h b/include/asm-vax/mtpr.h --- a/include/asm-vax/mtpr.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mtpr.h 2004-06-02 21:23:42 @@ -0,0 +1,142 @@ +#ifndef _MTPR_H_ +#define _MTPR_H_ + +/* + * VAX/Linux mtpr.h: VAX Processor Registers + * + * Thanks to NetBSD for the unknown registers + */ + +#define PR_KSP 0 /* Kernel Stack Pointer */ +#define PR_ESP 1 /* Executive Stack Pointer */ 
+#define PR_SSP 2 /* Supervisor Stack Pointer */ +#define PR_USP 3 /* User Stack Pointer */ +#define PR_ISP 4 /* Interrupt Stack Pointer */ + +#define PR_P0BR 8 /* P0 Base Register */ +#define PR_P0LR 9 /* P0 Length Register */ +#define PR_P1BR 10 /* P1 Base Register */ +#define PR_P1LR 11 /* P1 Length Register */ +#define PR_SBR 12 /* System Base Register */ +#define PR_SLR 13 /* System Limit Register */ +#define PR_PCBB 16 /* Process Control Block Base */ +#define PR_SCBB 17 /* System Control Block Base */ +#define PR_IPL 18 /* Interrupt Priority Level */ +#define PR_ASTLVL 19 /* AST Level */ +#define PR_SIRR 20 /* Software Interrupt Request */ +#define PR_SISR 21 /* Software Interrupt Summary */ +#define PR_IPIR 22 /* KA820 Interprocessor register */ +#define PR_MCSR 23 /* Machine Check Status Register 11/750 */ +#define PR_ICCS 24 /* Interval Clock Control */ +#define PR_NICR 25 /* Next Interval Count */ +#define PR_ICR 26 /* Interval Count */ +#define PR_TODR 27 /* Time Of Year (optional) */ +#define PR_CSRS 28 /* Console Storage R/S */ +#define PR_CSRD 29 /* Console Storage R/D */ +#define PR_CSTS 30 /* Console Storage T/S */ +#define PR_CSTD 31 /* Console Storage T/D */ +#define PR_RXCS 32 /* Console Receiver C/S */ +#define PR_RXDB 33 /* Console Receiver D/B */ +#define PR_TXCS 34 /* Console Transmit C/S */ +#define PR_TXDB 35 /* Console Transmit D/B */ +#define PR_TBDR 36 /* Translation Buffer Group Disable Register 11/750 */ +#define PR_CADR 37 /* Cache Disable Register 11/750 */ +#define PR_MCESR 38 /* Machine Check Error Summary Register 11/750 */ +#define PR_CAER 39 /* Cache Error Register 11/750 */ +#define PR_ACCS 40 /* Accelerator control register */ +#define PR_SAVISP 41 /* Console Saved ISP */ +#define PR_SAVPC 42 /* Console Saved PC */ +#define PR_SAVPSL 43 /* Console Saved PSL */ +#define PR_WCSA 44 /* WCS Address */ +#define PR_WCSB 45 /* WCS Data */ +#define PR_SBIFS 48 /* SBI Fault/Status */ +#define PR_SBIS 49 /* SBI Silo */ +#define 
PR_SBISC 50 /* SBI Silo Comparator */ +#define PR_SBIMT 51 /* SBI Silo Maintenance */ +#define PR_SBIER 52 /* SBI Error Register */ +#define PR_SBITA 53 /* SBI Timeout Address Register */ +#define PR_SBIQC 54 /* SBI Quadword Clear */ +#define PR_IUR 55 /* Initialize Unibus Register 11/750 */ +#define PR_MAPEN 56 /* Memory Management Enable */ +#define PR_TBIA 57 /* Trans. Buf. Invalidate All */ +#define PR_TBIS 58 /* Trans. Buf. Invalidate Single */ +#define PR_TBDATA 59 /* Translation Buffer Data */ +#define PR_MBRK 60 /* Microprogram Break */ +#define PR_PMR 61 /* Performance Monitor Enable */ +#define PR_SID 62 /* System ID Register */ +#define PR_TBCHK 63 /* Translation Buffer Check */ + +#define PR_PAMACC 64 /* Physical Address Memory Map Access (KA86) */ +#define PR_PAMLOC 65 /* Physical Address Memory Map Location (KA86) */ +#define PR_CSWP 66 /* Cache Sweep (KA86) */ +#define PR_MDECC 67 /* MBOX Data Ecc Register (KA86) */ +#define PR_MENA 68 /* MBOX Error Enable Register (KA86) */ +#define PR_MDCTL 69 /* MBOX Data Control Register (KA86) */ +#define PR_MCCTL 70 /* MBOX Mcc Control Register (KA86) */ +#define PR_MERG 71 /* MBOX Error Generator Register (KA86) */ +#define PR_CRBT 72 /* Console Reboot (KA86) */ +#define PR_DFI 73 /* Diagnostic Fault Insertion Register (KA86) */ +#define PR_EHSR 74 /* Error Handling Status Register (KA86) */ +#define PR_STXCS 76 /* Console Storage C/S (KA86) */ +#define PR_STXDB 77 /* Console Storage D/B (KA86) */ +#define PR_ESPA 78 /* EBOX Scratchpad Address (KA86) */ +#define PR_ESPD 79 /* EBOX Scratchpad Data (KA86) */ + +#define PR_RXCS1 80 /* Serial-Line Unit 1 Receive CSR (KA820) */ +#define PR_RXDB1 81 /* Serial-Line Unit 1 Receive Data Buffer (KA820) */ +#define PR_TXCS1 82 /* Serial-Line Unit 1 Transmit CSR (KA820) */ +#define PR_TXDB1 83 /* Serial-Line Unit 1 Transmit Data Buffer (KA820) */ +#define PR_RXCS2 84 /* Serial-Line Unit 2 Receive CSR (KA820) */ +#define PR_RXDB2 85 /* Serial-Line Unit 2 Receive Data 
Buffer (KA820) */ +#define PR_TXCS2 86 /* Serial-Line Unit 2 Transmit CSR (KA820) */ +#define PR_TXDB2 87 /* Serial-Line Unit 2 Transmit Data Buffer (KA820) */ +#define PR_RXCS3 88 /* Serial-Line Unit 3 Receive CSR (KA820) */ +#define PR_RXDB3 89 /* Serial-Line Unit 3 Receive Data Buffer (KA820) */ +#define PR_TXCS3 90 /* Serial-Line Unit 3 Transmit CSR (KA820) */ +#define PR_TXDB3 91 /* Serial-Line Unit 3 Transmit Data Buffer (KA820) */ +#define PR_RXCD 92 /* Receive Console Data from another cpu (KA820) */ +#define PR_CACHEX 93 /* Cache invalidate Register (KA820) */ +#define PR_BINID 94 /* VAXBI node ID Register (KA820) */ +#define PR_BISTOP 95 /* VAXBI Stop Register (KA820) */ + +#define PR_VINTSR 123 /* vector i/f error status (KA43/KA46) */ +#define PR_PCTAG 124 /* primary cache tag store (KA43/KA46) */ +#define PR_PCIDX 125 /* primary cache index (KA43/KA46) */ +#define PR_PCERR 126 /* primary cache error address (KA43/KA46) */ +#define PR_PCSTS 127 /* primary cache status (KA43/KA46) */ + +/* Definitions for AST */ +#define AST_NO 4 +#define AST_OK 3 + +/* Bits within PR_TXCS */ +#define PR_TXCS_READY 0x80 +#define PR_TXCS_INTEN 0x40 + +/* Bits within PR_RXCS */ +#define PR_RXCS_READY 0x80 +#define PR_RXCS_INTEN 0x40 + + +#define Xmtpr(val,reg) \ +{ \ + asm __volatile ( \ + "mtpr %0,%1" \ + : /* No output */ \ + : "g"(val), "g"(reg)); \ +} + +#define Xmfpr(reg) \ +({ \ + register int val; \ + asm __volatile ( \ + "mfpr %1,%0" \ + : "=g"(val) \ + : "g" (reg)); \ + val; \ +}) + +#define __mtpr(a,b) Xmtpr((a),(b)) +#define __mfpr(a) Xmfpr((a)) + +#endif /* _MTPR_H_ */ diff -Nru a/include/asm-vax/mv.h b/include/asm-vax/mv.h --- a/include/asm-vax/mv.h 1970-01-01 01:00:00 +++ b/include/asm-vax/mv.h 2005-10-21 09:22:38 @@ -0,0 +1,209 @@ +#ifndef _VAX_MV_H +#define _VAX_MV_H + +/* + * Define format of machine vector. This structure abstracts out + * the CPU-specific operations. All vectors are CALLS linkage + * unless otherwise specified. 
+ * + * Fields marked as mandatory must be implemented (cannot be + * NULL). The kernel will not check if mandatory fields are NULL + * before dispatching through them. + * + * Optional fields can be NULL. A NULL optional field will either + * be a no-op, or will lead to sensible default behaviour. + * + * If you add or remove elements, please don't forget to modify + * ./arch/vax/kernel/asm-offsets.c! + */ + +#ifndef __ASSEMBLY__ +struct vax_mv { + void (*pre_vm_init)(void); /* Called after kernel relocation */ + void (*post_vm_init)(void); /* Called at start of setup_arch() */ + + void (*pre_vm_putchar)(int); + int (*pre_vm_getchar)(void); + + void (*post_vm_putchar)(int); + int (*post_vm_getchar)(void); + + void (*console_init)(void); /* If implemented, this is called from + drivers/char/tty_io.c, console_init() */ + + void (*reboot)(void); /* If implemented, these are called from */ + void (*halt)(void); /* machine_restart/_halt */ + + void (*mcheck)(void *); /* Machine check handler */ + + void (*init_devices)(void); /* Optional */ + + const char * (*cpu_type_str)(void); + + void (*clock_init)(void); /* Called by time_init() to set up RTC */ + unsigned int *clock_base; /* Address of clock page for vsbus RTC */ + + unsigned int sidex; /* If not available, it's set to 0 */ + + /* Flags for very CPU-specific features */ + unsigned long nicr_required:1; /* A value needs to be written to + PR_NICR to get overrun (aka timer) + interrupts */ + unsigned long keep_early_console:1; /* Don't disable early console as + requested, because there's not + yet a real console driver */ +}; +#endif /* !__ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ +extern struct vax_mv *mv; /* Points to the applicable MV */ + +extern struct vax_mv mv_ka41; /* All others may be available, or not */ +extern struct vax_mv mv_ka42; +extern struct vax_mv mv_ka43; +extern struct vax_mv mv_ka46; +extern struct vax_mv mv_ka48; +extern struct vax_mv mv_ka49; +extern struct vax_mv mv_ka52; +extern struct vax_mv 
mv_ka55; +extern struct vax_mv mv_ka410; +extern struct vax_mv mv_ka630; +extern struct vax_mv mv_ka640; +extern struct vax_mv mv_ka650; +extern struct vax_mv mv_ka660; +extern struct vax_mv mv_vxt; + +/* + * This defines a match for a CPU and a macro to place + * such a match structure into the right section at link time + */ +struct cpu_match { + struct vax_mv *mv; + unsigned long sid_mask; + unsigned long sid_match; + unsigned long sidex_addr; + unsigned long sidex_mask; + unsigned long sidex_match; +}; + +#define __CPU_MATCH __attribute_used__ __attribute__((__section__(".init.cpumatch"))) + + +/* + * These functions are implemented in arch/vax/kernel/cpu_generic.c. + * They apply to lots of CPUs. + */ +extern void mtpr_putchar(int c); +extern int mtpr_getchar(void); + +extern void ka46_48_49_prom_putchar(int c); +extern int ka46_48_49_prom_getchar(void); + +extern void ka4x_prom_putchar(int c); +extern int ka4x_prom_getchar(void); + +extern void vxt2694_putchar (int c); +extern int vxt2694_getchar (void); + +/* Helper functions to get serial console working. */ +extern void dz11_putchar(int c); +extern int dz11_getchar(void); +extern void init_dz11_console(unsigned long dz11_phys_addr, unsigned int line); +extern void dz_serial_console_init(void); +extern void init_vxt2694_console (unsigned long phys_addr); + +/* + * FIXME: These should be removed eventually after there's no more code + * which needs to know exact CPU type. 
+ */ +static inline int is_ka41(void) { +#ifdef CONFIG_CPU_KA41 + return mv == &mv_ka41; +#else + return 0; +#endif +} + +static inline int is_ka42(void) { +#ifdef CONFIG_CPU_KA42 + return mv == &mv_ka42; +#else + return 0; +#endif +} + +static inline int is_ka43(void) { +#ifdef CONFIG_CPU_KA43 + return mv == &mv_ka43; +#else + return 0; +#endif +} + +static inline int is_ka46(void) { +#ifdef CONFIG_CPU_KA46 + return mv == &mv_ka46; +#else + return 0; +#endif +} + +static inline int is_ka52(void) { +#ifdef CONFIG_CPU_KA52 + return mv == &mv_ka52; +#else + return 0; +#endif +} + +static inline int is_ka55(void) { +#ifdef CONFIG_CPU_KA55 + return mv == &mv_ka55; +#else + return 0; +#endif +} + +static inline int is_ka48(void) { +#ifdef CONFIG_CPU_KA48 + return mv == &mv_ka48; +#else + return 0; +#endif +} + +static inline int is_ka49(void) { +#ifdef CONFIG_CPU_KA49 + return mv == &mv_ka49; +#else + return 0; +#endif +} + +static inline int is_ka650(void) { +#ifdef CONFIG_CPU_KA650 + return mv == &mv_ka650; +#else + return 0; +#endif +} + +static inline int is_ka660(void) { +#ifdef CONFIG_CPU_KA660 + return mv == &mv_ka660; +#else + return 0; +#endif +} + +static inline int is_vxt(void) { +#ifdef CONFIG_CPU_VXT + return mv == &mv_vxt; +#else + return 0; +#endif +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _VAX_MV_H */ diff -Nru a/include/asm-vax/namei.h b/include/asm-vax/namei.h --- a/include/asm-vax/namei.h 1970-01-01 01:00:00 +++ b/include/asm-vax/namei.h 2004-06-02 22:07:42 @@ -0,0 +1,19 @@ +#ifndef _VAX_NAMEI_H +#define _VAX_NAMEI_H + +/* + * $Id: namei.h,v 1.5 2004/06/02 20:07:42 jbglaw Exp $ + * linux/include/asm-alpha/namei.h + * + * Included from linux/fs/namei.c + * atp jan 2000 - updated for 2.4 + */ + +/* This dummy routine maybe changed to something useful + * for /usr/gnemul/ emulation stuff. + * Look at asm-sparc/namei.h for details. 
+ */ + +#define __emul_prefix() NULL + +#endif /* _VAX_NAMEI_H */ diff -Nru a/include/asm-vax/page.h b/include/asm-vax/page.h --- a/include/asm-vax/page.h 1970-01-01 01:00:00 +++ b/include/asm-vax/page.h 2004-06-02 22:54:42 @@ -0,0 +1,6 @@ +#ifndef _VAX_PAGE_H +#define _VAX_PAGE_H + +#include + +#endif /* _VAX_PAGE_H */ diff -Nru a/include/asm-vax/param.h b/include/asm-vax/param.h --- a/include/asm-vax/param.h 1970-01-01 01:00:00 +++ b/include/asm-vax/param.h 2004-09-30 22:17:44 @@ -0,0 +1,24 @@ +#ifndef _VAX_PARAM_H +#define _VAX_PARAM_H + +#ifdef __KERNEL__ +#define HZ 100 /* Actually, some VAX CPUs are hardwired + to trigger interrupts at 10ms, so + HZ==100 is mandatory */ +#define USER_HZ 100 +#define CLOCKS_PER_SEC 100 /* frequency at which times() counts */ +#endif + +#define EXEC_PAGESIZE 4096 + +#ifndef NGROUPS +#define NGROUPS 32 +#endif + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _VAX_PARAM_H */ diff -Nru a/include/asm-vax/pcb.h b/include/asm-vax/pcb.h --- a/include/asm-vax/pcb.h 1970-01-01 01:00:00 +++ b/include/asm-vax/pcb.h 2004-06-02 23:02:37 @@ -0,0 +1,55 @@ +#ifndef _VAX_PCB_H +#define _VAX_PCB_H + +#include + +/* + * Process Control Block structure + * + * Reference: VAX Architecture Reference Manual, pg 260 + * Copyright atp 1998. For Linux/Vax. + */ + +struct vax_pcb { + unsigned long int ksp; + unsigned long int esp; /* null? */ + unsigned long int ssp; /* null? */ + unsigned long int usp; + unsigned long int r0; + unsigned long int r1; + unsigned long int r2; + unsigned long int r3; + unsigned long int r4; + unsigned long int r5; + unsigned long int r6; + unsigned long int r7; + unsigned long int r8; + unsigned long int r9; + unsigned long int r10; + unsigned long int r11; + unsigned long int ap; + unsigned long int fp; + unsigned long int pc; + struct psl_fields psl; + unsigned long int p0br; + /* + * The length registers hold the number of HWPTES, i.e. 
8 * + what you would expect from the values in the PGD, since + a HW page is 512 bytes (while we present assembled 4K pages + to Linux' MM layer). + */ + unsigned long int p0lr:22; + unsigned long int mbz1:2; /* reserved, MBZ */ + unsigned long int astlvl:3; + unsigned long int mbz2:5; /* reserved, MBZ */ + unsigned long int p1br; + unsigned long int p1lr; + unsigned long int mbz3:7; /* reserved, MBZ */ + unsigned long int pme:1; +}; + +/* Process control block initialiser used in INIT_THREAD */ + +#define INIT_PCB { 0 } /* fill complete structure with zeroes */ + +#endif /* _VAX_PCB_H */ diff -Nru a/include/asm-vax/pci.h b/include/asm-vax/pci.h --- a/include/asm-vax/pci.h 1970-01-01 01:00:00 +++ b/include/asm-vax/pci.h 2004-08-07 02:06:47 @@ -0,0 +1,17 @@ +#ifndef _ASM_VAX_PCI_H +#define _ASM_VAX_PCI_H + +/* + * VAX asm/pci.h file. + * + * We don't have PCI busses on vaxes, but since someone added in an + * #include to drivers/scsi/hosts.h at or around 2.4.3, + * without encasing it in a #ifdef CONFIG_PCI we need this stub file + * to prevent the compilation dying as we compile the scsi routines. 
+ * + * atp Mar 2002 + */ + +#define PCI_DMA_BUS_IS_PHYS (0) + +#endif /* _ASM_VAX_PCI_H */ diff -Nru a/include/asm-vax/percpu.h b/include/asm-vax/percpu.h --- a/include/asm-vax/percpu.h 1970-01-01 01:00:00 +++ b/include/asm-vax/percpu.h 2004-06-02 23:04:47 @@ -0,0 +1,6 @@ +#ifndef _VAX_PERCPU_H +#define _VAX_PERCPU_H + +#include + +#endif /* _VAX_PERCPU_H */ diff -Nru a/include/asm-vax/pgalloc.h b/include/asm-vax/pgalloc.h --- a/include/asm-vax/pgalloc.h 1970-01-01 01:00:00 +++ b/include/asm-vax/pgalloc.h 2002-05-20 02:33:39 @@ -0,0 +1,6 @@ +#ifndef __ASM_VAX_PGALLOC_H +#define __ASM_VAX_PGALLOC_H + +#include + +#endif /* __ASM_VAX_PGALLOC_H */ diff -Nru a/include/asm-vax/pgtable.h b/include/asm-vax/pgtable.h --- a/include/asm-vax/pgtable.h 1970-01-01 01:00:00 +++ b/include/asm-vax/pgtable.h 2004-06-02 23:05:23 @@ -0,0 +1,6 @@ +#ifndef _VAX_PGTABLE_H +#define _VAX_PGTABLE_H + +#include + +#endif /* _VAX_PGTABLE_H */ diff -Nru a/include/asm-vax/poll.h b/include/asm-vax/poll.h --- a/include/asm-vax/poll.h 1970-01-01 01:00:00 +++ b/include/asm-vax/poll.h 2004-05-15 22:12:58 @@ -0,0 +1,25 @@ +#ifndef __VAX_POLL_H +#define __VAX_POLL_H + +/* from alpha */ + +#define POLLIN 1 +#define POLLPRI 2 +#define POLLOUT 4 +#define POLLERR 8 +#define POLLHUP 16 +#define POLLNVAL 32 +#define POLLRDNORM 64 +#define POLLRDBAND 128 +#define POLLWRNORM 256 +#define POLLWRBAND 512 +#define POLLMSG 1024 +#define POLLREMOVE 2048 + +struct pollfd { + int fd; + short events; + short revents; +}; + +#endif /* __VAX_POLL_H */ diff -Nru a/include/asm-vax/posix_types.h b/include/asm-vax/posix_types.h --- a/include/asm-vax/posix_types.h 1970-01-01 01:00:00 +++ b/include/asm-vax/posix_types.h 2005-03-22 10:18:07 @@ -0,0 +1,69 @@ +#ifndef _VAX_POSIX_TYPES_H +#define _VAX_POSIX_TYPES_H + +/* Source: ARM port */ +/* #include */ + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. 
Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned short __kernel_uid_t; +typedef unsigned short __kernel_gid_t; +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +typedef unsigned short __kernel_old_dev_t; + +typedef int __kernel_clockid_t; +typedef int __kernel_timer_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if defined(__KERNEL__) || defined(__USE_ALL) + int val[2]; +#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ + int __val[2]; +#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ +} __kernel_fsid_t; + +#undef __FD_SET +#define __FD_SET(fd, fdsetp) \ + (((fd_set *)fdsetp)->fds_bits[(fd) >> 5] |= (1<<((fd) & 31))) + +#undef __FD_CLR +#define __FD_CLR(fd, fdsetp) \ + (((fd_set *)fdsetp)->fds_bits[(fd) >> 5] &= ~(1<<((fd) & 31))) + +#undef __FD_ISSET +#define __FD_ISSET(fd, fdsetp) \ + ((((fd_set *)fdsetp)->fds_bits[(fd) >> 5] & (1<<((fd) & 31))) != 0) + +#undef __FD_ZERO +#define __FD_ZERO(fdsetp) \ + (memset (fdsetp, 0, sizeof (*(fd_set *)fdsetp))) + +#endif diff -Nru a/include/asm-vax/processor.h b/include/asm-vax/processor.h --- a/include/asm-vax/processor.h 1970-01-01 01:00:00 +++ b/include/asm-vax/processor.h 2004-10-02 13:40:49 @@ -0,0 +1,114 @@ +#ifndef _VAX_PROCESSOR_H +#define _VAX_PROCESSOR_H + 
+/* Copyright atp 1998-2001. + * kenn 1999-2001. + * + * processor.h for vax architecture. + * + * Aug 2001. atp fix size of task structure allocation + remove fixme comment + */ + + +#include /* process control block definition */ +#include /* task memory space defines */ + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) + + +struct vaxcpu { + unsigned int sid; + int cpuid; + int mm_enabled; /* have we enabled paging yet? */ + unsigned int *pfnmap; /* address of pfn bitmap */ + unsigned int p_memsize; /* size in bytes of physical memory */ + unsigned int n_pfn; + unsigned int *scb; /* address of the scb */ + unsigned int *rpb; /* address of the rpb */ +}; + +extern struct vaxcpu vax_cpu; + +/* from alpha port */ + +typedef struct { + unsigned long seg; +} mm_segment_t; + +/* From alpha port: FIXME: this needs sorting. */ +/* I doubt we need flags ?(alpha specific) or fs */ +/* atp Jan 2000 - I'm going to restrict this to + * just the pcb at present. As we need + * more per-thread data, we can add it. + */ +struct thread_struct { + struct vax_pcb pcb; +}; + +/* + * This is the initializer for thread_struct (as declared above). At + * present only the pcb is here. + */ +#define INIT_THREAD { .pcb = INIT_PCB, } + +// #include + +struct task_struct; +struct pt_regs; + +/* + * Do necessary setup to start up a newly executed thread. + */ +extern void start_thread(struct pt_regs *, unsigned long, unsigned long); + +/* Free all resources held by a thread. 
*/ +static inline void release_thread(struct task_struct *p) +{ + /* nothing to do on VAX */ +} + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + + +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +#define copy_segments(tsk, mm) do { } while (0) +#define release_segments(mm) do { } while (0) +#define forget_segments() do { } while (0) + +/* + * Free current thread data structures etc.. + */ +static inline void exit_thread(void) +{ +} + +/* + * Return saved PC of a blocked thread. + * This needs to be a #define rather than an inline, since we + * don't have the full definition of task_struct at this point + * in the compilation (linux/sched.h includes this file before + * defining task_struct) + */ +#define thread_saved_pc(tsk) tsk->thread.pcb.pc + +unsigned long get_wchan(struct task_struct *p); + +/* temporary - FIXME: */ + +extern inline unsigned long get_wchan(struct task_struct *p) +{ + return 0xdeadbeef; /* XXX */ +} + +#define KSTK_EIP(tsk) (0xdeadbeef) +#define KSTK_ESP(tsk) (0xdeadbeef) + +#define cpu_relax() do { } while (0) + +#endif /* _VAX_PROCESSOR_H */ diff -Nru a/include/asm-vax/psl.h b/include/asm-vax/psl.h --- a/include/asm-vax/psl.h 1970-01-01 01:00:00 +++ b/include/asm-vax/psl.h 2004-06-02 23:15:35 @@ -0,0 +1,72 @@ +#ifndef _PSL_H_ +#define _PSL_H_ + +/* + * Define format of PSW (processor status word) and PSL + * (processor status longword). 
The PSW is the lower + * 16 bits of the PSL and is the only bit that can be modified + * by non-kernel-mode code + */ + +struct psw_fields { + unsigned c:1; /* carry */ + unsigned v:1; /* overflow */ + unsigned z:1; /* zero */ + unsigned n:1; /* negative */ + unsigned t:1; /* trace enable */ + unsigned iv:1; /* integer overflow trap enable */ + unsigned fu:1; /* floating underflow trap enable */ + unsigned dv:1; /* decimal overflow trap enable */ + + unsigned mbz:8; /* must be zero */ +}; + +struct psl_fields { + unsigned c:1; /* carry */ + unsigned v:1; /* overflow */ + unsigned z:1; /* zero */ + unsigned n:1; /* negative */ + unsigned t:1; /* trace enable */ + unsigned iv:1; /* integer overflow trap enable */ + unsigned fu:1; /* floating underflow trap enable */ + unsigned dv:1; /* decimal overflow trap enable */ + + unsigned mbz1:8; /* must be zero */ + + unsigned ipl:5; /* current IPL */ + + unsigned mbz2:1; /* must be zero */ + + unsigned prevmode:2; /* previous access mode */ + + unsigned accmode:2; /* current access mode */ + + unsigned is:1; /* interrupt stack active */ + unsigned fpd:1; /* first part done */ + + unsigned mbz3:2; /* must be zero */ + + unsigned tp:1; /* trace pending */ + unsigned cm:1; /* compatibility mode */ +}; + +#define PSL_MBZ_MASK 0x3020ff00 /* these bits reserved */ + +/* Values for accmode and prevmode fields above */ +#define PSL_MODE_USER 3 +#define PSL_MODE_SUPER 2 +#define PSL_MODE_EXEC 1 +#define PSL_MODE_KERNEL 0 + +#define RAW_PSL(psl_struct) (*(unsigned int *)&(psl_struct)) + +#define __psl \ +({ \ + struct psl_fields p; \ + __asm__ volatile ( \ + "movpsl %0" \ + : "=g"(p)); \ + p; \ +}) + +#endif /* _PSL_H_ */ diff -Nru a/include/asm-vax/ptrace.h b/include/asm-vax/ptrace.h --- a/include/asm-vax/ptrace.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ptrace.h 2004-11-18 10:30:31 @@ -0,0 +1,106 @@ +#ifndef _VAX_PTRACE_H +#define _VAX_PTRACE_H + +/* ptrace.h linux vax porting team 1998-2001 */ + +#include + +#define PT_R0 0 
+#define PT_R1 1 +#define PT_R2 2 +#define PT_R3 3 +#define PT_R4 4 +#define PT_R5 5 +#define PT_R6 6 +#define PT_R7 7 +#define PT_R8 8 +#define PT_R9 9 +#define PT_R10 10 +#define PT_R11 11 +#define PT_AP 12 +#define PT_FP 13 +#define PT_SP 14 +#define PT_PC 15 +#define PT_PSL 16 +#define PT_USP 20 + + +/* + * This struct defines the way the registers are stored on the + * stack during a system call. + */ +struct pt_regs { + unsigned long int r0; + unsigned long int r1; + unsigned long int r2; + unsigned long int r3; + unsigned long int r4; + unsigned long int r5; + unsigned long int r6; + unsigned long int r7; + unsigned long int r8; + unsigned long int r9; + unsigned long int r10; + unsigned long int r11; + unsigned long int ap; + unsigned long int fp; + unsigned long int sp; + unsigned long int pc; + struct psl_fields psl; +}; + +struct user_regs_struct { + unsigned long int r0; + unsigned long int r1; + unsigned long int r2; + unsigned long int r3; + unsigned long int r4; + unsigned long int r5; + unsigned long int r6; + unsigned long int r7; + unsigned long int r8; + unsigned long int r9; + unsigned long int r10; + unsigned long int r11; + unsigned long int ap; + unsigned long int fp; + unsigned long int sp; + unsigned long int pc; + struct psl_fields psl; +}; +/* + * OK, this may need a FIXME: later. user_regs_struct is the + * regs structure expected by userspace. see comment in asm-i386/user.h + */ + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. 
*/ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 +#define PTRACE_GETFPREGS 14 +#define PTRACE_SETFPREGS 15 + +#ifdef __KERNEL__ + +#define user_mode(regs) ((regs)->psl.accmode == PSL_MODE_USER) + +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) + +extern void hex_dump(void *, unsigned int); +extern void show_regs(struct pt_regs *); +extern void show_cpu_regs(void); +extern void dump_cur_regs(unsigned int frames); +extern void vax_dump_stack(unsigned int frames); + +#define DUMP_STACK_CURRENT 0 +#define DUMP_STACK_CALLER 1 + +#define DUMP_REGS_CURRENT 0 +#define DUMP_REGS_CALLER 1 + +/* extra stuff for binfmt_aout */ +#define START_DATA(u) (u.start_data) + +#endif /* __KERNEL__ */ + +#endif /* _VAX_PTRACE_H */ diff -Nru a/include/asm-vax/resource.h b/include/asm-vax/resource.h --- a/include/asm-vax/resource.h 1970-01-01 01:00:00 +++ b/include/asm-vax/resource.h 2005-10-03 14:02:20 @@ -0,0 +1,7 @@ +#ifndef _ASM_VAX_RESOURCE_H +#define _ASM_VAX_RESOURCE_H + +#include +#include + +#endif /* _ASM_VAX_RESOURCE_H */ diff -Nru a/include/asm-vax/rpb.h b/include/asm-vax/rpb.h --- a/include/asm-vax/rpb.h 1970-01-01 01:00:00 +++ b/include/asm-vax/rpb.h 2004-03-04 01:28:45 @@ -0,0 +1,79 @@ +#ifndef _VAX_RPB_H +#define _VAX_RPB_H + +/* atp Sep. 1998 VAX arch. Restart Parameter Block */ +/* This work is copyright atp 1998 and is licenced under the + * GNU GPL version 2. + * + * History: + * 1.0 (atp) Cribbed out of VMS internals + Data structures Table 30.22 + * Stripped RPB$ off struct names. 
+ */ + +#include + +typedef uint8_t byte; +typedef uint16_t word; +typedef uint32_t longword; +typedef uint64_t quadword; + +struct rpb_struct { + longword l_base; /* physical base addr (VMB) */ + longword l_restart; /* Phys addr of EXE$RESTART */ + longword l_chksum; /* Checksum of first 31 longwords of exe$restart */ + longword l_rststflg; /* restart in progress flag (console) */ + longword l_haltpc; /* PC at halt (vmb) */ + longword l_haltpsl; /* PSL at halt (vmb) */ + longword l_haltcode; /* reason for the halt/restart (vmb) */ + longword l_bootr0; /* saved bootstrap parameters */ + longword l_bootr1; /* saved bootstrap parameters */ + longword l_bootr2; /* saved bootstrap parameters */ + longword l_bootr3; /* saved bootstrap parameters */ + longword l_bootr4; /* saved bootstrap parameters */ + longword l_bootr5; /* saved bootstrap parameters */ + longword l_iovec; /* address of bootstrap driver */ + longword l_iovecsz; /* size in bytes of bootstrap driver */ + longword l_fillbn; /* Logical Block no of secondary bootstrap file (us?) 
*/ + longword l_filsiz; /* size in BLOCKS of secondary bootstrap file */ + quadword q_pfnmap; /* descriptor of PFN bitmap */ + longword l_pfncnt; /* count of physical pages */ + longword l_svaspt; /* system virtual address of system pg table */ + longword l_csrphy; /* physical addr of UBA device CSR */ + longword l_csrvir; /* virtual addr of UBA CSR */ + longword l_adpphy; /* phys addr of adapter config register */ + longword l_adpvir; /* virtual addr of adapter config register */ + word w_unit; /* bootstrap device unit number */ + byte b_devtyp; /* bootstrap device type code */ + byte b_slave; /* bootstrap device slave unit num */ + char t_file[40]; /* secondary bootstrap file name */ + byte b_confreg[16]; /* byte array of adapter types (11/78x,11/750 only)*/ +#if 0 + byte b_hdrpgcnt; /* count of header pages in secondary boot image */ + word w_bootndt; /* type of boot adapter */ + byte b_flags; /* misc flag bits */ +#else + /* Compiler doesn't align these fields correctly */ + byte b_hdrpgcnt; /* count of header pages in secondary boot image */ + byte b_bootndt_lo; /* type of boot adapter (low byte) */ + byte b_bootndt_hi; /* type of boot adapter (high byte) */ + byte b_flags; /* misc flag bits */ +#endif + longword l_max_pfn; /* maximum PFN */ + longword l_sptep; /* system space PTE prototype register */ + longword l_sbr; /* system base register */ + longword l_cpudbvec; /* phys addr of per CPU db vector or primary's percpu database */ + longword l_cca_addr; /* physical address of cca */ + longword l_slr; /* saved system length register */ + longword l_memdsc[16]; /* array of memory descriptors (bugcheck) */ + longword l_smp_pc; /* smp boot page physical address */ + byte b_wait[4]; /* loop code for attached processor */ + longword l_badpgs; /* number of bad pages found on memory scan */ + byte b_ctrlltr; /* controller letter designation */ + byte b_scbpagct; /* count of SCB pages */ + byte b_reserved[6]; /* reserved */ + longword l_vmb_version; /* vmb version 
number (uvax VMB's) */ +}; + +extern struct rpb_struct boot_rpb; + +#endif /* _VAX_RPB_H */ diff -Nru a/include/asm-vax/scatterlist.h b/include/asm-vax/scatterlist.h --- a/include/asm-vax/scatterlist.h 1970-01-01 01:00:00 +++ b/include/asm-vax/scatterlist.h 2004-06-03 09:13:58 @@ -0,0 +1,16 @@ +#ifndef _VAX_SCATTERLIST_H +#define _VAX_SCATTERLIST_H + +/* FIXME: ?? atp mar 2002 */ + +struct scatterlist { + char *address; /* Location data is to be transferred to */ + struct page *page; /* Location for highmem page, if any */ + unsigned int offset; /* for highmem, page offset */ + dma_addr_t dma_address; + unsigned int length; +}; + +#define ISA_DMA_THRESHOLD (~0UL) + +#endif /* _VAX_SCATTERLIST_H */ diff -Nru a/include/asm-vax/scb.h b/include/asm-vax/scb.h --- a/include/asm-vax/scb.h 1970-01-01 01:00:00 +++ b/include/asm-vax/scb.h 2004-06-03 19:51:12 @@ -0,0 +1,117 @@ +#ifndef _VAX_SCB_H +#define _VAX_SCB_H + +#include + +/* system control block definition pg. 243-244 VAX Architecture HB */ +/* This is the first page of the scb only. Each machine type has its + * own different subsequent pages. 
*/ +/* atp Dec 1998 */ + +struct vax_scb { + void *passrel; /* 00 passive release */ + void *mcheck; /* 01 machine check */ + void *ksp_inval; /* 02 kernel stack invalid */ + void *pwrfail; /* 03 power failure */ + void *resinstr; /* 04 Opcodes res->DEC + privileged instr fault*/ + void *xfc; /* 05 XFC instr */ + void *resop; /* 06 reserved operand */ + void *resam; /* 07 reserved addressing mode */ + + void *accvio; /* 08 access violation */ + void *trans_inval; /* 09 translation invalid */ + void *tpend; /* 0a trace pending */ + void *bpt; /* 0b breakpoint */ + void *compat; /* 0c compatibility mode */ + void *arith; /* 0d arithmetic trap or fault */ + void *rsv1; /* 0e reserved to DEC */ + void *rsv2; /* 0f reserved to DEC */ + + void *chmk; /* 10 change mode to kernel trap - SYSCALL */ + void *chme; /* 11 change mode to exec trap - unused */ + void *chms; /* 12 change mode to supervisor - unused (DCL anyone?) */ + void *chmu; /* 13 change mode to user - ? */ + void *buserr[5]; /* 14 - 18 bus or memory error */ + void *rsv3[7]; /* 19 - 1f unused/reserved to DEC */ + + void *rsv4; /* 20 unused/reserved to DEC */ + void *intvec[15]; /* 21 - 2f software interrupts 0x1->0xf */ + + void *timer; /* 30 interval timer */ + void *rsv5; /* 31 unused/reserved to DEC */ + void *subset; /* 32 subset arch emulation request */ + void *susp_emul; /* 33 suspended emulation ?? 
*/ + void *rsv6[8]; /* 34 - 3b unused/reserved to DEC/owners */ + + void *con_str_rcv; /* 3c console storage receive (11/750,11/730)*/ + void *con_str_xmt; /* 3d console storage xmit (11/750,11/730) */ + void *con_term_rcv; /* 3e console terminal receive (11/750,11/730)*/ + void *con_term_xmt; /* 3f console terminal xmit (11/750,11/730) */ + + void *adapter[0x40]; /* 40 - 7f adapter vectors */ +}; + +#define SCB_PASSREL 0x00 +#define SCB_MCHECK 0x01 +#define SCB_KSP_INVAL 0x02 +#define SCB_PWRFAIL 0x03 +#define SCB_RESINSTR 0x04 +#define SCB_XFC 0x05 +#define SCB_RESOP 0x06 +#define SCB_RESAM 0x07 + +#define SCB_ACCVIO 0x08 +#define SCB_TRANS_INVAL 0x09 +#define SCB_TPEND 0x0a +#define SCB_BPT 0x0b +#define SCB_COMPAT 0x0c +#define SCB_ARITH 0x0d + +#define SCB_CHMK 0x10 +#define SCB_CHME 0x11 +#define SCB_CHMS 0x12 +#define SCB_CHMU 0x13 +#define SCB_BUSERR1 0x14 +#define SCB_BUSERR2 0x15 +#define SCB_MEMCORR 0x15 /* corrected read memory handler */ +#define SCB_BUSERR3 0x16 +#define SCB_BUSERR4 0x17 +#define SCB_BUSERR5 0x18 + +#define SCB_SOFTINT1 0x21 +#define SCB_SOFTINT2 0x22 +#define SCB_SOFTINT3 0x23 +#define SCB_SOFTINT4 0x24 +#define SCB_SOFTINT5 0x25 +#define SCB_SOFTINT6 0x26 +#define SCB_SOFTINT7 0x27 +#define SCB_SOFTINT8 0x28 +#define SCB_SOFTINT9 0x29 +#define SCB_SOFTINTA 0x2a +#define SCB_SOFTINTB 0x2b +#define SCB_SOFTINTC 0x2c +#define SCB_SOFTINTD 0x2d +#define SCB_SOFTINTE 0x2e +#define SCB_SOFTINTF 0x2f + +#define SCB_TIMER 0x30 +#define SCB_SUBSET 0x32 +#define SCB_SUSP_EMUL 0x33 + +#define SCB_CON_STR_RCV 0x3c +#define SCB_CON_STR_XMT 0x3d +#define SCB_CON_TERM_RCV 0x3e +#define SCB_CON_TERM_XMT 0x3f + +union scb_and_device_vectors { + struct vax_scb scb; + unsigned char *vectors[NR_IRQS]; +}; + +/* Defined in kernel/interrupt.c */ +extern union scb_and_device_vectors scb; + +/* This macro can be used as both an lvalue and an rvalue */ +#define SCB_VECTOR(vec_num) scb.vectors[vec_num] + +#endif /* _VAX_SCB_H */ diff -Nru 
a/include/asm-vax/sections.h b/include/asm-vax/sections.h --- a/include/asm-vax/sections.h 1970-01-01 01:00:00 +++ b/include/asm-vax/sections.h 2004-06-03 19:51:46 @@ -0,0 +1,7 @@ +#ifndef __VAX_SECTIONS_H +#define __VAX_SECTIONS_H + +/* nothing to see, move along */ +#include + +#endif /* __VAX_SECTIONS_H */ diff -Nru a/include/asm-vax/semaphore-helper.h b/include/asm-vax/semaphore-helper.h --- a/include/asm-vax/semaphore-helper.h 1970-01-01 01:00:00 +++ b/include/asm-vax/semaphore-helper.h 2004-06-03 20:35:22 @@ -0,0 +1,102 @@ +#ifndef _VAX_SEMAPHORE_HELPER_H +#define _VAX_SEMAPHORE_HELPER_H + +/* + * $Id: semaphore-helper.h,v 1.5 2004/06/03 18:35:22 jbglaw Exp $ + * + * VAX version based on S390 version + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * + * Derived from "include/asm-i386/semaphore-helper.h" + * (C) Copyright 1996 Linus Torvalds + * (C) Copyright 1999 Andrea Arcangeli + */ + +/* + * These two _must_ execute atomically wrt each other. + * + * This is trivially done with load_locked/store_cond, + * but on the x86 we need an external synchronizer. + */ +static inline void wake_one_more(struct semaphore * sem) +{ + unsigned long flags; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + sem->waking++; + spin_unlock_irqrestore(&semaphore_wake_lock, flags); +} + +static inline int waking_non_zero(struct semaphore *sem) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->waking > 0) { + sem->waking--; + ret = 1; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_interruptible: + * 1 got the lock + * 0 go to sleep + * -EINTR interrupted + * + * If we give up we must undo our count-decrease we previously did in down(). + * Subtle: up() can continue to happens and increase the semaphore count + * even during our critical section protected by the spinlock. 
So + * we must remeber to undo the sem->waking that will be run from + * wake_one_more() some time soon, if the semaphore count become > 0. + */ +static inline int waking_non_zero_interruptible(struct semaphore *sem, + struct task_struct *tsk) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->waking > 0) { + sem->waking--; + ret = 1; + } else if (signal_pending(tsk)) { + if (atomic_inc_and_test_greater_zero(&sem->count)) + sem->waking--; + ret = -EINTR; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_trylock: + * 1 failed to lock + * 0 got the lock + * + * Implementation details are the same of the interruptible case. + */ +static inline int waking_non_zero_trylock(struct semaphore *sem) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->waking <= 0) + { + if (atomic_inc_and_test_greater_zero(&sem->count)) + sem->waking--; + } else { + sem->waking--; + ret = 0; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +#endif /* _VAX_SEMAPHORE_HELPER_H */ diff -Nru a/include/asm-vax/semaphore.h b/include/asm-vax/semaphore.h --- a/include/asm-vax/semaphore.h 1970-01-01 01:00:00 +++ b/include/asm-vax/semaphore.h 2004-06-03 20:36:25 @@ -0,0 +1,104 @@ +#ifndef _VAX_SEMAPHORE_H +#define _VAX_SEMAPHORE_H + +/* + * $Id: semaphore.h,v 1.7 2004/06/03 18:36:25 jbglaw Exp $ + * + * atp Mar 2002. All change for patch 2.4.4. 
+ * use rwsem.h + * VAX version based on S390 version + * + * S390 version + * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * + * Derived from "include/asm-i386/semaphore.h" + * (C) Copyright 1996 Linus Torvalds + */ + +#include + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +struct semaphore { + atomic_t count; + int sleepers; + wait_queue_head_t wait; +}; + +#define __SEM_DEBUG_INIT(name) + +#define __SEMAPHORE_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ + __SEM_DEBUG_INIT(name) } + +#define __MUTEX_INITIALIZER(name) \ + __SEMAPHORE_INITIALIZER(name,1) + +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) + +static inline void sema_init (struct semaphore *sem, int val) +{ + *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); +} + +static inline void init_MUTEX (struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void init_MUTEX_LOCKED (struct semaphore *sem) +{ + sema_init(sem, 0); +} + +asmlinkage void __down_failed(void /* special register calling convention */); +asmlinkage int __down_failed_interruptible(void /* params in registers */); +asmlinkage int __down_failed_trylock(void /* params in registers */); +asmlinkage void __up_wakeup(void /* special register calling convention */); + +asmlinkage void __down(struct semaphore * sem); +asmlinkage int __down_interruptible(struct semaphore * sem); +asmlinkage int __down_trylock(struct semaphore * sem); +asmlinkage void __up(struct semaphore * sem); + +static inline void down(struct semaphore * sem) +{ + if (atomic_dec_return(&sem->count) < 0) + __down(sem); +} + +static inline int down_interruptible(struct semaphore * sem) +{ + int ret = 0; + + if (atomic_dec_return(&sem->count) < 0) + ret = 
__down_interruptible(sem); + return ret; +} + +static inline int down_trylock(struct semaphore * sem) +{ + int ret = 0; + + if (atomic_dec_return(&sem->count) < 0) + ret = __down_trylock(sem); + return ret; +} + +static inline void up(struct semaphore * sem) +{ + if (atomic_inc_return(&sem->count) <= 0) + __up(sem); +} + +#endif +#endif /* _VAX_SEMAPHORE_H */ diff -Nru a/include/asm-vax/sembuf.h b/include/asm-vax/sembuf.h --- a/include/asm-vax/sembuf.h 1970-01-01 01:00:00 +++ b/include/asm-vax/sembuf.h 2002-05-20 02:33:39 @@ -0,0 +1,25 @@ +#ifndef _VAX_SEMBUF_H +#define _VAX_SEMBUF_H + +/* + * The semid64_ds structure for VAX architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused1; + __kernel_time_t sem_ctime; /* last change time */ + unsigned long __unused2; + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _VAX_SEMBUF_H */ diff -Nru a/include/asm-vax/serial.h b/include/asm-vax/serial.h --- a/include/asm-vax/serial.h 1970-01-01 01:00:00 +++ b/include/asm-vax/serial.h 2002-05-20 02:33:39 @@ -0,0 +1,18 @@ +#ifndef __VAX_SERIAL_H +#define __VAX_SERIAL_H + +/* serial.h */ +/* Jan 2001 atp. Added. Not sure what the base clock for VAX + * serial boards is - so go with 1.8Mhz at present + * + * 2000-02-18 KPH + * Removed all the PC-related stuff + */ + +/* We don't have any PC-style UARTs, so we don't want drivers/char/serial.c + do do anything. 
So, we leave SERIAL_PORT_DFNS empty */ +#define SERIAL_PORT_DFNS + +#define RS_TABLE_SIZE 0 + +#endif /* __VAX_SERIAL_H */ diff -Nru a/include/asm-vax/setup.h b/include/asm-vax/setup.h --- a/include/asm-vax/setup.h 1970-01-01 01:00:00 +++ b/include/asm-vax/setup.h 2004-09-03 00:41:50 @@ -0,0 +1,6 @@ +#ifndef __VAX_SETUP_H +#define __VAX_SETUP_H + +#define COMMAND_LINE_SIZE 256 + +#endif diff -Nru a/include/asm-vax/shmbuf.h b/include/asm-vax/shmbuf.h --- a/include/asm-vax/shmbuf.h 1970-01-01 01:00:00 +++ b/include/asm-vax/shmbuf.h 2002-05-20 02:33:39 @@ -0,0 +1,42 @@ +#ifndef __VAX_SHMBUF_H +#define __VAX_SHMBUF_H + +/* + * The shmid64_ds structure for VAX architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + unsigned long __unused1; + __kernel_time_t shm_dtime; /* last detach time */ + unsigned long __unused2; + __kernel_time_t shm_ctime; /* last change time */ + unsigned long __unused3; + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* __VAX_SHMBUF_H */ diff -Nru a/include/asm-vax/shmparam.h b/include/asm-vax/shmparam.h --- a/include/asm-vax/shmparam.h 1970-01-01 01:00:00 +++ b/include/asm-vax/shmparam.h 2002-05-20 02:33:39 @@ -0,0 +1,8 @@ +#ifndef __ASM_VAX_SHMPARAM_H +#define __ASM_VAX_SHMPARAM_H + +/* atp jan 2001 - 2.4 merge */ + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _ASM_VAX_SHMPARAM_H */ diff -Nru a/include/asm-vax/sigcontext.h b/include/asm-vax/sigcontext.h --- a/include/asm-vax/sigcontext.h 1970-01-01 01:00:00 +++ b/include/asm-vax/sigcontext.h 2002-05-20 02:33:39 @@ -0,0 +1,49 @@ +#ifndef _ASM_VAX_SIGCONTEXT_H +#define _ASM_VAX_SIGCONTEXT_H + + +/* FIXME: redo this. atp */ +/* D.A. replace ptrace.h with contents of structure.. + * this is so gdb will compile */ +#include + +//struct sigcontext_struct { +// unsigned long _unused[4]; +// int signal; +// unsigned long handler; +// unsigned long oldmask; +// struct pt_regs *regs; +//}; + +/* This struct is saved by setup_frame in signal.c, to keep the current context while + a signal handler is executed. It's restored by sys_sigreturn. + + To keep things simple, we use pt_regs here even though normally you just specify + the list of regs to save. Then we can use copy_from_user on the entire regs instead + of a bunch of get_user's as well... 
+ +*/ + +struct sigcontext { + unsigned long int r0; + unsigned long int r1; + unsigned long int r2; + unsigned long int r3; + unsigned long int r4; + unsigned long int r5; + unsigned long int r6; + unsigned long int r7; + unsigned long int r8; + unsigned long int r9; + unsigned long int r10; + unsigned long int r11; /* note: we keep current in r11 */ + unsigned long int ap; + unsigned long int fp; + unsigned long int sp; + unsigned long int pc; + struct psl_fields psl; + unsigned long oldmask; + unsigned long usp; /* usp before stacking this gunk on it */ +}; + +#endif diff -Nru a/include/asm-vax/siginfo.h b/include/asm-vax/siginfo.h --- a/include/asm-vax/siginfo.h 1970-01-01 01:00:00 +++ b/include/asm-vax/siginfo.h 2003-01-27 01:22:02 @@ -0,0 +1 @@ +#include diff -Nru a/include/asm-vax/signal.h b/include/asm-vax/signal.h --- a/include/asm-vax/signal.h 1970-01-01 01:00:00 +++ b/include/asm-vax/signal.h 2003-08-07 02:16:04 @@ -0,0 +1,191 @@ +#ifndef _ASM_VAX_SIGNAL_H +#define _ASM_VAX_SIGNAL_H + +#include + +/* from i386 header */ + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifdef __KERNEL__ +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#else +/* Here we must cater to libcs that poke about in kernel headers. 
*/ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGUNUSED 31 +#define SIGSYS 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX (_NSIG-1) + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. 
+ */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND +#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ + +#define SA_RESTORER 0x04000000 + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#ifdef __KERNEL__ + +/* + * These values of sa_flags are used only by the kernel as part of the + * irq handling routines. + * + * SA_INTERRUPT is also used by the irq handling routines. + * SA_SHIRQ is for shared interrupt support on PCI and EISA. + */ +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#endif + +#define SIG_BLOCK 0 /* for blocking signals */ +#define SIG_UNBLOCK 1 /* for unblocking signals */ +#define SIG_SETMASK 2 /* for setting the signal mask */ + +/* Type of a signal handler. */ +typedef void (*__sighandler_t)(int); + +#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ +#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ +#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ + +#ifdef __KERNEL__ +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + void (*sa_restorer)(void); + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; + +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#else +/* Here we must cater to libcs that poke about in kernel headers. 
*/ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ +#include + +/* This forward declaration is required to silence warnings in + files that include linux/signal.h before linux/ptrace.h */ +struct pt_regs; + +#endif /* __KERNEL__ */ + +#endif diff -Nru a/include/asm-vax/socket.h b/include/asm-vax/socket.h --- a/include/asm-vax/socket.h 1970-01-01 01:00:00 +++ b/include/asm-vax/socket.h 2004-11-17 10:25:25 @@ -0,0 +1,53 @@ +#ifndef _ASM_SOCKET_H +#define _ASM_SOCKET_H + +/* from asm-i386 + * FIXME: check vs netbsd and /or utlrix - if we want to be compatible */ + +#include + +/* For setsockoptions(2) */ +#define SOL_SOCKET 1 + +#define SO_DEBUG 1 +#define SO_REUSEADDR 2 +#define SO_TYPE 3 +#define SO_ERROR 4 +#define SO_DONTROUTE 5 +#define SO_BROADCAST 6 +#define SO_SNDBUF 7 +#define SO_RCVBUF 8 +#define SO_KEEPALIVE 9 +#define SO_OOBINLINE 10 +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_LINGER 13 +#define SO_BSDCOMPAT 14 +/* To add :#define SO_REUSEPORT 15 */ +#define SO_PASSCRED 16 +#define SO_PEERCRED 17 +#define SO_RCVLOWAT 18 +#define SO_SNDLOWAT 19 +#define SO_RCVTIMEO 20 +#define SO_SNDTIMEO 21 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_ACCEPTCONN 30 + +#define SO_PEERSEC 31 + +#endif /* _ASM_SOCKET_H */ diff -Nru 
a/include/asm-vax/sockios.h b/include/asm-vax/sockios.h --- a/include/asm-vax/sockios.h 1970-01-01 01:00:00 +++ b/include/asm-vax/sockios.h 2002-05-20 02:33:39 @@ -0,0 +1,12 @@ +#ifndef __ARCH_VAX_SOCKIOS__ +#define __ARCH_VAX_SOCKIOS__ + +/* Socket-level I/O control calls. */ +#define FIOSETOWN 0x8901 +#define SIOCSPGRP 0x8902 +#define FIOGETOWN 0x8903 +#define SIOCGPGRP 0x8904 +#define SIOCATMARK 0x8905 +#define SIOCGSTAMP 0x8906 /* Get stamp */ + +#endif diff -Nru a/include/asm-vax/spinlock.h b/include/asm-vax/spinlock.h --- a/include/asm-vax/spinlock.h 1970-01-01 01:00:00 +++ b/include/asm-vax/spinlock.h 2002-05-20 02:33:39 @@ -0,0 +1,6 @@ +#ifndef __ASM_VAX_SPINLOCK_H +#define __ASM_VAX_SPINLOCK_H + +#error "No SMP on VAX yet, please set CONFIG_SMP to n" + +#endif /* __ASM_VAX_SPINLOCK_H */ diff -Nru a/include/asm-vax/stat.h b/include/asm-vax/stat.h --- a/include/asm-vax/stat.h 1970-01-01 01:00:00 +++ b/include/asm-vax/stat.h 2003-06-20 02:18:45 @@ -0,0 +1,79 @@ +#ifndef _VAX_STAT_H +#define _VAX_STAT_H + +/* lifted from i386 port */ + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * 
insane amounts of padding around dev_t's. + */ +struct stat64 { + unsigned short st_dev; + unsigned char __pad0[6]; + + unsigned long long st_ino; + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned short st_rdev; + unsigned char __pad3[10]; + + long long st_size; + unsigned long st_blksize; + + unsigned long st_blocks; /* Number 512-byte blocks allocated. */ + unsigned long __pad4; /* future possible st_blocks high bits */ + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; + + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _VAX_STAT_H */ diff -Nru a/include/asm-vax/statfs.h b/include/asm-vax/statfs.h --- a/include/asm-vax/statfs.h 1970-01-01 01:00:00 +++ b/include/asm-vax/statfs.h 2003-09-19 01:57:50 @@ -0,0 +1,6 @@ +#ifndef _VAX_STATFS_H +#define _VAX_STATFS_H + +#include + +#endif /* _VAX_STATFS_H */ diff -Nru a/include/asm-vax/string.h b/include/asm-vax/string.h --- a/include/asm-vax/string.h 1970-01-01 01:00:00 +++ b/include/asm-vax/string.h 2004-03-03 21:18:34 @@ -0,0 +1,22 @@ +#ifndef __VAX_STRING_H__ +#define __VAX_STRING_H__ + +#ifdef __KERNEL__ + +/* Define these as we re-implement the 'stupid' ones provided by lib/string.c */ + +#undef __HAVE_ARCH_MEMCPY +#define __HAVE_ARCH_MEMSET +extern void * memset(void *dest, int value, __kernel_size_t size); +#undef __HAVE_ARCH_STRCPY +#undef __HAVE_ARCH_STRNCPY +#undef __HAVE_ARCH_STRCAT +#undef __HAVE_ARCH_STRNCAT +#undef __HAVE_ARCH_STRCHR +#undef __HAVE_ARCH_STRRCHR +#undef __HAVE_ARCH_STRLEN +#undef __HAVE_ARCH_MEMSETW + +#endif /* __KERNEL__ */ + +#endif /* __VAX_STRING_H__ */ diff -Nru a/include/asm-vax/system.h b/include/asm-vax/system.h --- a/include/asm-vax/system.h 1970-01-01 01:00:00 +++ b/include/asm-vax/system.h 2005-07-31 17:02:35 @@ -0,0 +1,175 @@ +#ifndef _VAX_SYSTEM_H +#define _VAX_SYSTEM_H + 
+#include + +#include +#include + +#include + +#define prepare_arch_schedule(prev) do { } while(0) +#define finish_arch_schedule(prev) do { } while(0) + +#include + +/* + * System defines.. Note that this is included both from .c and .S + * files, so it does only defines, not any C code. + */ + +/* FIXME: these are wrong */ +#define KERNEL_START (PAGE_OFFSET + 0x300000) +#define SWAPPER_PGD (PAGE_OFFSET + 0x300000) +#define INIT_STACK (PAGE_OFFSET + 0x302000) +#define EMPTY_PGT (PAGE_OFFSET + 0x304000) +#define EMPTY_PGE (PAGE_OFFSET + 0x308000) +#define ZERO_PGE (PAGE_OFFSET + 0x30A000) + +#define START_ADDR (PAGE_OFFSET + 0x310000) + +#define arch_align_stack(x) (x) + +#ifndef __ASSEMBLY__ +struct task_struct; /* one of the stranger aspects of C forward declarations.. */ + +/* This macro wrapper around switch_to() means that the compiler + doesn't need to push the unused 3rd argument on the stack */ +struct task_struct *__switch_to(struct task_struct*, struct task_struct*); +#define switch_to(prev, next, last) (last) = __switch_to((prev), (next)) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ + +#define cmpxchg(ptr, old, new) ({ \ + unsigned long flags; \ + __typeof__(*(ptr)) cur; \ + local_irq_save(flags); \ + cur = *(ptr); \ + if (cur == (old)) { \ + *(ptr) = (new); \ + } \ + local_irq_restore(flags); \ + cur; \ +}) + +/* mb is the alpha instruction to cause serialisation of memory + operations. According to a software note in section 5.5.4 of + part I of the alpha arch handbook (p/no ey-l520e-dp) we don't + need to worry about this on the vax */ + + +/* + * Force strict CPU ordering. 
Stubbed out FIXME: + */ +#define mb() __asm__ __volatile__("": : :"memory") +#define rmb() mb() +#define wmb() mb() +#define read_barrier_depends() mb() +#define smp_read_barrier_depends() mb() +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() + +#define getipl() __mfpr(PR_IPL) + +#define setipl(ipl) __mtpr(ipl, PR_IPL) + +/* SMP: non atomic alert */ +/* FIXME: Use PR_IPL instead of 0x12 */ +#define swpipl(ipl) \ +({ \ + register unsigned long __r0; \ + \ + __asm__ __volatile__( \ + " mfpr $0x12, %0 \n" \ + " mtpr %1, $0x12 #swpipl \n" \ + : "=&r" (__r0) \ + : "r" (ipl) \ + : "r10", "memory"); \ + \ + __r0; \ +}) + +#define local_save_flags(flags) ((flags) = getipl()) +#define local_irq_save(flags) ((flags) = swpipl(31)) +#define local_irq_restore(flags) setipl(flags) +#define local_irq_disable() setipl(31) + +#define irqs_disabled() (__psl.ipl == 31) + +/* If we're handling an interrupt (i.e. the IS bit is set in the + PSL and we're on the interrupt stack), then we must not enable + interrupts by dropping IPL all the way to 0. If we do, and + another interrupt comes in, then this second interrupt will + be handled normally, but will REI to a PSL with IS set and + an IPL of 0, which REI doesn't like at all. + + So, instead, we drop IPL to 1 if we're running on the interrupt + stack, thus making sure that REI will be kept happy. 
*/ + +extern __inline__ void +local_irq_enable(void) +{ + if (__psl.is) { + setipl(1); + } else { + setipl(0); + } +} + +#ifdef __SMP__ +# error "SMP not supported" +#endif /* __SMP__ */ + +#define nop() __asm__ __volatile__ ("nop") + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define tas(ptr) (xchg((ptr),1)) + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +{ + unsigned int fred; + + switch (size) { + case 1: + __asm__("movzbl %2,%0\n\t" + "movb %1,%2" + : "=&r" (fred) + : "r" (x), "m" (*__xg(ptr)) + : "memory"); + break; + case 2: + __asm__("movzwl %2,%0\n\t" + "movw %1,%2" + : "=&r" (fred) + : "r" (x), "m" (*__xg(ptr)) + : "memory"); + break; + case 4: + __asm__("movl %2,%0\n\t" + "movl %1,%2" + : "=&r" (fred) + : "r" (x), "m" (*__xg(ptr)) + : "memory"); + break; + } + return fred; +} + + +#define HALT asm("halt") +ATTRIB_NORET void machine_halt(void); + +#endif /* notdef __ASSEMBLY__ */ + +#endif /* _VAX_SYSTEM_H */ diff -Nru a/include/asm-vax/termbits.h b/include/asm-vax/termbits.h --- a/include/asm-vax/termbits.h 1970-01-01 01:00:00 +++ b/include/asm-vax/termbits.h 2004-04-04 18:46:42 @@ -0,0 +1,174 @@ +#ifndef __ASMVAX_TERMBITS_H__ +#define __ASMVAX_TERMBITS_H__ + +/* from i386 (atp 1998) */ +#include + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +/* c_cc characters */ +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 
+#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 + +/* c_iflag bits */ +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IUCLC 0001000 +#define IXON 0002000 +#define IXANY 0004000 +#define IXOFF 0010000 +#define IMAXBEL 0020000 +#define IUTF8 0040000 + +/* c_oflag bits */ +#define OPOST 0000001 +#define OLCUC 0000002 +#define ONLCR 0000004 +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 +#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 + +/* c_cflag bit meaning */ +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 +#define CS5 0000000 +#define CS6 0000020 +#define CS7 0000040 +#define CS8 0000060 +#define CSTOPB 0000100 +#define CREAD 0000200 +#define PARENB 0000400 +#define PARODD 0001000 +#define HUPCL 0002000 +#define CLOCAL 0004000 +#define CBAUDEX 0010000 +#define B57600 0010001 +#define B115200 
0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define ISIG 0000001 +#define ICANON 0000002 +#define XCASE 0000004 +#define ECHO 0000010 +#define ECHOE 0000020 +#define ECHOK 0000040 +#define ECHONL 0000100 +#define NOFLSH 0000200 +#define TOSTOP 0000400 +#define ECHOCTL 0001000 +#define ECHOPRT 0002000 +#define ECHOKE 0004000 +#define FLUSHO 0010000 +#define PENDIN 0040000 +#define IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* tcsetattr uses these */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _ASMVAX_TERMBITS_H */ diff -Nru a/include/asm-vax/termios.h b/include/asm-vax/termios.h --- a/include/asm-vax/termios.h 1970-01-01 01:00:00 +++ b/include/asm-vax/termios.h 2002-05-20 02:33:39 @@ -0,0 +1,107 @@ +#ifndef _ASMVAX_TERMIOS_H +#define _ASMVAX_TERMIOS_H + +/* from asm-i386 */ +/* atp jan 2001. updated for 2.4 merge. 
*/ + +#include +#include + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* modem lines */ +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +/* line disciplines */ +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ +#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ +#define N_HDLC 13 /* synchronous HDLC */ + +#ifdef __KERNEL__ + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. 
+ */ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* _ASMVAX_TERMIOS_H */ diff -Nru a/include/asm-vax/thread_info.h b/include/asm-vax/thread_info.h --- a/include/asm-vax/thread_info.h 1970-01-01 01:00:00 +++ b/include/asm-vax/thread_info.h 2005-07-31 14:43:08 @@ -0,0 +1,121 @@ +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +/* + * thread_info.h: VAX low-level thread information + * + * Based on asm-i386/thread-info.h: + * Copyright (C) 2002 David Howells (dhowells@redhat.com) + * - Incorporating suggestions made by Linus Torvalds and Dave Miller + */ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#endif + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure 
are changed, the assembly + * constants must also be changed + */ +#ifndef __ASSEMBLY__ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned int flags; /* low level flags */ + unsigned int cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + + mm_segment_t addr_limit; /* thread address space: + 0-0x7FFFFFFF for user-thead + 0-0xFFFFFFFF for kernel-thread + */ + struct restart_block restart_block; + unsigned char supervisor_stack[0]; +}; + +#else /* !__ASSEMBLY__ */ + +/* offsets into the thread_info struct for assembly code access */ +#define TI_TASK 0x00000000 +#define TI_EXEC_DOMAIN 0x00000004 +#define TI_FLAGS 0x00000008 +#define TI_CPU 0x0000000C +#define TI_PRE_COUNT 0x00000010 +#define TI_ADDR_LIMIT 0x00000014 + +#endif + +#define PREEMPT_ACTIVE 0x40000000 + +/* + * macros/functions for gaining access to the thread information structure + */ +#ifndef __ASSEMBLY__ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + } \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + return (struct thread_info *)(__mfpr(PR_KSP) & ~8191); +} + +/* thread information allocation */ +#define THREAD_SIZE (2*PAGE_SIZE) +#define alloc_thread_info(task) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) +#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) +#define get_thread_info(ti) get_task_struct((ti)->task) +#define put_thread_info(ti) put_task_struct((ti)->task) + +#else /* !__ASSEMBLY__ */ + +/* how to get the thread information struct from ASM */ +#define GET_THREAD_INFO(reg) \ + mfpr $PR_KSP, reg ; \ + 
bicl2 $8191, reg +#endif + +/* + * thread information flags + * - these are process state flags that various assembly files may need to access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_MEMDIE 4 +#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ + +#define _TIF_SYSCALL_TRACE (1<mm) + +/* Pull in generic TLB shootdown code */ +#include + +#endif /* _VAX_TLB_H */ diff -Nru a/include/asm-vax/tlbflush.h b/include/asm-vax/tlbflush.h --- a/include/asm-vax/tlbflush.h 1970-01-01 01:00:00 +++ b/include/asm-vax/tlbflush.h 2005-04-20 21:01:05 @@ -0,0 +1,61 @@ +#ifndef __VAX_TLBFLUSH_H +#define __VAX_TLBFLUSH_H + +#include + +/* + * TLB flushing: + * + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + * + * VAX hw ref manual pg 216. can use mtpr to either invalidate single + * (TBIS) or all (TBIA) TLB entries. In addition LDPCTX will + * invalidate all process virtual address translations. 
+ */ + +#define __flush_tlb() \ +__asm__ __volatile__("mtpr $0,%0" : : "i" (PR_TBIA) ) + +#define __flush_tlb_one(addr) \ +__asm__ __volatile__("mtpr %0,%1" : :"g" (addr), "i" (PR_TBIS) ) + +#define flush_tlb() __flush_tlb() +#define flush_tlb_all() __flush_tlb() +#define local_flush_tlb() __flush_tlb() +#define flush_tlb_kernel_range(start, end) __flush_tlb() + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + if (mm == current->mm) + __flush_tlb(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr) +{ + if (vma->vm_mm == current->mm) + __flush_tlb_one(addr); +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + if (vma->vm_mm == current->mm) + __flush_tlb(); +} + +extern inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) +{ + /* FIXME: do we need to notify other CPUs that a process + * page table is going away? I don't think so... + * But what if two processes are sharing this mm_struct and + * are currently running on two different CPUs? */ +} + + + +#endif /* __VAX_TLBFLUSH_H */ diff -Nru a/include/asm-vax/topology.h b/include/asm-vax/topology.h --- a/include/asm-vax/topology.h 1970-01-01 01:00:00 +++ b/include/asm-vax/topology.h 2003-03-14 01:52:07 @@ -0,0 +1,6 @@ +#ifndef _ASM_VAX_TOPOLOGY_H +#define _ASM_VAX_TOPOLOGY_H + +#include + +#endif /* _ASM_VAX_TOPOLOGY_H */ diff -Nru a/include/asm-vax/types.h b/include/asm-vax/types.h --- a/include/asm-vax/types.h 1970-01-01 01:00:00 +++ b/include/asm-vax/types.h 2004-07-29 01:52:49 @@ -0,0 +1,66 @@ +#ifndef _VAX_TYPES_H +#define _VAX_TYPES_H + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. 
Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __signed__ long long __s64; +typedef unsigned long long __u64; +#endif + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#define BITS_PER_LONG 32 + +#ifndef __ASSEMBLY__ + +#include + +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +typedef signed long long s64; +typedef unsigned long long u64; + + +/* DMA addresses come in generic and 64-bit flavours (although I can't + see 64-bit DMA having any meaning on VAX - KPH). */ + +typedef u32 dma_addr_t; + +typedef u64 dma64_addr_t; + +typedef unsigned int kmem_bufctl_t; + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif diff -Nru a/include/asm-vax/uaccess.h b/include/asm-vax/uaccess.h --- a/include/asm-vax/uaccess.h 1970-01-01 01:00:00 +++ b/include/asm-vax/uaccess.h 2005-05-09 22:29:55 @@ -0,0 +1,285 @@ +#ifndef _VAX_UACCESS_H +#define _VAX_UACCESS_H + +/* Copyright atp Jan 1999, adapted from power pc port */ + +#ifndef __ASSEMBLY__ +#include +#include +#include + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. 
+ */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) +#define USER_DS MAKE_MM_SEG(PAGE_OFFSET) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __user_ok(addr, size) (((size) <= 0x80000000) && ((addr) <= 0x80000000-(size))) +#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) +#define access_ok(type, addr, size) __access_ok((unsigned long) (addr), (size)) + +extern inline int __deprecated verify_area(int type, const void * addr, unsigned long size) +{ + return access_ok(type,addr,size) ? 0 : -EFAULT; +} + + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the uglyness from the user. 
+ * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). + * + * FIXME: Lets try this.. + * As we use the same address space for kernel and user data on the + * PowerPC, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) + */ +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof (*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__ (*(ptr))) (x),(ptr), sizeof (*(ptr))) + +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__ (*(ptr))) (x), (ptr), sizeof (*(ptr))) + +/* + * The "xxx_ret" versions return constant specified in third argument, if + * something bad happens. These macros can be optimized for the + * case of just returning from the function xxx_ret is used. 
+ */ +#define put_user_ret(x, ptr, ret) ({ \ + if (put_user((x), (ptr))) return ret; }) + +#define get_user_ret(x, ptr, ret) ({ \ + if (get_user((x), (ptr))) return ret; }) + +#define __put_user_ret(x, ptr, ret) ({ \ + if (__put_user((x), (ptr))) return ret; }) + +#define __get_user_ret(x, ptr, ret) ({ \ + if (__get_user((x), (ptr))) return ret; }) + + +extern long __put_user_bad(void); + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err; \ + __put_user_size((x), (ptr), (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__ (*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE, __pu_addr, (size))) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __put_user_asm((x), (ptr), (retval), "movb"); break; \ + case 2: __put_user_asm((x), (ptr), (retval), "movw"); break; \ + case 4: __put_user_asm((x), (ptr), (retval), "movl"); break; \ + case 8: __put_user_asm((x), (ptr), (retval), "movq"); break; \ + default: __put_user_bad(); \ + } \ +} while (0) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (* (struct __large_struct *) (x)) + +/* + * We don't tell gcc that we are accessing memory, but this is OK + * because we do not write to any memory gcc knows about, so there + * are no aliasing issues. 
+ */ +#define __put_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %1, %2 \n" \ + "2: \n" \ + ".section .fixup,\"ax\" \n" \ + "3: movl %3,%0 \n" \ + " jmp 2b \n" \ + ".previous \n" \ + ".section __ex_table,\"a\" \n" \ + " .align 2 \n" \ + " .long 1b,3b \n" \ + ".previous" \ + : "=r" (err) \ + : "r" (x), \ + "m" (*addr), \ + "i" (-EFAULT), \ + "0" (err)) + + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err, __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT, __gu_val = 0; \ + const __typeof__(*(ptr)) *__gu_addr = (ptr); \ + if (access_ok(VERIFY_READ, __gu_addr, (size))) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +extern long __get_user_bad(void); + +#define __get_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __get_user_asm((x), (ptr), (retval), "movb"); break; \ + case 2: __get_user_asm((x), (ptr), (retval), "movw"); break; \ + case 4: __get_user_asm((x), (ptr), (retval), "movl"); break; \ + case 8: __get_user_asm((x), (ptr), (retval), "movq"); break; \ + default: (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %2, %1 \n" \ + "2: \n" \ + ".section .fixup,\"ax\" \n" \ + "3: movl %3, %0 \n" \ + " clrl %1 \n" \ + " jmp 2b \n" \ + ".section __ex_table,\"a\" \n" \ + " .align 2 \n" \ + ".long 1b, 3b \n" \ + ".text \n" \ + : "=r" (err), \ + "=r" (x) \ + : "m" (*addr), \ + "i" (-EFAULT), \ + "0" (err)) + +/* More complex routines */ + +extern int __copy_tofrom_user(void *to, const void *from, unsigned long size); + +extern inline unsigned long +copy_from_user(void *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + return __copy_tofrom_user(to, from, n); + 
return n; +} + +extern inline unsigned long +copy_to_user(void *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_tofrom_user(to, from, n); + return n; +} + +#define __copy_from_user(to, from, size) \ + __copy_tofrom_user((to), (from), (size)) +#define __copy_to_user(to, from, size) \ + __copy_tofrom_user((to), (from), (size)) + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + + +extern unsigned long __clear_user(void *addr, unsigned long size); + +extern inline unsigned long +clear_user(void *addr, unsigned long size) +{ + if (access_ok(VERIFY_WRITE, addr, size)) + return __clear_user(addr, size); + return size? -EFAULT: 0; +} + +extern int __strncpy_from_user(char *dst, const char *src, long count); + +extern inline long +strncpy_from_user(char *dst, const char *src, long count) +{ + if (access_ok(VERIFY_READ, src, 1)) + return __strncpy_from_user(dst, src, count); + return -EFAULT; +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 for error + */ +#define strlen_user(s) strnlen_user(s, ~0UL >> 1) + +/* + * Returns: 0 if exception before NUL or reaching the supplied limit (N), + * a value greater than N if the limit would be exceeded, else strlen. + */ +extern long __strnlen_user(const char *, long); +extern inline long strnlen_user(const char *str, long n) +{ + return access_ok(VERIFY_READ,str,0) ? 
__strnlen_user(str, n) : 0; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _VAX_UACCESS_H */ diff -Nru a/include/asm-vax/ucontext.h b/include/asm-vax/ucontext.h --- a/include/asm-vax/ucontext.h 1970-01-01 01:00:00 +++ b/include/asm-vax/ucontext.h 2002-05-20 02:33:39 @@ -0,0 +1,13 @@ +#ifndef _ASM_VAX_UCONTEXT_H +#define _ASM_VAX_UCONTEXT_H +/* atp Jul 2001, taken from other ports */ + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _ASM_VAX_UCONTEXT_H */ diff -Nru a/include/asm-vax/unaligned.h b/include/asm-vax/unaligned.h --- a/include/asm-vax/unaligned.h 1970-01-01 01:00:00 +++ b/include/asm-vax/unaligned.h 2004-06-03 23:42:55 @@ -0,0 +1,20 @@ +#ifndef _ASM_VAX_UNALIGNED_H_ +#define _ASM_VAX_UNALIGNED_H_ + +/* + * For the benefit of those who are trying to port Linux to another + * architecture, here are some C-language equivalents. + */ + +#include <linux/string.h> + + +#define get_unaligned(ptr) \ + ({ __typeof__(*(ptr)) __tmp; memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) + +#define put_unaligned(val, ptr) \ + ({ __typeof__(*(ptr)) __tmp = (val); \ + memcpy((ptr), &__tmp, sizeof(*(ptr))); \ + (void)0; }) + +#endif /* _ASM_VAX_UNALIGNED_H_ */ diff -Nru a/include/asm-vax/unistd.h b/include/asm-vax/unistd.h --- a/include/asm-vax/unistd.h 1970-01-01 01:00:00 +++ b/include/asm-vax/unistd.h 2005-11-30 15:26:37 @@ -0,0 +1,557 @@ +#ifndef _ASM_VAX_UNISTD_H_ +#define _ASM_VAX_UNISTD_H_ + +/* + * syscall macros Copyright (c) David Airlie 2001 (airlied@linux.ie) + * -- see comments + */ + + +/* + * FIXME: Who do we want to be compatible with? + * + * In theory, we'd try to be compatible either with VMS or with Ultrix + * (that is, Ultrix would probably make more sense...), but I don't + * think that any dude will ever try to run Ultrix/VAX binaries on a + * vax-linux machine ... + */ + +/* + * This file contains the system call numbers. 
If you change *anything* + * here, also change number of syscall arguments in + * ./linux/arch/vax/kernel/syscall.c and ./l/a/v/k/entry.S ! + */ +//#define __NR_setup 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +//#define __NR_break 17 +//#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +//#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +//#define __NR_stty 31 +//#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +//#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +//#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +//#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +//#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_old_getrlimit 76 /* 2GB 
constrained rlimit */ +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +//#define __NR_select 82 +#define __NR_symlink 83 +//#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +//#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +//#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +//#define __NR_olduname 109 +//#define __NR_iopl 110 +#define __NR_vhangup 111 +//#define __NR_idle 112 +//#define __NR_vm86old 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +//#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +//#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +//#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +//#define __NR_afs_syscall 137 +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define 
__NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +//#define __NR_vm86 166 +//#define __NR_query_module 167 +#define __NR_poll 168 +#define __NR_nfsservctl 169 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread64 180 +#define __NR_pwrite64 181 +#define __NR_chown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 +//#define __NR_getpmsg 188 /* some people actually want streams */ +//#define __NR_putpmsg 189 /* some people actually want streams */ +#define __NR_vfork 190 +#define __NR_getrlimit 191 /* SuS compliant getrlimit */ +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_lchown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_getgroups32 205 +#define __NR_setgroups32 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_chown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define 
__NR_setfsgid32 216 +#define __NR_pivot_root 217 +#define __NR_mincore 218 +#define __NR_madvise 219 +#define __NR_getdents64 220 +#define __NR_fcntl64 221 +#define __NR_tkill 222 +#define __NR_statfs64 223 +#define __NR_fstatfs64 224 +#define __NR_vserver 225 + +#define __NR_last_syscall 225 /* Number of last syscall */ + + + +/* user-visible error numbers are in the range -1 - -124: see */ + +/* + * syscall functions for system calls with 0->5 arguments are implemented + * here-in, for inclusion in libc also. + * All syscalls are implemented the same, the syscall arguments are + * pushed onto the stack, the number of arguments is pushed, the sp + * is loaded into the ap register and the chmk call is made with the + * syscall number as the argument in the r0 register. + * + * Possible optimisations, don't need to use r0 to pass the chmk argument + * suggested by Kenn, will examine later as we are using r0 for the + * return value. + * + * Opposing code is in arch/vax/kernel/syscall.c as is from Kenn Humborg, + * any changes here should be reflected there and vice-versa. 
+ * - Dave Airlie, May 2001 + */ +#define _syscall_return(type) \ +do { \ + if ((unsigned long)(_sc_ret) >= (unsigned long)(-125)) { \ + errno = -(_sc_ret); \ + _sc_ret = -1; \ + } \ + return (type) (_sc_ret); \ +} while (0) + +#define _syscall_clobbers \ + "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", \ + "r9", "r10", "r11" + +#define _syscall0(type, name) \ +type name (void) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl $0 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $4, %%sp \n" \ + " movl (%%sp)+, %ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + +#define _syscall1(type, name, type1, arg1) \ +type name (type1 arg1) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl %2 \n" \ + " pushl $1 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $8, %%sp \n" \ + " movl (%%sp)+, %%ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0), "m"(arg1) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + +#define _syscall2(type, name, type1, arg1, type2, arg2) \ +type name (type1 arg1, type2 arg2) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl %3 \n" \ + " pushl %2 \n" \ + " pushl $2 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $12, %%sp \n" \ + " movl (%%sp)+, %%ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0), "m"(arg1), "m"(arg2) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + +#define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ +type name (type1 arg1, type2 arg2, type3 arg3) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl 
%4 \n" \ + " pushl %3 \n" \ + " pushl %2 \n" \ + " pushl $3 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $16, %%sp \n" \ + " movl (%%sp)+, %%ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0), "m"(arg1), "m"(arg2), "m"(arg3) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + +#define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl %5 \n" \ + " pushl %4 \n" \ + " pushl %3 \n" \ + " pushl %2 \n" \ + " pushl $4 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $20, %%sp \n" \ + " movl (%%sp)+, %%ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0), "m"(arg1), "m"(arg2), "m"(arg3), \ + "m"( arg4) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + +#define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl %6 \n" \ + " pushl %5 \n" \ + " pushl %4 \n" \ + " pushl %3 \n" \ + " pushl %2 \n" \ + " pushl $5 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $24, %%sp \n" \ + " movl (%%sp)+, %%ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0), "m"(arg1), "m"(arg2), "m"(arg3), \ + "m"(arg4), "m"(arg5) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + +#define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, \ + type6 arg6) \ +{ \ + register long _sc_0 __asm__("r0") = __NR_##name; \ + long _sc_ret; \ + \ + __asm__ __volatile__ ( \ + " pushl %%ap \n" \ + " pushl 
%7 \n" \ + " pushl %6 \n" \ + " pushl %5 \n" \ + " pushl %4 \n" \ + " pushl %3 \n" \ + " pushl %2 \n" \ + " pushl $6 \n" \ + " movl %%sp, %%ap \n" \ + " chmk %%r0 \n" \ + " addl2 $28, %%sp \n" \ + " movl (%%sp)+, %%ap \n" \ + : "=r"(_sc_0) \ + : "0"(_sc_0), "m"(arg1), "m"(arg2), "m"(arg3), \ + "m"(arg4), "m"(arg5), "m"(arg6) \ + : _syscall_clobbers); \ + \ + _sc_ret = _sc_0; \ + _syscall_return(type); \ +} + + +#ifdef __KERNEL__ +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_STIME +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SSETMASK +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_STAT64 +#endif + + +#ifdef __KERNEL_SYSCALLS__ + +/* This _must_ be inlined so that it picks up the correct AP value + from the caller */ +static inline int __chmk(unsigned int syscall) +{ + register int retval __asm__("r0"); + __asm__("chmk %0" + : /* implicit output in r0 */ + : "g"(syscall) + : "r0", "r1"); + return retval; +} + +/* + * we need this inline - forking from kernel space will result + * in NO COPY ON WRITE (!!!), until an execve is executed. This + * is no problem, but for the stack. This is handled by not letting + * main() use the stack at all after fork(). Thus, no function + * calls - which means inline code for fork too, as otherwise we + * would use the stack upon exit from 'fork()'. + * + * Actually only pause and fork are needed inline, so that there + * won't be any messing with the stack from main(), but we define + * some others too. 
+ */ + +#include +#include + + +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +#define __NR__exit __NR_exit +static inline _syscall0(int,sync) +static inline _syscall0(pid_t,setsid) +static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count) +static inline _syscall3(int,read,int,fd,char *,buf,off_t,count) +static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count) +static inline _syscall1(int,dup,int,fd) +static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp) +static inline _syscall3(int,open,const char *,file,int,flag,int,mode) +static inline _syscall1(int,close,int,fd) +//static inline _syscall1(int,_exit,int,exitcode) +static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options) +static inline _syscall1(int,delete_module,const char *,name) + +extern int sys_idle(void); +static inline int idle(void) +{ + return sys_idle(); +} + +#define exit(x) _exit(x) + +static inline pid_t wait(int * wait_stat) +{ + return waitpid(-1,wait_stat,0); +} + +#endif + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); + + +#endif /* _ASM_VAX_UNISTD_H_ */ diff -Nru a/include/asm-vax/user.h b/include/asm-vax/user.h --- a/include/asm-vax/user.h 1970-01-01 01:00:00 +++ b/include/asm-vax/user.h 2002-05-20 02:33:39 @@ -0,0 +1,52 @@ +#ifndef _VAX_USER_H +#define _VAX_USER_H + +#include + +#include + +/* + * Core file format: The core file is written in such a way that gdb + * can understand it and provide useful information to the user (under + * linux we use the `trad-core' bfd, NOT the osf-core). The file contents + * are as follows: + * + * upage: 1 page consisting of a user struct that tells gdb + * what is present in the file. 
Directly after this is a + * copy of the task_struct, which is currently not used by gdb, + * but it may come in handy at some point. All of the registers + * are stored as part of the upage. The upage should always be + * only one page long. + * data: The data segment follows next. We use current->end_text to + * current->brk to pick up all of the user variables, plus any memory + * that may have been sbrk'ed. No attempt is made to determine if a + * page is demand-zero or if a page is totally unused, we just cover + * the entire range. All of the addresses are rounded in such a way + * that an integral number of pages is written. + * stack: We need the stack information in order to get a meaningful + * backtrace. We need to write the data from usp to + * current->start_stack, so we round each of these in order to be able + * to write an integer number of pages. + */ +/* FIXME: this is bogus */ +struct user { + struct pt_regs regs; /* integer regs */ + size_t u_tsize; /* text size (pages) */ + size_t u_dsize; /* data size (pages) */ + size_t u_ssize; /* stack size (pages) */ + unsigned long start_code; /* text starting address */ + unsigned long start_data; /* data starting address */ + unsigned long start_stack; /* stack starting address */ + long int signal; /* signal causing core dump */ + struct pt_regs *u_ar0; /* help gdb find registers */ + unsigned long magic; /* identifies a core file */ + char u_comm[32]; /* user command name */ +}; + +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_DATA_START_ADDR (u.start_data) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif /* __VAX_USER_H */ diff -Nru a/include/asm-vax/vaxcpu.h b/include/asm-vax/vaxcpu.h --- a/include/asm-vax/vaxcpu.h 1970-01-01 01:00:00 +++ b/include/asm-vax/vaxcpu.h 2005-08-04 21:52:38 @@ -0,0 +1,287 @@ +#ifndef _VAX_CPU_H +#define _VAX_CPU_H + +#ifndef __ASSEMBLY__ +#include +#endif /* !__ASSEMBLY__ */ + +/* cpu.h family 
defines - architected values (VAX arch ref man p284 & app B) */ + +/* SID register. Top byte is the system ID. */ +/* Other bytes in SID are system dependent. */ +/* e.g. for CVAX, low byte is microcode rev. level. */ + +/* Chip SERIES -- from SID register */ +/* First set from table 8.2 pg 284 */ +#define VAX_780 1 /* also 785 */ +#define VAX_750 2 +#define VAX_730 3 +#define VAX_8600 4 +#define VAX_UVAX1 7 /* 5 & 6 reserved */ +#define VAX_UVAX2 8 +/* f. hardware manuals & ultrix headers + other bits. */ +#define VAX_CVAX 10 +#define VAX_RIGEL 11 +#define VAX_MARIAH 18 +#define VAX_NVAX 19 +#define VAX_SOC 20 + +#define VAX_SID_FAMILY_MASK 0xff000000 +#define VAX_SID_FAMILY_SHIFT 24 +#define VAX_SID_FAMILY_SIZE 8 + +/* Chip Model/System Type */ +/* from PROM/SIDEX */ +/* Listed are the ones I have access to. (atp) */ + +/* for CVAX */ +#define VAX_CPU_420 0x0A000004 /* 3100 (low models) */ + +/* Rigel. */ +#define VAX_CPU_43 0x0B000004 /* 3100m76 */ + +/* Mariah */ +#define VAX_CPU_46 0x12000004 /* 4000/60 */ + +/* VLC */ +#define VAX_CPU_48 0x14000004 /* 4000/VLC (KA48) */ + +/* SOC */ +#define VAX_CPU_660 0x14000006 /* VAXserver 4000-200 (KA660-B) */ +#define VAX_CPU_VXT 0x14000006 /* VXT2000 - from NetBSD */ + +/*****************************************************************************/ +/* Definitions for MicroVAX I family CPUs */ + +/* System ID register fields */ + +#define UVAX1_SID_HWREV_MASK 0x000000ff +#define UVAX1_SID_HWREV_SHIFT 0 +#define UVAX1_SID_HWREV_SIZE 8 + +#define UVAX1_SID_UCODE_REV_MASK 0x0000ff00 +#define UVAX1_SID_UCODE_REV_SHIFT 8 +#define UVAX1_SID_UCODE_REV_SIZE 8 + +#define UVAX1_SID_DFLOAT_MASK 0x00010000 +#define UVAX1_SID_DFLOAT_SHIFT 16 +#define UVAX1_SID_DFLOAT_SIZE 1 + +/* Machine check info */ + +#ifndef __ASSEMBLY__ +struct uvax1_mcheck_info { + unsigned byte_count; /* normally 0x0c */ + unsigned mcheck_code; + unsigned param1; + unsigned param2; + unsigned pc; + struct psl_fields psl; +}; +#endif /* !__ASSEMBLY__ */ + +/* 
Values for uvax1_mcheck_info.mcheck_code */ + +#define UVAX1_MCHECK_MEM_CTRL 0 /* Memory controller bug check */ +#define UVAX1_MCHECK_MEM_RD_ERR 1 /* Unrecoverable memory read error */ +#define UVAX1_MCHECK_NONEXIST_MEM 2 /* Nonexistent memory */ +#define UVAX1_MCHECK_ILL_IO_OP 3 /* Illegal I/O space operation */ +#define UVAX1_MCHECK_PTE_RD_ERR 4 /* Unrecoverable PTE read error */ +#define UVAX1_MCHECK_PTE_WR_ERR 5 /* Unrecoverable PTE write error */ +#define UVAX1_MCHECK_CS_PAR_ERR 6 /* Control store parity error */ +#define UVAX1_MCHECK_UCODE_BUGCHK 7 /* Micromachine bug check */ +#define UVAX1_MCHECK_Q22_VEC_RD_ERR 8 /* Q22 bus vector read error */ +#define UVAX1_MCHECK_WR_PARAM_ERR 9 /* Write parameter error */ + + +/* Physical memory map */ + +#define UVAX1_MAX_PHYS_RAM 0x003fffff /* Max 4MB RAM */ + +/* Space between 0x00400000 and 0x1fffffff is reserved */ + +#define UVAX1_Q22_IO_START 0x20000000 +#define UVAX1_Q22_IO_END 0x20001fff + +/* Space between 0x20002000 and 0x3fffffff is reserved */ + + +/*****************************************************************************/ +/* Definitions for MicroVAX II family CPUs */ + +/* SID register distinguishes between KA630 and KA410 */ + +#define UVAX2_SID_SUBTYPE_MASK 0x000000ff +#define UVAX2_SID_SUBTYPE_SHIFT 0 +#define UVAX2_SID_SUBTYPE_SIZE 8 + +#define UVAX2_SID_SUBTYPE_CHARON 0 /* CHARON-VAX MicroVAX II emulator */ +#define UVAX2_SID_SUBTYPE_KA630 1 /* MicroVAX II, VAXstation II */ +#define UVAX2_SID_SUBTYPE_KA410 4 /* MicroVAX 2000, VAXstation 2000 */ + +/* Machine check info */ + +#ifndef __ASSEMBLY__ +struct uvax2_mcheck_info { + unsigned byte_count; /* normally 0x0c */ + unsigned mcheck_code; + unsigned most_recent_virt_addr; + unsigned internal_state_info; + unsigned pc; + struct psl_fields psl; +}; +#endif /* !__ASSEMBLY__ */ + +/* Values for uvax1_mcheck_info.mcheck_code */ + +#define UVAX2_MCHECK_UCODE_STATE_FSD 1 /* impossible microcode state (FSD) */ +#define UVAX2_MCHECK_UCODE_STATE_SSD 2 /* 
impossible microcode state (SSD) */ +#define UVAX2_MCHECK_UNDEF_FPU_ERR0 3 /* undefined FPU error code 0 */ +#define UVAX2_MCHECK_UNDEF_FPU_ERR7 4 /* undefined FPU error code 7 */ +#define UVAX2_MCHECK_UNDEF_MM_STS_TBMISS 5 /* undefined mem mgmt status (TB miss) */ +#define UVAX2_MCHECK_UNDEF_MM_STS_M0 6 /* undefined mem mgmt status (M=0) */ +#define UVAX2_MCHECK_PPTE_IN_P0 7 /* process PTE in P0 space */ +#define UVAX2_MCHECK_PPTE_IN_P1 8 /* process PTE in P1 space */ +#define UVAX2_MCHECK_UNDEF_INT_ID_CODE 9 /* undefined interrupt ID code */ +#define UVAX2_MCHECK_RD_BUS_ERR_VIRT 0x80 /* read bus error, addr param is virtual */ +#define UVAX2_MCHECK_RD_BUS_ERR_PHYS 0x81 /* read bus error, addr param is physical */ +#define UVAX2_MCHECK_WR_BUS_ERR_VIRT 0x82 /* write bus error, addr param is virtual */ +#define UVAX2_MCHECK_WR_BUS_ERR_PHYS 0x83 /* write bus error, addr param is physical */ + +/* Physical memory map */ + +#define UVAX2_MAX_PHYS_RAM 0x00ffffff /* Max 16MB RAM */ + +/* Space between 0x01000000 and 0x1fffffff is reserved */ + +#define UVAX2_Q22_IO_START 0x20000000 +#define UVAX2_Q22_IO_END 0x20001fff + +/* Space between 0x20002000 and 0x2003ffff is reserved */ + +#define UVAX2_ROM_START 0x20040000 +#define UVAX2_ROM_END 0x2007ffff + +#define UVAX2_LOCAL_REG_START 0x20080000 +#define UVAX2_LOCAL_REG_END 0x200bffff + +/* Space between 0x200c0000 and 0x2fffffff is reserved */ + +#define UVAX2_Q22_MEM_START 0x30000000 +#define UVAX2_Q22_MEM_END 0x303fffff + +/* Space between 0x30400000 and 0x3fffffff is reserved */ + + +/*****************************************************************************/ +/* Definitions for CVAX family CPUs */ + +/* System ID register fields */ + +#define CVAX_SID_UCODE_REV_MASK 0x000000ff +#define CVAX_SID_UCODE_REV_SHIFT 0 +#define CVAX_SID_UCODE_REV_SIZE 8 + + +/* CVAX stores additional ID info in the System ID Extension + at this physical address in ROM. 
*/ +#define CVAX_SIDEX_ADDR 0x20040004 + +/* Fields in SIDEX */ +#define CVAX_SIDEX_TYPE_MASK 0xff000000 +#define CVAX_SIDEX_TYPE_SHIFT 24 +#define CVAX_SIDEX_TYPE_SIZE 8 + +/* Values for SYS_TYPE field in SIDEX */ +#define CVAX_SIDEX_TYPE_Q22 1 +#define CVAX_SIDEX_TYPE_VS3100 4 + +/* Interpretation of the remainder of the SIDEX (bits 23:0) depends + on the type in the 31:24 */ + +/* Fields for Q22-based implementations */ +#define CVAX_Q22_SUBTYPE_MASK 0x0000ff00 +#define CVAX_Q22_SUBTYPE_SHIFT 8 +#define CVAX_Q22_SUBTYPE_SIZE 8 + +#define CVAX_Q22_FW_REV_MASK 0x00ff0000 /* Firmware revision */ +#define CVAX_Q22_FW_REV_SHIFT 16 +#define CVAX_Q22_FW_REV_SIZE 8 + +#define CVAX_Q22_SUBTYPE_KA650 1 +#define CVAX_Q22_SUBTYPE_KA640 2 +#define CVAX_Q22_SUBTYPE_KA655 3 + +/* Fields for early VAXstation 3100 models (VS3100m30) */ + +/* Anyone got a manual for these? */ + +/*****************************************************************************/ +/* Definitions for RIGEL family CPUs */ + +#define RIGEL_SIDEX_ADDR 0x20040004 + +/*****************************************************************************/ +/* Definitions for MARIAH family CPUs */ + +#define MARIAH_SIDEX_ADDR 0x20040004 + +/*****************************************************************************/ +/* stuff for KA4xx series CPU's */ + +/* offsets into prom-space for useful routines */ + +#define KA4_SIDEX 0x20040004 +#define KA4_GETCHAR 0x20040044 +#define KA4_PUTCHAR 0x20040058 + + +/*******************************************************************/ +/* Definitions for SOC family CPUs */ + +/* + * SOC stores additional ID info in the System ID Extension + * at this physical address in ROM. Don't know if this holds + * true for all SOC machines but does seem to work for the KA660. + * + * NOTE: These were just copied from the CVAX (KA640/650/655) above + * so we can't be sure they are right. Can anyone with a tech + * manual confirm these? 
+ * + * SIDEX for VXT2000 is 08040002 + * + */ +#define SOC_SIDEX_ADDR 0x20040004 + +/* Fields in SIDEX */ +#define SOC_SIDEX_TYPE_MASK 0xff000000 +#define SOC_SIDEX_TYPE_SHIFT 24 +#define SOC_SIDEX_TYPE_SIZE 8 + +/* Values for SYS_TYPE field in SIDEX */ +#define SOC_SIDEX_TYPE_Q22 1 +#define SOC_SIDEX_TYPE_KA48 4 /* VS4000/VLC */ +#define SOC_SIDEX_TYPE_VXT 8 /* MVaxII chip system */ + +/* Interpretation of the remainder of the SIDEX (bits 23:0) */ +/* depends on the type in the 31:24 */ + +/* Firmware Revision */ +#define SOC_Q22_FW_REV_MASK 0x00ff0000 +#define SOC_Q22_FW_REV_SHIFT 16 +#define SOC_Q22_FW_REV_SIZE 8 + + +/* Fields for Q22-based implementations */ +#define SOC_Q22_SUBTYPE_MASK 0x0000ff00 +#define SOC_Q22_SUBTYPE_SHIFT 8 +#define SOC_Q22_SUBTYPE_SIZE 8 + +#define SOC_Q22_SUBTYPE_KA660 5 + +/*****************************************************************************/ +/* Definitions for NVAX family CPUs */ + +#define NVAX_SIDEX_ADDR 0x20040004 + +#endif /* _VAX_CPU_H */ diff -Nru a/include/asm-vax/vmb.h b/include/asm-vax/vmb.h --- a/include/asm-vax/vmb.h 1970-01-01 01:00:00 +++ b/include/asm-vax/vmb.h 2002-05-20 02:33:39 @@ -0,0 +1,34 @@ +#ifndef _VAX_VMB_H +#define _VAX_VMB_H +/* + * copyright atp 1998 + * VMB interaction defines + * Argument lists passed from VMB to bootstrap et alia. + * ref: VMS internals and data structures Table 30.23 + * + * Written: atp 11/11/98 + */ + +#include +#include + +struct uvax_vmb_arglist { + u_int nargs; /* number of integers (longwords) in arglist */ + struct dsc_descriptor filecache; /* FILEREAD cache descriptor */ + u_int lo_pfn; /* lowest PFN */ + u_int hi_pfn; /* highest PFN exclusive */ + struct dsc_descriptor pfnmap; /* PFN bitmap descriptor */ + struct dsc_descriptor ucode; /* loaded ucode descriptor */ + u_char systemid[6]; /* SCS system id */ + u_char pad[2]; + u_int flags; /* flags LOAD_SCS, V_TAPE etc. 
*/ + u_int ci_hipfn; /* highest pfn used by CI */ + u64 nodename; /* booting nodename */ + u64 hostaddr; /* host address */ + u64 hostname; /* hostname */ + u64 tod; /* time of day in MOP format */ + u_int xparam; /* extra MOP parameters */ + u_int bvp_pgtbl; /* address of port pagetable */ +}; + +#endif /* _VAX_VMB_H */ diff -Nru a/include/linux/elf.h b/include/linux/elf.h --- a/include/linux/elf.h 2005-06-17 21:48:29 +++ b/include/linux/elf.h 2005-04-06 00:54:39 @@ -85,6 +85,8 @@ #define EM_S390 22 /* IBM S/390 */ +#define EM_VAX 75 /* Digital Equipment Corp VAX - used to be 780 */ + #define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ #define EM_V850 87 /* NEC v850 */ diff -Nru a/include/linux/netfilter_ipv4/ipt_MARK.h b/include/linux/netfilter_ipv4/ipt_MARK.h --- a/include/linux/netfilter_ipv4/ipt_MARK.h 2005-06-17 21:48:29 +++ b/include/linux/netfilter_ipv4/ipt_MARK.h 2002-04-24 10:44:38 @@ -1,20 +1,9 @@ -#ifndef _IPT_MARK_H_target -#define _IPT_MARK_H_target +#ifndef _IPT_MARK_H +#define _IPT_MARK_H -/* Version 0 */ -struct ipt_mark_target_info { - unsigned long mark; +struct ipt_mark_info { + unsigned long mark, mask; + u_int8_t invert; }; -/* Version 1 */ -enum { - IPT_MARK_SET=0, - IPT_MARK_AND, - IPT_MARK_OR -}; - -struct ipt_mark_target_info_v1 { - unsigned long mark; - u_int8_t mode; -}; -#endif /*_IPT_MARK_H_target*/ +#endif /*_IPT_MARK_H*/ diff -Nru a/include/linux/netfilter_ipv4/ipt_TOS.h b/include/linux/netfilter_ipv4/ipt_TOS.h --- a/include/linux/netfilter_ipv4/ipt_TOS.h 2005-06-17 21:48:29 +++ b/include/linux/netfilter_ipv4/ipt_TOS.h 2002-04-24 10:44:38 @@ -1,12 +1,13 @@ -#ifndef _IPT_TOS_H_target -#define _IPT_TOS_H_target +#ifndef _IPT_TOS_H +#define _IPT_TOS_H +struct ipt_tos_info { + u_int8_t tos; + u_int8_t invert; +}; + #ifndef IPTOS_NORMALSVC #define IPTOS_NORMALSVC 0 #endif -struct ipt_tos_target_info { - u_int8_t tos; -}; - -#endif /*_IPT_TOS_H_target*/ +#endif /*_IPT_TOS_H*/ diff -Nru 
a/include/linux/netfilter_ipv4/ipt_mark.h b/include/linux/netfilter_ipv4/ipt_mark.h --- a/include/linux/netfilter_ipv4/ipt_mark.h 2005-06-17 21:48:29 +++ b/include/linux/netfilter_ipv4/ipt_mark.h 2002-04-24 10:44:38 @@ -1,20 +1,9 @@ -#ifndef _IPT_MARK_H_target -#define _IPT_MARK_H_target +#ifndef _IPT_MARK_H +#define _IPT_MARK_H -/* Version 0 */ -struct ipt_mark_target_info { - unsigned long mark; +struct ipt_mark_info { + unsigned long mark, mask; + u_int8_t invert; }; -/* Version 1 */ -enum { - IPT_MARK_SET=0, - IPT_MARK_AND, - IPT_MARK_OR -}; - -struct ipt_mark_target_info_v1 { - unsigned long mark; - u_int8_t mode; -}; -#endif /*_IPT_MARK_H_target*/ +#endif /*_IPT_MARK_H*/ diff -Nru a/include/linux/netfilter_ipv4/ipt_tos.h b/include/linux/netfilter_ipv4/ipt_tos.h --- a/include/linux/netfilter_ipv4/ipt_tos.h 2005-06-17 21:48:29 +++ b/include/linux/netfilter_ipv4/ipt_tos.h 2002-04-24 10:44:38 @@ -1,12 +1,13 @@ -#ifndef _IPT_TOS_H_target -#define _IPT_TOS_H_target +#ifndef _IPT_TOS_H +#define _IPT_TOS_H +struct ipt_tos_info { + u_int8_t tos; + u_int8_t invert; +}; + #ifndef IPTOS_NORMALSVC #define IPTOS_NORMALSVC 0 #endif -struct ipt_tos_target_info { - u_int8_t tos; -}; - -#endif /*_IPT_TOS_H_target*/ +#endif /*_IPT_TOS_H*/ diff -Nru a/include/linux/netfilter_ipv6/ip6t_MARK.h b/include/linux/netfilter_ipv6/ip6t_MARK.h --- a/include/linux/netfilter_ipv6/ip6t_MARK.h 2005-06-17 21:48:29 +++ b/include/linux/netfilter_ipv6/ip6t_MARK.h 2002-04-24 10:44:38 @@ -1,8 +1,9 @@ -#ifndef _IP6T_MARK_H_target -#define _IP6T_MARK_H_target +#ifndef _IP6T_MARK_H +#define _IP6T_MARK_H -struct ip6t_mark_target_info { - unsigned long mark; +struct ip6t_mark_info { + unsigned long mark, mask; + u_int8_t invert; }; -#endif /*_IPT_MARK_H_target*/ +#endif /*_IPT_MARK_H*/ diff -Nru a/include/linux/netfilter_ipv6/ip6t_mark.h b/include/linux/netfilter_ipv6/ip6t_mark.h --- a/include/linux/netfilter_ipv6/ip6t_mark.h 2005-06-17 21:48:29 +++ b/include/linux/netfilter_ipv6/ip6t_mark.h 2002-04-24 
10:44:38 @@ -1,8 +1,9 @@ -#ifndef _IP6T_MARK_H_target -#define _IP6T_MARK_H_target +#ifndef _IP6T_MARK_H +#define _IP6T_MARK_H -struct ip6t_mark_target_info { - unsigned long mark; +struct ip6t_mark_info { + unsigned long mark, mask; + u_int8_t invert; }; -#endif /*_IPT_MARK_H_target*/ +#endif /*_IPT_MARK_H*/ diff -Nru a/init/Kconfig b/init/Kconfig --- a/init/Kconfig 2005-06-17 21:48:29 +++ b/init/Kconfig 2005-07-24 23:36:48 @@ -50,6 +50,17 @@ depends on BROKEN || !SMP default y +config ARCH_API_TEST + tristate "Compile tests for core<->arch API" + default n + help + Select this option if you want to compile a test suite + for the API exposed by the arch-dependent code to the + arch-independent portions of the kernel. If compiled + into the kernel, this test suite will be run early in + the boot sequence. If compiled as a module, the test + suite will be run when the module is loaded. + config LOCK_KERNEL bool depends on SMP || PREEMPT diff -Nru a/init/Makefile b/init/Makefile --- a/init/Makefile 2005-06-17 21:48:29 +++ b/init/Makefile 2005-03-28 01:46:45 @@ -11,6 +11,8 @@ mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o +obj-$(CONFIG_ARCH_API_TEST) += archtest.o + # files to be removed upon make clean clean-files := ../include/linux/compile.h diff -Nru a/init/archtest.c b/init/archtest.c --- a/init/archtest.c 1970-01-01 01:00:00 +++ b/init/archtest.c 2005-03-21 19:58:06 @@ -0,0 +1,301 @@ +/* + * This code tests various, easily-testable parts of the "API" + * implemented by the arch-dependent code. + * You can run the tests on a live system by compiling it as + * a loadable module and loading it. + * If you compile this module into the kernel, the tests will be + * run shortly after computing the BogoMIPS value. + */ + +#include +#include +#include + +#include +#include + +/* + * TODO: + * o Fire off kernel threads to test atomicity of atomic operations. 
+ * o Test operations exported by + * o Test more operations exported by + */ + +static int verbose; + +module_param(verbose, int, 0444); +MODULE_PARM_DESC(verbose, "Run tests verbosely (i.e. print PASS as well as FAIL)"); + +#define TEST(expr) do { \ + if (!(expr)) { \ + failed++; \ + printk("archtest:%d: FAIL %s\n", __LINE__, #expr); \ + } else if (verbose) { \ + printk("archtest:%d: PASS %s\n", __LINE__, #expr); \ + } \ +} while (0) + +#define TEST_(sign, type, format, expr, v) do { \ + sign type actual = (expr); \ + sign type expected = (v); \ + if (actual != expected) { \ + failed++; \ + printk("archtest:%d: FAIL %s == %s (was %" format "/0x%x)\n", __LINE__, #expr, #v, actual, actual); \ + } else if (verbose) { \ + printk("archtest:%d: PASS %s == %s\n", __LINE__, #expr, #v); \ + } \ +} while (0) + +#define TEST_USHORT(expr, v) TEST_(unsigned, short, "u", (expr), (v)) + +#define TEST_UINT(expr, v) TEST_(unsigned, int, "u", (expr), (v)) + +#define TEST_INT(expr, v) TEST_(signed, int, "d", (expr), (v)) + +static int failed; + +static void test_csum(void) +{ + unsigned char data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + unsigned char data2[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; + + + TEST_USHORT(ip_compute_csum(data, sizeof(data)), 0xe6eb); + TEST_USHORT(ip_compute_csum(data, 1), 0xffff); + TEST_USHORT(ip_compute_csum(data, 2), 0xfeff); + TEST_USHORT(ip_compute_csum(data, 3), 0xfefd); + TEST_USHORT(ip_compute_csum(data, 4), 0xfbfd); + + /* Test that alignment doesn't matter */ + TEST_USHORT(ip_compute_csum(data + 1, 1), 0xfffe); + TEST_USHORT(ip_compute_csum(data + 1, 2), 0xfdfe); + TEST_USHORT(ip_compute_csum(data + 1, 3), 0xfdfb); + TEST_USHORT(ip_compute_csum(data + 1, 4), 0xf9fb); + TEST_USHORT(ip_compute_csum(data2, 1), 0xfffe); + TEST_USHORT(ip_compute_csum(data2, 2), 0xfdfe); + TEST_USHORT(ip_compute_csum(data2, 3), 0xfdfb); + TEST_USHORT(ip_compute_csum(data2, 4), 0xf9fb); + + TEST_USHORT(ip_compute_csum(data+1, sizeof(data)-1), 0xebe6); + 
TEST_USHORT(ip_compute_csum(data+2, sizeof(data)-2), 0xe7eb); + TEST_USHORT(ip_compute_csum(data+3, sizeof(data)-3), 0xede7); + TEST_USHORT(ip_compute_csum(data+4, sizeof(data)-4), 0xeaed); + + /* Test with non-zero initial sum */ + TEST_USHORT(csum_fold(csum_partial(data, sizeof(data), 0x12345678)), 0x7e3f); + TEST_USHORT(csum_fold(csum_partial(data+1, sizeof(data)-1, 0x12345678)), 0x833a); + + /* TODO: test csum_fold */ + /* TODO: test csum_tcpudp_nofold */ + /* TODO: test csum_tcpudp_magic */ + + /* TODO: test ip_fast_csum */ +} + +static void test_div64(void) +{ + uint64_t n; + + n = 20; + TEST_UINT(do_div(n,6), 2); + TEST_UINT(n, 3); + + n = 20; + TEST_UINT(do_div(n,4), 0); + TEST_UINT(n, 5); + + n = 1234567890123ULL; + TEST_UINT(do_div(n,1000), 123); + TEST_UINT(n, 1234567890); +} + +static void test_swab(void) +{ + unsigned int x; + + x = 0x1234; + TEST_UINT(swab16(x), 0x3412); + + x = 0x12345678; + TEST_UINT(swab32(x), 0x78563412); + + TEST_UINT(swab16(0x1234), 0x3412); + TEST_UINT(swab32(0x12345678), 0x78563412); +} + + +static void test_atomic_t(void) +{ + atomic_t a = ATOMIC_INIT(5); + + TEST_INT(atomic_read(&a), 5); + + atomic_set(&a, 10); + TEST_INT(atomic_read(&a), 10); + + atomic_add(3, &a); + TEST_INT(atomic_read(&a), 13); + + atomic_sub(5, &a); + TEST_INT(atomic_read(&a), 8); + + TEST(!atomic_sub_and_test(5, &a)); + TEST(atomic_sub_and_test(3, &a)); + + atomic_set(&a, 2); + atomic_inc(&a); + TEST_INT(atomic_read(&a), 3); + + atomic_dec(&a); + TEST_INT(atomic_read(&a), 2); + + TEST(!atomic_dec_and_test(&a)); + TEST(atomic_dec_and_test(&a)); + TEST(!atomic_dec_and_test(&a)); + TEST(!atomic_dec_and_test(&a)); + + /* a is now -2 */ + + TEST(!atomic_inc_and_test(&a)); + TEST(atomic_inc_and_test(&a)); + TEST(!atomic_inc_and_test(&a)); + + atomic_set(&a, -5); + TEST(atomic_add_negative(4, &a)); + + atomic_set(&a, -5); + TEST(!atomic_add_negative(5, &a)); + + atomic_set(&a, -5); + TEST(!atomic_add_negative(6, &a)); + + atomic_set(&a, 0); + 
atomic_clear_mask(0x123456, &a); + TEST_INT(atomic_read(&a), 0); + + atomic_set(&a, 0x123456); + atomic_clear_mask(0x123456, &a); + TEST_INT(atomic_read(&a), 0); + + atomic_set(&a, 0xffffff); + atomic_clear_mask(0x123456, &a); + TEST_INT(atomic_read(&a), 0xedcba9); + + atomic_set(&a, 0); + atomic_set_mask(0x123456, &a); + TEST_INT(atomic_read(&a), 0x123456); + + atomic_set(&a, 0x123456); + atomic_set_mask(0x123456, &a); + TEST_INT(atomic_read(&a), 0x123456); + + atomic_set(&a, 0xffffff); + atomic_set_mask(0x123456, &a); + TEST_INT(atomic_read(&a), 0xffffff); +} + +static void test_find_next_bit(void) +{ + long mask[] = { 0, 0, 0, 0 }; + int size = sizeof(mask) * 8; + + TEST_UINT(find_next_bit(mask, size, 0), size); + TEST_UINT(find_next_bit(mask, size, 1), size); + TEST_UINT(find_next_bit(mask, size, 2), size); + TEST_UINT(find_next_bit(mask, size, 7), size); + TEST_UINT(find_next_bit(mask, size, 8), size); + + mask[0] = 0x7e; + TEST_UINT(find_next_bit(mask, size, 0), 1); + TEST_UINT(find_next_bit(mask, size, 1), 1); + TEST_UINT(find_next_bit(mask, size, 2), 2); + TEST_UINT(find_next_bit(mask, size, 6), 6); + TEST_UINT(find_next_bit(mask, size, 7), size); + TEST_UINT(find_next_bit(mask, size, 8), size); + + mask[0] = 0xff; + TEST_UINT(find_next_bit(mask, size, 0), 0); + TEST_UINT(find_next_bit(mask, size, 1), 1); + TEST_UINT(find_next_bit(mask, size, 2), 2); + TEST_UINT(find_next_bit(mask, size, 7), 7); + TEST_UINT(find_next_bit(mask, size, 8), size); + + mask[0] = 0xfe00; + TEST_UINT(find_next_bit(mask, size, 0), 9); + TEST_UINT(find_next_bit(mask, size, 7), 9); + TEST_UINT(find_next_bit(mask, size, 8), 9); + TEST_UINT(find_next_bit(mask, size, 9), 9); + TEST_UINT(find_next_bit(mask, size, 10), 10); + TEST_UINT(find_next_bit(mask, size, 11), 11); + TEST_UINT(find_next_bit(mask, size, 15), 15); + TEST_UINT(find_next_bit(mask, size, 16), size); + + mask[0] = 0; + mask[1] = 0xfe; + TEST_UINT(find_next_bit(mask, size, 0), 33); + TEST_UINT(find_next_bit(mask, size, 1), 
33); + TEST_UINT(find_next_bit(mask, size, 31), 33); + TEST_UINT(find_next_bit(mask, size, 32), 33); + TEST_UINT(find_next_bit(mask, size, 33), 33); + TEST_UINT(find_next_bit(mask, size, 34), 34); + TEST_UINT(find_next_bit(mask, size, 39), 39); + TEST_UINT(find_next_bit(mask, size, 40), size); + + TEST_UINT(find_next_bit(mask, size, size-1), size); +} + +static void test_ffs(void) +{ + /* ffs() counts LSB as bit 1 */ + TEST_UINT(ffs(0), 0); + TEST_UINT(ffs(1), 1); + TEST_UINT(ffs(2), 2); + TEST_UINT(ffs(3), 1); + TEST_UINT(ffs(0x80000000), 32); + + /* __ffs() counts LSB as bit 0 */ + /* __ffs(0) is undefined */ + TEST_UINT(__ffs(1), 0); + TEST_UINT(__ffs(2), 1); + TEST_UINT(__ffs(3), 0); + TEST_UINT(__ffs(0x80000000), 31); +} + +void do_tests(void) +{ + test_csum(); + test_div64(); + test_swab(); + test_atomic_t(); + test_find_next_bit(); + test_ffs(); +} + +static int __init archtest_init(void) +{ + do_tests(); + + if (failed) { + printk("archtest: %d test(s) failed\n", failed); + } else { + printk("archtest: all tests passed\n"); + } + return 0; +} + +static void __exit archtest_exit(void) +{ +} + + +/* + * We use core_initcall() so that we are called as early as possible + * during boot if we are compiled in. 
If we are compiled as a module, + * core_initcall() gets interpreted same as module_init() + */ +core_initcall(archtest_init); +module_exit(archtest_exit); + +MODULE_AUTHOR("Kenn Humborg "); +MODULE_LICENSE("GPL"); + diff -Nru a/lib/radix-tree.c b/lib/radix-tree.c --- a/lib/radix-tree.c 2005-06-17 21:48:29 +++ b/lib/radix-tree.c 2005-03-22 00:44:18 @@ -485,8 +485,8 @@ for ( ; i < RADIX_TREE_MAP_SIZE; i++) { if (slot->slots[i] != NULL) break; - index &= ~((1UL << shift) - 1); - index += 1UL << shift; + index &= ~((1 << shift) - 1); + index += 1 << shift; if (index == 0) goto out; /* 32-bit wraparound */ } @@ -575,8 +575,8 @@ BUG_ON(slot->slots[i] == NULL); break; } - index &= ~((1UL << shift) - 1); - index += 1UL << shift; + index &= ~((1 << shift) - 1); + index += 1 << shift; if (index == 0) goto out; /* 32-bit wraparound */ } @@ -760,10 +760,16 @@ static __init unsigned long __maxindex(unsigned int height) { unsigned int tmp = height * RADIX_TREE_MAP_SHIFT; - unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1; + unsigned long index; - if (tmp >= RADIX_TREE_INDEX_BITS) + /* This is different from the stock kernel, since our GCC emits + code which throws a reserved operand fault when height == 6. 
+ FIXME: we need to fix our compiler :-( */ + if (tmp >= RADIX_TREE_INDEX_BITS) { index = ~0UL; + } else { + index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1; + } return index; } diff -Nru a/net/ipv4/netfilter/ipt_CONNMARK.c b/net/ipv4/netfilter/ipt_CONNMARK.c --- a/net/ipv4/netfilter/ipt_CONNMARK.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_CONNMARK.c 2005-03-21 21:01:20 @@ -1,5 +1,5 @@ -/* This kernel module is used to modify the connection mark values, or - * to optionally restore the skb nfmark from the connection mark +/* This kernel module matches connection mark values set by the + * CONNMARK target * * Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom @@ -18,100 +18,63 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + #include #include -#include -#include MODULE_AUTHOR("Henrik Nordstrom "); -MODULE_DESCRIPTION("IP tables CONNMARK matching module"); +MODULE_DESCRIPTION("IP tables connmark match module"); MODULE_LICENSE("GPL"); #include -#include +#include #include -static unsigned int -target(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) +static int +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + int *hotdrop) { - const struct ipt_connmark_target_info *markinfo = targinfo; - unsigned long diff; - unsigned long nfmark; - unsigned long newmark; - + const struct ipt_connmark_info *info = matchinfo; enum ip_conntrack_info ctinfo; - struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo); - if (ct) { - switch(markinfo->mode) { - case IPT_CONNMARK_SET: - newmark = (ct->mark & ~markinfo->mask) | markinfo->mark; - if (newmark != ct->mark) - ct->mark = newmark; - break; - case IPT_CONNMARK_SAVE: - newmark = (ct->mark & ~markinfo->mask) | 
((*pskb)->nfmark & markinfo->mask); - if (ct->mark != newmark) - ct->mark = newmark; - break; - case IPT_CONNMARK_RESTORE: - nfmark = (*pskb)->nfmark; - diff = (ct->mark ^ nfmark) & markinfo->mask; - if (diff != 0) { - (*pskb)->nfmark = nfmark ^ diff; - (*pskb)->nfcache |= NFC_ALTERED; - } - break; - } - } + struct ip_conntrack *ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo); + if (!ct) + return 0; - return IPT_CONTINUE; + return ((ct->mark & info->mask) == info->mark) ^ info->invert; } static int checkentry(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, unsigned int hook_mask) { - struct ipt_connmark_target_info *matchinfo = targinfo; - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_connmark_target_info))) { - printk(KERN_WARNING "CONNMARK: targinfosize %u != %Zu\n", - targinfosize, - IPT_ALIGN(sizeof(struct ipt_connmark_target_info))); + if (matchsize != IPT_ALIGN(sizeof(struct ipt_connmark_info))) return 0; - } - if (matchinfo->mode == IPT_CONNMARK_RESTORE) { - if (strcmp(tablename, "mangle") != 0) { - printk(KERN_WARNING "CONNMARK: restore can only be called from \"mangle\" table, not \"%s\"\n", tablename); - return 0; - } - } - return 1; } -static struct ipt_target ipt_connmark_reg = { - .name = "CONNMARK", - .target = &target, +static struct ipt_match connmark_match = { + .name = "connmark", + .match = &match, .checkentry = &checkentry, .me = THIS_MODULE }; static int __init init(void) { - return ipt_register_target(&ipt_connmark_reg); + return ipt_register_match(&connmark_match); } static void __exit fini(void) { - ipt_unregister_target(&ipt_connmark_reg); + ipt_unregister_match(&connmark_match); } module_init(init); diff -Nru a/net/ipv4/netfilter/ipt_MARK.c b/net/ipv4/netfilter/ipt_MARK.c --- a/net/ipv4/netfilter/ipt_MARK.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_MARK.c 2004-02-23 23:50:10 @@ -1,4 +1,4 @@ -/* This is a 
module which is used for setting the NFMARK field of an skb. */ +/* Kernel module to match NFMARK values. */ /* (C) 1999-2001 Marc Boucher * @@ -9,153 +9,55 @@ #include #include -#include -#include +#include #include -#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher "); -MODULE_DESCRIPTION("iptables MARK modification module"); +MODULE_DESCRIPTION("iptables mark matching module"); -static unsigned int -target_v0(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) -{ - const struct ipt_mark_target_info *markinfo = targinfo; - - if((*pskb)->nfmark != markinfo->mark) { - (*pskb)->nfmark = markinfo->mark; - (*pskb)->nfcache |= NFC_ALTERED; - } - return IPT_CONTINUE; -} - -static unsigned int -target_v1(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) -{ - const struct ipt_mark_target_info_v1 *markinfo = targinfo; - int mark = 0; - - switch (markinfo->mode) { - case IPT_MARK_SET: - mark = markinfo->mark; - break; - - case IPT_MARK_AND: - mark = (*pskb)->nfmark & markinfo->mark; - break; - - case IPT_MARK_OR: - mark = (*pskb)->nfmark | markinfo->mark; - break; - } - - if((*pskb)->nfmark != mark) { - (*pskb)->nfmark = mark; - (*pskb)->nfcache |= NFC_ALTERED; - } - return IPT_CONTINUE; -} - - static int -checkentry_v0(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, - unsigned int hook_mask) +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + int *hotdrop) { - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) { - printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n", - targinfosize, - IPT_ALIGN(sizeof(struct ipt_mark_target_info))); - return 0; - } + const struct ipt_mark_info *info = matchinfo; - if 
(strcmp(tablename, "mangle") != 0) { - printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename); - return 0; - } - - return 1; + return ((skb->nfmark & info->mask) == info->mark) ^ info->invert; } static int -checkentry_v1(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, - unsigned int hook_mask) +checkentry(const char *tablename, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) { - struct ipt_mark_target_info_v1 *markinfo = targinfo; - - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1))){ - printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n", - targinfosize, - IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1))); + if (matchsize != IPT_ALIGN(sizeof(struct ipt_mark_info))) return 0; - } - if (strcmp(tablename, "mangle") != 0) { - printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename); - return 0; - } - - if (markinfo->mode != IPT_MARK_SET - && markinfo->mode != IPT_MARK_AND - && markinfo->mode != IPT_MARK_OR) { - printk(KERN_WARNING "MARK: unknown mode %u\n", - markinfo->mode); - return 0; - } - return 1; } -static struct ipt_target ipt_mark_reg_v0 = { - .name = "MARK", - .target = target_v0, - .checkentry = checkentry_v0, +static struct ipt_match mark_match = { + .name = "mark", + .match = &match, + .checkentry = &checkentry, .me = THIS_MODULE, - .revision = 0, }; -static struct ipt_target ipt_mark_reg_v1 = { - .name = "MARK", - .target = target_v1, - .checkentry = checkentry_v1, - .me = THIS_MODULE, - .revision = 1, -}; - static int __init init(void) { - int err; - - err = ipt_register_target(&ipt_mark_reg_v0); - if (!err) { - err = ipt_register_target(&ipt_mark_reg_v1); - if (err) - ipt_unregister_target(&ipt_mark_reg_v0); - } - return err; + return ipt_register_match(&mark_match); } static void __exit fini(void) { - ipt_unregister_target(&ipt_mark_reg_v0); - 
ipt_unregister_target(&ipt_mark_reg_v1); + ipt_unregister_match(&mark_match); } module_init(init); diff -Nru a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c --- a/net/ipv4/netfilter/ipt_TCPMSS.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_TCPMSS.c 2005-03-21 21:01:20 @@ -1,8 +1,7 @@ -/* - * This is a module which is used for setting the MSS option in TCP packets. +/* Kernel module to match TCP MSS values. */ + +/* Copyright (C) 2000 Marc Boucher * - * Copyright (C) 2000 Marc Boucher - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -10,252 +9,118 @@ #include #include - -#include #include +#include #include -#include +#define TH_SYN 0x02 + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher "); -MODULE_DESCRIPTION("iptables TCP MSS modification module"); +MODULE_DESCRIPTION("iptables TCP MSS match module"); -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -static u_int16_t -cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck) +/* Returns 1 if the mss option is set and matched by the range, 0 otherwise */ +static inline int +mssoption_match(u_int16_t min, u_int16_t max, + const struct sk_buff *skb, + int invert, + int *hotdrop) { - u_int32_t diffs[] = { oldvalinv, newval }; - return csum_fold(csum_partial((char *)diffs, sizeof(diffs), - oldcheck^0xFFFF)); -} + struct tcphdr _tcph, *th; + /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */ + u8 _opt[15 * 4 - sizeof(_tcph)], *op; + unsigned int i, optlen; -static inline unsigned int -optlen(const u_int8_t *opt, unsigned int offset) -{ - /* Beware zero-length options: make finite progress */ - if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0) return 1; - else return opt[offset+1]; -} + /* If we don't have the whole header, drop packet. 
*/ + th = skb_header_pointer(skb, skb->nh.iph->ihl * 4, + sizeof(_tcph), &_tcph); + if (th == NULL) + goto dropit; -static unsigned int -ipt_tcpmss_target(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) -{ - const struct ipt_tcpmss_info *tcpmssinfo = targinfo; - struct tcphdr *tcph; - struct iphdr *iph; - u_int16_t tcplen, newtotlen, oldval, newmss; - unsigned int i; - u_int8_t *opt; + /* Malformed. */ + if (th->doff*4 < sizeof(*th)) + goto dropit; - if (!skb_ip_make_writable(pskb, (*pskb)->len)) - return NF_DROP; + optlen = th->doff*4 - sizeof(*th); + if (!optlen) + goto out; - iph = (*pskb)->nh.iph; - tcplen = (*pskb)->len - iph->ihl*4; + /* Truncated options. */ + op = skb_header_pointer(skb, skb->nh.iph->ihl * 4 + sizeof(*th), + optlen, _opt); + if (op == NULL) + goto dropit; - tcph = (void *)iph + iph->ihl*4; + for (i = 0; i < optlen; ) { + if (op[i] == TCPOPT_MSS + && (optlen - i) >= TCPOLEN_MSS + && op[i+1] == TCPOLEN_MSS) { + u_int16_t mssval; - /* Since it passed flags test in tcp match, we know it is is - not a fragment, and has data >= tcp header length. SYN - packets should not contain data: if they did, then we risk - running over MTU, sending Frag Needed and breaking things - badly. --RR */ - if (tcplen != tcph->doff*4) { - if (net_ratelimit()) - printk(KERN_ERR - "ipt_tcpmss_target: bad length (%d bytes)\n", - (*pskb)->len); - return NF_DROP; - } - - if(tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) { - if(!(*pskb)->dst) { - if (net_ratelimit()) - printk(KERN_ERR - "ipt_tcpmss_target: no dst?! can't determine path-MTU\n"); - return NF_DROP; /* or IPT_CONTINUE ?? 
*/ + mssval = (op[i+2] << 8) | op[i+3]; + + return (mssval >= min && mssval <= max) ^ invert; } - - if(dst_mtu((*pskb)->dst) <= (sizeof(struct iphdr) + sizeof(struct tcphdr))) { - if (net_ratelimit()) - printk(KERN_ERR - "ipt_tcpmss_target: unknown or invalid path-MTU (%d)\n", dst_mtu((*pskb)->dst)); - return NF_DROP; /* or IPT_CONTINUE ?? */ - } - - newmss = dst_mtu((*pskb)->dst) - sizeof(struct iphdr) - sizeof(struct tcphdr); - } else - newmss = tcpmssinfo->mss; - - opt = (u_int8_t *)tcph; - for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)){ - if ((opt[i] == TCPOPT_MSS) && - ((tcph->doff*4 - i) >= TCPOLEN_MSS) && - (opt[i+1] == TCPOLEN_MSS)) { - u_int16_t oldmss; - - oldmss = (opt[i+2] << 8) | opt[i+3]; - - if((tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) && - (oldmss <= newmss)) - return IPT_CONTINUE; - - opt[i+2] = (newmss & 0xff00) >> 8; - opt[i+3] = (newmss & 0x00ff); - - tcph->check = cheat_check(htons(oldmss)^0xFFFF, - htons(newmss), - tcph->check); - - DEBUGP(KERN_INFO "ipt_tcpmss_target: %u.%u.%u.%u:%hu" - "->%u.%u.%u.%u:%hu changed TCP MSS option" - " (from %u to %u)\n", - NIPQUAD((*pskb)->nh.iph->saddr), - ntohs(tcph->source), - NIPQUAD((*pskb)->nh.iph->daddr), - ntohs(tcph->dest), - oldmss, newmss); - goto retmodified; - } + if (op[i] < 2) i++; + else i += op[i+1]?:1; } +out: + return invert; - /* - * MSS Option not found ?! add it.. 
- */ - if (skb_tailroom((*pskb)) < TCPOLEN_MSS) { - struct sk_buff *newskb; - - newskb = skb_copy_expand(*pskb, skb_headroom(*pskb), - TCPOLEN_MSS, GFP_ATOMIC); - if (!newskb) { - if (net_ratelimit()) - printk(KERN_ERR "ipt_tcpmss_target:" - " unable to allocate larger skb\n"); - return NF_DROP; - } - - kfree_skb(*pskb); - *pskb = newskb; - iph = (*pskb)->nh.iph; - tcph = (void *)iph + iph->ihl*4; - } - - skb_put((*pskb), TCPOLEN_MSS); - - opt = (u_int8_t *)tcph + sizeof(struct tcphdr); - memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); - - tcph->check = cheat_check(htons(tcplen) ^ 0xFFFF, - htons(tcplen + TCPOLEN_MSS), tcph->check); - tcplen += TCPOLEN_MSS; - - opt[0] = TCPOPT_MSS; - opt[1] = TCPOLEN_MSS; - opt[2] = (newmss & 0xff00) >> 8; - opt[3] = (newmss & 0x00ff); - - tcph->check = cheat_check(~0, *((u_int32_t *)opt), tcph->check); - - oldval = ((u_int16_t *)tcph)[6]; - tcph->doff += TCPOLEN_MSS/4; - tcph->check = cheat_check(oldval ^ 0xFFFF, - ((u_int16_t *)tcph)[6], tcph->check); - - newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS); - iph->check = cheat_check(iph->tot_len ^ 0xFFFF, - newtotlen, iph->check); - iph->tot_len = newtotlen; - - DEBUGP(KERN_INFO "ipt_tcpmss_target: %u.%u.%u.%u:%hu" - "->%u.%u.%u.%u:%hu added TCP MSS option (%u)\n", - NIPQUAD((*pskb)->nh.iph->saddr), - ntohs(tcph->source), - NIPQUAD((*pskb)->nh.iph->daddr), - ntohs(tcph->dest), - newmss); - - retmodified: - /* We never hw checksum SYN packets. 
*/ - BUG_ON((*pskb)->ip_summed == CHECKSUM_HW); - - (*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED; - return IPT_CONTINUE; + dropit: + *hotdrop = 1; + return 0; } -#define TH_SYN 0x02 - -static inline int find_syn_match(const struct ipt_entry_match *m) +static int +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + int *hotdrop) { - const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data; + const struct ipt_tcpmss_match_info *info = matchinfo; - if (strcmp(m->u.kernel.match->name, "tcp") == 0 - && (tcpinfo->flg_cmp & TH_SYN) - && !(tcpinfo->invflags & IPT_TCP_INV_FLAGS)) - return 1; - - return 0; + return mssoption_match(info->mss_min, info->mss_max, skb, + info->invert, hotdrop); } -/* Must specify -p tcp --syn/--tcp-flags SYN */ static int -ipt_tcpmss_checkentry(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, - unsigned int hook_mask) +checkentry(const char *tablename, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) { - const struct ipt_tcpmss_info *tcpmssinfo = targinfo; - - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_tcpmss_info))) { - DEBUGP("ipt_tcpmss_checkentry: targinfosize %u != %u\n", - targinfosize, IPT_ALIGN(sizeof(struct ipt_tcpmss_info))); + if (matchsize != IPT_ALIGN(sizeof(struct ipt_tcpmss_match_info))) return 0; - } - - if((tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) && - ((hook_mask & ~((1 << NF_IP_FORWARD) - | (1 << NF_IP_LOCAL_OUT) - | (1 << NF_IP_POST_ROUTING))) != 0)) { - printk("TCPMSS: path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n"); + /* Must specify -p tcp */ + if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) { + printk("tcpmss: Only works on TCP packets\n"); return 0; } - if (e->ip.proto == IPPROTO_TCP - && !(e->ip.invflags & IPT_INV_PROTO) - && IPT_MATCH_ITERATE(e, find_syn_match)) - return 1; - - 
printk("TCPMSS: Only works on TCP SYN packets\n"); - return 0; + return 1; } -static struct ipt_target ipt_tcpmss_reg = { - .name = "TCPMSS", - .target = ipt_tcpmss_target, - .checkentry = ipt_tcpmss_checkentry, +static struct ipt_match tcpmss_match = { + .name = "tcpmss", + .match = &match, + .checkentry = &checkentry, .me = THIS_MODULE, }; static int __init init(void) { - return ipt_register_target(&ipt_tcpmss_reg); + return ipt_register_match(&tcpmss_match); } static void __exit fini(void) { - ipt_unregister_target(&ipt_tcpmss_reg); + ipt_unregister_match(&tcpmss_match); } module_init(init); diff -Nru a/net/ipv4/netfilter/ipt_connmark.c b/net/ipv4/netfilter/ipt_connmark.c --- a/net/ipv4/netfilter/ipt_connmark.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_connmark.c 2005-03-21 21:01:20 @@ -1,5 +1,5 @@ -/* This kernel module is used to modify the connection mark values, or - * to optionally restore the skb nfmark from the connection mark +/* This kernel module matches connection mark values set by the + * CONNMARK target * * Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom @@ -18,100 +18,63 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + #include #include -#include -#include MODULE_AUTHOR("Henrik Nordstrom "); -MODULE_DESCRIPTION("IP tables CONNMARK matching module"); +MODULE_DESCRIPTION("IP tables connmark match module"); MODULE_LICENSE("GPL"); #include -#include +#include #include -static unsigned int -target(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) +static int +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + int *hotdrop) { - const struct ipt_connmark_target_info *markinfo = targinfo; - unsigned long diff; - unsigned long nfmark; - unsigned long 
newmark; - + const struct ipt_connmark_info *info = matchinfo; enum ip_conntrack_info ctinfo; - struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo); - if (ct) { - switch(markinfo->mode) { - case IPT_CONNMARK_SET: - newmark = (ct->mark & ~markinfo->mask) | markinfo->mark; - if (newmark != ct->mark) - ct->mark = newmark; - break; - case IPT_CONNMARK_SAVE: - newmark = (ct->mark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask); - if (ct->mark != newmark) - ct->mark = newmark; - break; - case IPT_CONNMARK_RESTORE: - nfmark = (*pskb)->nfmark; - diff = (ct->mark ^ nfmark) & markinfo->mask; - if (diff != 0) { - (*pskb)->nfmark = nfmark ^ diff; - (*pskb)->nfcache |= NFC_ALTERED; - } - break; - } - } + struct ip_conntrack *ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo); + if (!ct) + return 0; - return IPT_CONTINUE; + return ((ct->mark & info->mask) == info->mark) ^ info->invert; } static int checkentry(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, unsigned int hook_mask) { - struct ipt_connmark_target_info *matchinfo = targinfo; - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_connmark_target_info))) { - printk(KERN_WARNING "CONNMARK: targinfosize %u != %Zu\n", - targinfosize, - IPT_ALIGN(sizeof(struct ipt_connmark_target_info))); + if (matchsize != IPT_ALIGN(sizeof(struct ipt_connmark_info))) return 0; - } - if (matchinfo->mode == IPT_CONNMARK_RESTORE) { - if (strcmp(tablename, "mangle") != 0) { - printk(KERN_WARNING "CONNMARK: restore can only be called from \"mangle\" table, not \"%s\"\n", tablename); - return 0; - } - } - return 1; } -static struct ipt_target ipt_connmark_reg = { - .name = "CONNMARK", - .target = &target, +static struct ipt_match connmark_match = { + .name = "connmark", + .match = &match, .checkentry = &checkentry, .me = THIS_MODULE }; static int __init init(void) { - return 
ipt_register_target(&ipt_connmark_reg); + return ipt_register_match(&connmark_match); } static void __exit fini(void) { - ipt_unregister_target(&ipt_connmark_reg); + ipt_unregister_match(&connmark_match); } module_init(init); diff -Nru a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c --- a/net/ipv4/netfilter/ipt_hashlimit.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_hashlimit.c 2005-07-24 22:29:09 @@ -3,7 +3,7 @@ * * (C) 2003-2004 by Harald Welte * - * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $ + * $Id: ipt_hashlimit.c,v 1.1.1.3 2005/07/24 20:29:09 kenn Exp $ * * Development of this code was funded by Astaro AG, http://www.astaro.com/ * diff -Nru a/net/ipv4/netfilter/ipt_mark.c b/net/ipv4/netfilter/ipt_mark.c --- a/net/ipv4/netfilter/ipt_mark.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_mark.c 2004-02-23 23:50:10 @@ -1,4 +1,4 @@ -/* This is a module which is used for setting the NFMARK field of an skb. */ +/* Kernel module to match NFMARK values. 
*/ /* (C) 1999-2001 Marc Boucher * @@ -9,153 +9,55 @@ #include #include -#include -#include +#include #include -#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher "); -MODULE_DESCRIPTION("iptables MARK modification module"); +MODULE_DESCRIPTION("iptables mark matching module"); -static unsigned int -target_v0(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) -{ - const struct ipt_mark_target_info *markinfo = targinfo; - - if((*pskb)->nfmark != markinfo->mark) { - (*pskb)->nfmark = markinfo->mark; - (*pskb)->nfcache |= NFC_ALTERED; - } - return IPT_CONTINUE; -} - -static unsigned int -target_v1(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) -{ - const struct ipt_mark_target_info_v1 *markinfo = targinfo; - int mark = 0; - - switch (markinfo->mode) { - case IPT_MARK_SET: - mark = markinfo->mark; - break; - - case IPT_MARK_AND: - mark = (*pskb)->nfmark & markinfo->mark; - break; - - case IPT_MARK_OR: - mark = (*pskb)->nfmark | markinfo->mark; - break; - } - - if((*pskb)->nfmark != mark) { - (*pskb)->nfmark = mark; - (*pskb)->nfcache |= NFC_ALTERED; - } - return IPT_CONTINUE; -} - - static int -checkentry_v0(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, - unsigned int hook_mask) +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + int *hotdrop) { - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) { - printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n", - targinfosize, - IPT_ALIGN(sizeof(struct ipt_mark_target_info))); - return 0; - } + const struct ipt_mark_info *info = matchinfo; - if (strcmp(tablename, "mangle") != 0) { - printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not 
\"%s\"\n", tablename); - return 0; - } - - return 1; + return ((skb->nfmark & info->mask) == info->mark) ^ info->invert; } static int -checkentry_v1(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, - unsigned int hook_mask) +checkentry(const char *tablename, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) { - struct ipt_mark_target_info_v1 *markinfo = targinfo; - - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1))){ - printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n", - targinfosize, - IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1))); + if (matchsize != IPT_ALIGN(sizeof(struct ipt_mark_info))) return 0; - } - if (strcmp(tablename, "mangle") != 0) { - printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename); - return 0; - } - - if (markinfo->mode != IPT_MARK_SET - && markinfo->mode != IPT_MARK_AND - && markinfo->mode != IPT_MARK_OR) { - printk(KERN_WARNING "MARK: unknown mode %u\n", - markinfo->mode); - return 0; - } - return 1; } -static struct ipt_target ipt_mark_reg_v0 = { - .name = "MARK", - .target = target_v0, - .checkentry = checkentry_v0, +static struct ipt_match mark_match = { + .name = "mark", + .match = &match, + .checkentry = &checkentry, .me = THIS_MODULE, - .revision = 0, }; -static struct ipt_target ipt_mark_reg_v1 = { - .name = "MARK", - .target = target_v1, - .checkentry = checkentry_v1, - .me = THIS_MODULE, - .revision = 1, -}; - static int __init init(void) { - int err; - - err = ipt_register_target(&ipt_mark_reg_v0); - if (!err) { - err = ipt_register_target(&ipt_mark_reg_v1); - if (err) - ipt_unregister_target(&ipt_mark_reg_v0); - } - return err; + return ipt_register_match(&mark_match); } static void __exit fini(void) { - ipt_unregister_target(&ipt_mark_reg_v0); - ipt_unregister_target(&ipt_mark_reg_v1); + ipt_unregister_match(&mark_match); } module_init(init); diff -Nru 
a/net/ipv4/netfilter/ipt_realm.c b/net/ipv4/netfilter/ipt_realm.c --- a/net/ipv4/netfilter/ipt_realm.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_realm.c 2004-09-02 20:28:05 @@ -1,6 +1,6 @@ /* IP tables module for matching the routing realm * - * $Id: ipt_realm.c,v 1.3 2004/03/05 13:25:40 laforge Exp $ + * $Id: ipt_realm.c,v 1.1.1.1 2004/09/02 18:28:05 kenn Exp $ * * (C) 2003 by Sampsa Ranta * diff -Nru a/net/ipv4/netfilter/ipt_tcpmss.c b/net/ipv4/netfilter/ipt_tcpmss.c --- a/net/ipv4/netfilter/ipt_tcpmss.c 2005-06-17 21:48:29 +++ b/net/ipv4/netfilter/ipt_tcpmss.c 2005-03-21 21:01:20 @@ -1,8 +1,7 @@ -/* - * This is a module which is used for setting the MSS option in TCP packets. +/* Kernel module to match TCP MSS values. */ + +/* Copyright (C) 2000 Marc Boucher * - * Copyright (C) 2000 Marc Boucher - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -10,252 +9,118 @@ #include #include - -#include #include +#include #include -#include +#define TH_SYN 0x02 + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher "); -MODULE_DESCRIPTION("iptables TCP MSS modification module"); +MODULE_DESCRIPTION("iptables TCP MSS match module"); -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -static u_int16_t -cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck) +/* Returns 1 if the mss option is set and matched by the range, 0 otherwise */ +static inline int +mssoption_match(u_int16_t min, u_int16_t max, + const struct sk_buff *skb, + int invert, + int *hotdrop) { - u_int32_t diffs[] = { oldvalinv, newval }; - return csum_fold(csum_partial((char *)diffs, sizeof(diffs), - oldcheck^0xFFFF)); -} + struct tcphdr _tcph, *th; + /* tcp.doff is only 4 bits, ie. 
max 15 * 4 bytes */ + u8 _opt[15 * 4 - sizeof(_tcph)], *op; + unsigned int i, optlen; -static inline unsigned int -optlen(const u_int8_t *opt, unsigned int offset) -{ - /* Beware zero-length options: make finite progress */ - if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0) return 1; - else return opt[offset+1]; -} + /* If we don't have the whole header, drop packet. */ + th = skb_header_pointer(skb, skb->nh.iph->ihl * 4, + sizeof(_tcph), &_tcph); + if (th == NULL) + goto dropit; -static unsigned int -ipt_tcpmss_target(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const void *targinfo, - void *userinfo) -{ - const struct ipt_tcpmss_info *tcpmssinfo = targinfo; - struct tcphdr *tcph; - struct iphdr *iph; - u_int16_t tcplen, newtotlen, oldval, newmss; - unsigned int i; - u_int8_t *opt; + /* Malformed. */ + if (th->doff*4 < sizeof(*th)) + goto dropit; - if (!skb_ip_make_writable(pskb, (*pskb)->len)) - return NF_DROP; + optlen = th->doff*4 - sizeof(*th); + if (!optlen) + goto out; - iph = (*pskb)->nh.iph; - tcplen = (*pskb)->len - iph->ihl*4; + /* Truncated options. */ + op = skb_header_pointer(skb, skb->nh.iph->ihl * 4 + sizeof(*th), + optlen, _opt); + if (op == NULL) + goto dropit; - tcph = (void *)iph + iph->ihl*4; + for (i = 0; i < optlen; ) { + if (op[i] == TCPOPT_MSS + && (optlen - i) >= TCPOLEN_MSS + && op[i+1] == TCPOLEN_MSS) { + u_int16_t mssval; - /* Since it passed flags test in tcp match, we know it is is - not a fragment, and has data >= tcp header length. SYN - packets should not contain data: if they did, then we risk - running over MTU, sending Frag Needed and breaking things - badly. 
--RR */ - if (tcplen != tcph->doff*4) { - if (net_ratelimit()) - printk(KERN_ERR - "ipt_tcpmss_target: bad length (%d bytes)\n", - (*pskb)->len); - return NF_DROP; - } - - if(tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) { - if(!(*pskb)->dst) { - if (net_ratelimit()) - printk(KERN_ERR - "ipt_tcpmss_target: no dst?! can't determine path-MTU\n"); - return NF_DROP; /* or IPT_CONTINUE ?? */ + mssval = (op[i+2] << 8) | op[i+3]; + + return (mssval >= min && mssval <= max) ^ invert; } - - if(dst_mtu((*pskb)->dst) <= (sizeof(struct iphdr) + sizeof(struct tcphdr))) { - if (net_ratelimit()) - printk(KERN_ERR - "ipt_tcpmss_target: unknown or invalid path-MTU (%d)\n", dst_mtu((*pskb)->dst)); - return NF_DROP; /* or IPT_CONTINUE ?? */ - } - - newmss = dst_mtu((*pskb)->dst) - sizeof(struct iphdr) - sizeof(struct tcphdr); - } else - newmss = tcpmssinfo->mss; - - opt = (u_int8_t *)tcph; - for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)){ - if ((opt[i] == TCPOPT_MSS) && - ((tcph->doff*4 - i) >= TCPOLEN_MSS) && - (opt[i+1] == TCPOLEN_MSS)) { - u_int16_t oldmss; - - oldmss = (opt[i+2] << 8) | opt[i+3]; - - if((tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) && - (oldmss <= newmss)) - return IPT_CONTINUE; - - opt[i+2] = (newmss & 0xff00) >> 8; - opt[i+3] = (newmss & 0x00ff); - - tcph->check = cheat_check(htons(oldmss)^0xFFFF, - htons(newmss), - tcph->check); - - DEBUGP(KERN_INFO "ipt_tcpmss_target: %u.%u.%u.%u:%hu" - "->%u.%u.%u.%u:%hu changed TCP MSS option" - " (from %u to %u)\n", - NIPQUAD((*pskb)->nh.iph->saddr), - ntohs(tcph->source), - NIPQUAD((*pskb)->nh.iph->daddr), - ntohs(tcph->dest), - oldmss, newmss); - goto retmodified; - } + if (op[i] < 2) i++; + else i += op[i+1]?:1; } +out: + return invert; - /* - * MSS Option not found ?! add it.. 
- */ - if (skb_tailroom((*pskb)) < TCPOLEN_MSS) { - struct sk_buff *newskb; - - newskb = skb_copy_expand(*pskb, skb_headroom(*pskb), - TCPOLEN_MSS, GFP_ATOMIC); - if (!newskb) { - if (net_ratelimit()) - printk(KERN_ERR "ipt_tcpmss_target:" - " unable to allocate larger skb\n"); - return NF_DROP; - } - - kfree_skb(*pskb); - *pskb = newskb; - iph = (*pskb)->nh.iph; - tcph = (void *)iph + iph->ihl*4; - } - - skb_put((*pskb), TCPOLEN_MSS); - - opt = (u_int8_t *)tcph + sizeof(struct tcphdr); - memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); - - tcph->check = cheat_check(htons(tcplen) ^ 0xFFFF, - htons(tcplen + TCPOLEN_MSS), tcph->check); - tcplen += TCPOLEN_MSS; - - opt[0] = TCPOPT_MSS; - opt[1] = TCPOLEN_MSS; - opt[2] = (newmss & 0xff00) >> 8; - opt[3] = (newmss & 0x00ff); - - tcph->check = cheat_check(~0, *((u_int32_t *)opt), tcph->check); - - oldval = ((u_int16_t *)tcph)[6]; - tcph->doff += TCPOLEN_MSS/4; - tcph->check = cheat_check(oldval ^ 0xFFFF, - ((u_int16_t *)tcph)[6], tcph->check); - - newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS); - iph->check = cheat_check(iph->tot_len ^ 0xFFFF, - newtotlen, iph->check); - iph->tot_len = newtotlen; - - DEBUGP(KERN_INFO "ipt_tcpmss_target: %u.%u.%u.%u:%hu" - "->%u.%u.%u.%u:%hu added TCP MSS option (%u)\n", - NIPQUAD((*pskb)->nh.iph->saddr), - ntohs(tcph->source), - NIPQUAD((*pskb)->nh.iph->daddr), - ntohs(tcph->dest), - newmss); - - retmodified: - /* We never hw checksum SYN packets. 
*/ - BUG_ON((*pskb)->ip_summed == CHECKSUM_HW); - - (*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED; - return IPT_CONTINUE; + dropit: + *hotdrop = 1; + return 0; } -#define TH_SYN 0x02 - -static inline int find_syn_match(const struct ipt_entry_match *m) +static int +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + int *hotdrop) { - const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data; + const struct ipt_tcpmss_match_info *info = matchinfo; - if (strcmp(m->u.kernel.match->name, "tcp") == 0 - && (tcpinfo->flg_cmp & TH_SYN) - && !(tcpinfo->invflags & IPT_TCP_INV_FLAGS)) - return 1; - - return 0; + return mssoption_match(info->mss_min, info->mss_max, skb, + info->invert, hotdrop); } -/* Must specify -p tcp --syn/--tcp-flags SYN */ static int -ipt_tcpmss_checkentry(const char *tablename, - const struct ipt_entry *e, - void *targinfo, - unsigned int targinfosize, - unsigned int hook_mask) +checkentry(const char *tablename, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) { - const struct ipt_tcpmss_info *tcpmssinfo = targinfo; - - if (targinfosize != IPT_ALIGN(sizeof(struct ipt_tcpmss_info))) { - DEBUGP("ipt_tcpmss_checkentry: targinfosize %u != %u\n", - targinfosize, IPT_ALIGN(sizeof(struct ipt_tcpmss_info))); + if (matchsize != IPT_ALIGN(sizeof(struct ipt_tcpmss_match_info))) return 0; - } - - if((tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) && - ((hook_mask & ~((1 << NF_IP_FORWARD) - | (1 << NF_IP_LOCAL_OUT) - | (1 << NF_IP_POST_ROUTING))) != 0)) { - printk("TCPMSS: path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n"); + /* Must specify -p tcp */ + if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) { + printk("tcpmss: Only works on TCP packets\n"); return 0; } - if (e->ip.proto == IPPROTO_TCP - && !(e->ip.invflags & IPT_INV_PROTO) - && IPT_MATCH_ITERATE(e, find_syn_match)) - return 1; - - 
printk("TCPMSS: Only works on TCP SYN packets\n"); - return 0; + return 1; } -static struct ipt_target ipt_tcpmss_reg = { - .name = "TCPMSS", - .target = ipt_tcpmss_target, - .checkentry = ipt_tcpmss_checkentry, +static struct ipt_match tcpmss_match = { + .name = "tcpmss", + .match = &match, + .checkentry = &checkentry, .me = THIS_MODULE, }; static int __init init(void) { - return ipt_register_target(&ipt_tcpmss_reg); + return ipt_register_match(&tcpmss_match); } static void __exit fini(void) { - ipt_unregister_target(&ipt_tcpmss_reg); + ipt_unregister_match(&tcpmss_match); } module_init(init); diff -Nru a/scripts/Makefile.build b/scripts/Makefile.build --- a/scripts/Makefile.build 2005-06-17 21:48:29 +++ b/scripts/Makefile.build 2005-03-28 01:46:48 @@ -142,8 +142,11 @@ quiet_cmd_cc_o_c = CC $(quiet_modtag) $@ +# define listing_o_c to get compiler listings from .c -> .o compilations +#listing_o_c = -Wa,-adnhls=$(subst $(comma),_,$(@D)/$(*F)).lst -g + ifndef CONFIG_MODVERSIONS -cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< +cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< $(listing_o_c) else # When module versioning is enabled the following steps are executed: diff -Nru a/scripts/checkstack.pl b/scripts/checkstack.pl --- a/scripts/checkstack.pl 2005-06-17 21:48:29 +++ b/scripts/checkstack.pl 2005-07-24 23:36:49 @@ -12,6 +12,7 @@ # sh64 port by Paul Mundt # Random bits by Matt Mackall # M68k port by Geert Uytterhoeven and Andreas Schwab +# VAX port by Jan-Benedict Glaw # # Usage: # objdump -d vmlinux | stackcheck.pl [arch] @@ -62,6 +63,10 @@ } elsif ($arch eq 'ppc64') { #XXX $re = qr/.*stdu.*r1,-($x{1,8})\(r1\)/o; + } elsif ($arch eq 'vax') { + # 80104ec2: c2 10 5e subl2 $0x10,sp + # 8019d880: 9e ce b0 fe movab 0xfffffeb0(sp),sp + $re = qr/^.*(?:subl2 \$|movab )(0x$x{1,8})(?:,sp|\(sp\),sp)$/o; } elsif ($arch =~ /^s390x?$/) { # 11160: a7 fb ff 60 aghi %r15,-160 $re = qr/.*ag?hi.*\%r15,-(([0-9]{2}|[3-9])[0-9]{2})/o;