author     Paul Mackerras <paulus@samba.org>    2008-01-31 11:25:51 +1100
committer  Paul Mackerras <paulus@samba.org>    2008-01-31 11:25:51 +1100
commit     bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree       5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /include
parent     4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent     5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'include'): 914 files changed, 27226 insertions, 24171 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 19c3ead2a90b..fb7171b1bd22 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -168,7 +168,8 @@ struct acpi_device_flags { u32 power_manageable:1; u32 performance_manageable:1; u32 wake_capable:1; /* Wakeup(_PRW) supported? */ - u32 reserved:20; + u32 force_power_state:1; + u32 reserved:19; }; /* File System */ @@ -318,7 +319,7 @@ struct acpi_bus_event { u32 data; }; -extern struct kset acpi_subsys; +extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); /* * External Functions diff --git a/include/acpi/reboot.h b/include/acpi/reboot.h new file mode 100644 index 000000000000..8857f57e0b78 --- /dev/null +++ b/include/acpi/reboot.h @@ -0,0 +1,9 @@ + +/* + * Dummy placeholder to make the EFI patches apply to the x86 tree. + * Andrew/Len, please just kill this file if you encounter it. + */ +#ifndef acpi_reboot +# define acpi_reboot() do { } while (0) +#endif + diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h index ef855a3bc0f5..26c179135293 100644 --- a/include/asm-alpha/agp.h +++ b/include/asm-alpha/agp.h @@ -7,7 +7,6 @@ #define map_page_into_agp(page) #define unmap_page_from_agp(page) -#define flush_agp_mappings() #define flush_agp_cache() mb() /* Convert a physical address to an address suitable for the GART. */ diff --git a/include/asm-arm/arch-at91/at91_lcdc.h b/include/asm-arm/arch-at91/at91_lcdc.h deleted file mode 100644 index ab040a40d37b..000000000000 --- a/include/asm-arm/arch-at91/at91_lcdc.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * include/asm-arm/arch-at91/at91_lcdc.h - * - * LCD Controller (LCDC). - * Based on AT91SAM9261 datasheet revision E. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef AT91_LCDC_H -#define AT91_LCDC_H - -#define AT91_LCDC_DMABADDR1 0x00 /* DMA Base Address Register 1 */ -#define AT91_LCDC_DMABADDR2 0x04 /* DMA Base Address Register 2 */ -#define AT91_LCDC_DMAFRMPT1 0x08 /* DMA Frame Pointer Register 1 */ -#define AT91_LCDC_DMAFRMPT2 0x0c /* DMA Frame Pointer Register 2 */ -#define AT91_LCDC_DMAFRMADD1 0x10 /* DMA Frame Address Register 1 */ -#define AT91_LCDC_DMAFRMADD2 0x14 /* DMA Frame Address Register 2 */ - -#define AT91_LCDC_DMAFRMCFG 0x18 /* DMA Frame Configuration Register */ -#define AT91_LCDC_FRSIZE (0x7fffff << 0) /* Frame Size */ -#define AT91_LCDC_BLENGTH (0x7f << 24) /* Burst Length */ - -#define AT91_LCDC_DMACON 0x1c /* DMA Control Register */ -#define AT91_LCDC_DMAEN (0x1 << 0) /* DMA Enable */ -#define AT91_LCDC_DMARST (0x1 << 1) /* DMA Reset */ -#define AT91_LCDC_DMABUSY (0x1 << 2) /* DMA Busy */ - -#define AT91_LCDC_LCDCON1 0x0800 /* LCD Control Register 1 */ -#define AT91_LCDC_BYPASS (1 << 0) /* Bypass lcd_dotck divider */ -#define AT91_LCDC_CLKVAL (0x1ff << 12) /* Clock Divider */ -#define AT91_LCDC_LINCNT (0x7ff << 21) /* Line Counter */ - -#define AT91_LCDC_LCDCON2 0x0804 /* LCD Control Register 2 */ -#define AT91_LCDC_DISTYPE (3 << 0) /* Display Type */ -#define AT91_LCDC_DISTYPE_STNMONO (0 << 0) -#define AT91_LCDC_DISTYPE_STNCOLOR (1 << 0) -#define AT91_LCDC_DISTYPE_TFT (2 << 0) -#define AT91_LCDC_SCANMOD (1 << 2) /* Scan Mode */ -#define AT91_LCDC_SCANMOD_SINGLE (0 << 2) -#define AT91_LCDC_SCANMOD_DUAL (1 << 2) -#define AT91_LCDC_IFWIDTH (3 << 3) /*Interface Width */ -#define AT91_LCDC_IFWIDTH_4 (0 << 3) -#define AT91_LCDC_IFWIDTH_8 (1 << 3) -#define AT91_LCDC_IFWIDTH_16 (2 << 3) -#define AT91_LCDC_PIXELSIZE (7 << 5) /* Bits per pixel */ -#define AT91_LCDC_PIXELSIZE_1 (0 << 5) -#define AT91_LCDC_PIXELSIZE_2 (1 << 5) -#define AT91_LCDC_PIXELSIZE_4 (2 << 5) -#define AT91_LCDC_PIXELSIZE_8 (3 << 5) -#define AT91_LCDC_PIXELSIZE_16 (4 << 5) -#define AT91_LCDC_PIXELSIZE_24 (5 << 5) -#define AT91_LCDC_INVVD (1 << 8) /* LCD Data polarity */ -#define AT91_LCDC_INVVD_NORMAL (0 << 8) -#define AT91_LCDC_INVVD_INVERTED (1 << 8) -#define AT91_LCDC_INVFRAME (1 << 9 ) /* LCD VSync polarity */ -#define AT91_LCDC_INVFRAME_NORMAL (0 << 9) -#define AT91_LCDC_INVFRAME_INVERTED (1 << 9) -#define AT91_LCDC_INVLINE (1 << 10) /* LCD HSync polarity */ -#define AT91_LCDC_INVLINE_NORMAL (0 << 10) -#define AT91_LCDC_INVLINE_INVERTED (1 << 10) -#define AT91_LCDC_INVCLK (1 << 11) /* LCD dotclk polarity */ -#define AT91_LCDC_INVCLK_NORMAL (0 << 11) -#define AT91_LCDC_INVCLK_INVERTED (1 << 11) -#define AT91_LCDC_INVDVAL (1 << 12) /* LCD dval polarity */ -#define AT91_LCDC_INVDVAL_NORMAL (0 << 12) -#define AT91_LCDC_INVDVAL_INVERTED (1 << 12) -#define AT91_LCDC_CLKMOD (1 << 15) /* LCD dotclk mode */ -#define AT91_LCDC_CLKMOD_ACTIVEDISPLAY (0 << 15) -#define AT91_LCDC_CLKMOD_ALWAYSACTIVE (1 << 15) -#define AT91_LCDC_MEMOR (1 << 31) /* Memory Ordering Format */ -#define AT91_LCDC_MEMOR_BIG (0 << 31) -#define AT91_LCDC_MEMOR_LITTLE (1 << 31) - -#define AT91_LCDC_TIM1 0x0808 /* LCD Timing Register 1 */ -#define AT91_LCDC_VFP (0xff << 0) /* Vertical Front Porch */ -#define AT91_LCDC_VBP (0xff << 8) /* Vertical Back Porch */ -#define AT91_LCDC_VPW (0x3f << 16) /* Vertical Synchronization Pulse Width */ -#define AT91_LCDC_VHDLY (0xf << 24) /* Vertical to Horizontal Delay */ - -#define AT91_LCDC_TIM2 0x080c /* LCD Timing Register 2 */ -#define AT91_LCDC_HBP (0xff << 0) /* Horizontal Back Porch */ -#define AT91_LCDC_HPW (0x3f << 8) /* Horizontal 
Synchronization Pulse Width */ -#define AT91_LCDC_HFP (0x7ff << 21) /* Horizontal Front Porch */ - -#define AT91_LCDC_LCDFRMCFG 0x0810 /* LCD Frame Configuration Register */ -#define AT91_LCDC_LINEVAL (0x7ff << 0) /* Vertical Size of LCD Module */ -#define AT91_LCDC_HOZVAL (0x7ff << 21) /* Horizontal Size of LCD Module */ - -#define AT91_LCDC_FIFO 0x0814 /* LCD FIFO Register */ -#define AT91_LCDC_FIFOTH (0xffff) /* FIFO Threshold */ - -#define AT91_LCDC_DP1_2 0x081c /* Dithering Pattern DP1_2 Register */ -#define AT91_LCDC_DP4_7 0x0820 /* Dithering Pattern DP4_7 Register */ -#define AT91_LCDC_DP3_5 0x0824 /* Dithering Pattern DP3_5 Register */ -#define AT91_LCDC_DP2_3 0x0828 /* Dithering Pattern DP2_3 Register */ -#define AT91_LCDC_DP5_7 0x082c /* Dithering Pattern DP5_7 Register */ -#define AT91_LCDC_DP3_4 0x0830 /* Dithering Pattern DP3_4 Register */ -#define AT91_LCDC_DP4_5 0x0834 /* Dithering Pattern DP4_5 Register */ -#define AT91_LCDC_DP6_7 0x0838 /* Dithering Pattern DP6_7 Register */ -#define AT91_LCDC_DP1_2_VAL (0xff) -#define AT91_LCDC_DP4_7_VAL (0xfffffff) -#define AT91_LCDC_DP3_5_VAL (0xfffff) -#define AT91_LCDC_DP2_3_VAL (0xfff) -#define AT91_LCDC_DP5_7_VAL (0xfffffff) -#define AT91_LCDC_DP3_4_VAL (0xffff) -#define AT91_LCDC_DP4_5_VAL (0xfffff) -#define AT91_LCDC_DP6_7_VAL (0xfffffff) - -#define AT91_LCDC_PWRCON 0x083c /* Power Control Register */ -#define AT91_LCDC_PWR (1 << 0) /* LCD Module Power Control */ -#define AT91_LCDC_GUARDT (0x7f << 1) /* Delay in Frame Period */ -#define AT91_LCDC_BUSY (1 << 31) /* LCD Busy */ - -#define AT91_LCDC_CONTRAST_CTR 0x0840 /* Contrast Control Register */ -#define AT91_LCDC_PS (3 << 0) /* Contrast Counter Prescaler */ -#define AT91_LCDC_PS_DIV1 (0 << 0) -#define AT91_LCDC_PS_DIV2 (1 << 0) -#define AT91_LCDC_PS_DIV4 (2 << 0) -#define AT91_LCDC_PS_DIV8 (3 << 0) -#define AT91_LCDC_POL (1 << 2) /* Polarity of output Pulse */ -#define AT91_LCDC_POL_NEGATIVE (0 << 2) -#define AT91_LCDC_POL_POSITIVE (1 << 2) -#define AT91_LCDC_ENA (1 << 3) /* PWM generator Control */ -#define AT91_LCDC_ENA_PWMDISABLE (0 << 3) -#define AT91_LCDC_ENA_PWMENABLE (1 << 3) - -#define AT91_LCDC_CONTRAST_VAL 0x0844 /* Contrast Value Register */ -#define AT91_LCDC_CVAL (0xff) /* PWM compare value */ - -#define AT91_LCDC_IER 0x0848 /* Interrupt Enable Register */ -#define AT91_LCDC_IDR 0x084c /* Interrupt Disable Register */ -#define AT91_LCDC_IMR 0x0850 /* Interrupt Mask Register */ -#define AT91_LCDC_ISR 0x0854 /* Interrupt Enable Register */ -#define AT91_LCDC_ICR 0x0858 /* Interrupt Clear Register */ -#define AT91_LCDC_LNI (1 << 0) /* Line Interrupt */ -#define AT91_LCDC_LSTLNI (1 << 1) /* Last Line Interrupt */ -#define AT91_LCDC_EOFI (1 << 2) /* DMA End Of Frame Interrupt */ -#define AT91_LCDC_UFLWI (1 << 4) /* FIFO Underflow Interrupt */ -#define AT91_LCDC_OWRI (1 << 5) /* FIFO Overwrite Interrupt */ -#define AT91_LCDC_MERI (1 << 6) /* DMA Memory Error Interrupt */ - -#define AT91_LCDC_LUT_(n) (0x0c00 + ((n)*4)) /* Palette Entry 0..255 */ - -#endif diff --git a/include/asm-arm/arch-at91/at91_pmc.h b/include/asm-arm/arch-at91/at91_pmc.h index 33ff5b6798ee..52cd8e5dabc9 100644 --- a/include/asm-arm/arch-at91/at91_pmc.h +++ b/include/asm-arm/arch-at91/at91_pmc.h @@ -25,6 +25,7 @@ #define AT91RM9200_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend [AT91RM9200 only] */ #define AT91RM9200_PMC_UHP (1 << 4) /* USB Host Port Clock [AT91RM9200 only] */ #define AT91SAM926x_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91SAM926x only] */ +#define 
AT91CAP9_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91CAP9 only] */ #define AT91SAM926x_PMC_UDP (1 << 7) /* USB Devcice Port Clock [AT91SAM926x only] */ #define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */ #define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */ @@ -37,7 +38,9 @@ #define AT91_PMC_PCDR (AT91_PMC + 0x14) /* Peripheral Clock Disable Register */ #define AT91_PMC_PCSR (AT91_PMC + 0x18) /* Peripheral Clock Status Register */ -#define AT91_CKGR_MOR (AT91_PMC + 0x20) /* Main Oscillator Register */ +#define AT91_CKGR_UCKR (AT91_PMC + 0x1C) /* UTMI Clock Register [SAM9RL, CAP9] */ + +#define AT91_CKGR_MOR (AT91_PMC + 0x20) /* Main Oscillator Register [not on SAM9RL] */ #define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ #define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass [AT91SAM926x only] */ #define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */ @@ -52,6 +55,10 @@ #define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */ #define AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */ #define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */ +#define AT91_PMC_USBDIV (3 << 28) /* USB Divisor (PLLB only) */ +#define AT91_PMC_USBDIV_1 (0 << 28) +#define AT91_PMC_USBDIV_2 (1 << 28) +#define AT91_PMC_USBDIV_4 (2 << 28) #define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */ #define AT91_PMC_MCKR (AT91_PMC + 0x30) /* Master Clock Register */ diff --git a/include/asm-arm/arch-at91/at91_rtt.h b/include/asm-arm/arch-at91/at91_rtt.h index bae1103fbbb2..39a32633b275 100644 --- a/include/asm-arm/arch-at91/at91_rtt.h +++ b/include/asm-arm/arch-at91/at91_rtt.h @@ -13,19 +13,19 @@ #ifndef AT91_RTT_H #define AT91_RTT_H -#define AT91_RTT_MR (AT91_RTT + 0x00) /* Real-time Mode Register */ +#define AT91_RTT_MR 0x00 /* Real-time Mode Register */ #define AT91_RTT_RTPRES (0xffff << 0) /* Real-time Timer Prescaler Value */ #define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */ #define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */ #define AT91_RTT_RTTRST (1 << 18) /* Real Time Timer Restart */ -#define AT91_RTT_AR (AT91_RTT + 0x04) /* Real-time Alarm Register */ +#define AT91_RTT_AR 0x04 /* Real-time Alarm Register */ #define AT91_RTT_ALMV (0xffffffff) /* Alarm Value */ -#define AT91_RTT_VR (AT91_RTT + 0x08) /* Real-time Value Register */ +#define AT91_RTT_VR 0x08 /* Real-time Value Register */ #define AT91_RTT_CRTV (0xffffffff) /* Current Real-time Value */ -#define AT91_RTT_SR (AT91_RTT + 0x0c) /* Real-time Status Register */ +#define AT91_RTT_SR 0x0c /* Real-time Status Register */ #define AT91_RTT_ALMS (1 << 0) /* Real-time Alarm Status */ #define AT91_RTT_RTTINC (1 << 1) /* Real-time Timer Increment */ diff --git a/include/asm-arm/arch-at91/at91_twi.h b/include/asm-arm/arch-at91/at91_twi.h index ca9a90733456..f9f2e3cd95c5 100644 --- a/include/asm-arm/arch-at91/at91_twi.h +++ b/include/asm-arm/arch-at91/at91_twi.h @@ -21,6 +21,8 @@ #define AT91_TWI_STOP (1 << 1) /* Send a Stop Condition */ #define AT91_TWI_MSEN (1 << 2) /* Master Transfer Enable */ #define AT91_TWI_MSDIS (1 << 3) /* Master Transfer Disable */ +#define AT91_TWI_SVEN (1 << 4) /* Slave Transfer Enable [SAM9260 only] */ +#define AT91_TWI_SVDIS (1 << 5) /* Slave Transfer Disable [SAM9260 only] */ #define AT91_TWI_SWRST (1 << 7) /* Software Reset */ #define AT91_TWI_MMR 0x04 /* Master Mode Register */ @@ -32,6 +34,9 @@ #define AT91_TWI_MREAD (1 << 12) /* Master Read Direction */ #define AT91_TWI_DADR (0x7f << 16) /* Device Address */ +#define 
AT91_TWI_SMR 0x08 /* Slave Mode Register [SAM9260 only] */ +#define AT91_TWI_SADR (0x7f << 16) /* Slave Address */ + #define AT91_TWI_IADR 0x0c /* Internal Address Register */ #define AT91_TWI_CWGR 0x10 /* Clock Waveform Generator Register */ @@ -43,9 +48,15 @@ #define AT91_TWI_TXCOMP (1 << 0) /* Transmission Complete */ #define AT91_TWI_RXRDY (1 << 1) /* Receive Holding Register Ready */ #define AT91_TWI_TXRDY (1 << 2) /* Transmit Holding Register Ready */ +#define AT91_TWI_SVREAD (1 << 3) /* Slave Read [SAM9260 only] */ +#define AT91_TWI_SVACC (1 << 4) /* Slave Access [SAM9260 only] */ +#define AT91_TWI_GACC (1 << 5) /* General Call Access [SAM9260 only] */ #define AT91_TWI_OVRE (1 << 6) /* Overrun Error [AT91RM9200 only] */ #define AT91_TWI_UNRE (1 << 7) /* Underrun Error [AT91RM9200 only] */ #define AT91_TWI_NACK (1 << 8) /* Not Acknowledged */ +#define AT91_TWI_ARBLST (1 << 9) /* Arbitration Lost [SAM9260 only] */ +#define AT91_TWI_SCLWS (1 << 10) /* Clock Wait State [SAM9260 only] */ +#define AT91_TWI_EOSACC (1 << 11) /* End of Slave Address [SAM9260 only] */ #define AT91_TWI_IER 0x24 /* Interrupt Enable Register */ #define AT91_TWI_IDR 0x28 /* Interrupt Disable Register */ diff --git a/include/asm-arm/arch-at91/at91cap9.h b/include/asm-arm/arch-at91/at91cap9.h new file mode 100644 index 000000000000..73e1fcf4a0aa --- /dev/null +++ b/include/asm-arm/arch-at91/at91cap9.h @@ -0,0 +1,121 @@ +/* + * include/asm-arm/arch-at91/at91cap9.h + * + * Copyright (C) 2007 Stelian Pop <stelian.pop@leadtechdesign.com> + * Copyright (C) 2007 Lead Tech Design <www.leadtechdesign.com> + * Copyright (C) 2007 Atmel Corporation. + * + * Common definitions. + * Based on AT91CAP9 datasheet revision B (Preliminary). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef AT91CAP9_H +#define AT91CAP9_H + +/* + * Peripheral identifiers/interrupts. 
+ */ +#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */ +#define AT91_ID_SYS 1 /* System Peripherals */ +#define AT91CAP9_ID_PIOABCD 2 /* Parallel IO Controller A, B, C and D */ +#define AT91CAP9_ID_MPB0 3 /* MP Block Peripheral 0 */ +#define AT91CAP9_ID_MPB1 4 /* MP Block Peripheral 1 */ +#define AT91CAP9_ID_MPB2 5 /* MP Block Peripheral 2 */ +#define AT91CAP9_ID_MPB3 6 /* MP Block Peripheral 3 */ +#define AT91CAP9_ID_MPB4 7 /* MP Block Peripheral 4 */ +#define AT91CAP9_ID_US0 8 /* USART 0 */ +#define AT91CAP9_ID_US1 9 /* USART 1 */ +#define AT91CAP9_ID_US2 10 /* USART 2 */ +#define AT91CAP9_ID_MCI0 11 /* Multimedia Card Interface 0 */ +#define AT91CAP9_ID_MCI1 12 /* Multimedia Card Interface 1 */ +#define AT91CAP9_ID_CAN 13 /* CAN */ +#define AT91CAP9_ID_TWI 14 /* Two-Wire Interface */ +#define AT91CAP9_ID_SPI0 15 /* Serial Peripheral Interface 0 */ +#define AT91CAP9_ID_SPI1 16 /* Serial Peripheral Interface 0 */ +#define AT91CAP9_ID_SSC0 17 /* Serial Synchronous Controller 0 */ +#define AT91CAP9_ID_SSC1 18 /* Serial Synchronous Controller 1 */ +#define AT91CAP9_ID_AC97C 19 /* AC97 Controller */ +#define AT91CAP9_ID_TCB 20 /* Timer Counter 0, 1 and 2 */ +#define AT91CAP9_ID_PWMC 21 /* Pulse Width Modulation Controller */ +#define AT91CAP9_ID_EMAC 22 /* Ethernet */ +#define AT91CAP9_ID_AESTDES 23 /* Advanced Encryption Standard, Triple DES */ +#define AT91CAP9_ID_ADC 24 /* Analog-to-Digital Converter */ +#define AT91CAP9_ID_ISI 25 /* Image Sensor Interface */ +#define AT91CAP9_ID_LCDC 26 /* LCD Controller */ +#define AT91CAP9_ID_DMA 27 /* DMA Controller */ +#define AT91CAP9_ID_UDPHS 28 /* USB High Speed Device Port */ +#define AT91CAP9_ID_UHP 29 /* USB Host Port */ +#define AT91CAP9_ID_IRQ0 30 /* Advanced Interrupt Controller (IRQ0) */ +#define AT91CAP9_ID_IRQ1 31 /* Advanced Interrupt Controller (IRQ1) */ + +/* + * User Peripheral physical base addresses. 
+ */ +#define AT91CAP9_BASE_UDPHS 0xfff78000 +#define AT91CAP9_BASE_TCB0 0xfff7c000 +#define AT91CAP9_BASE_TC0 0xfff7c000 +#define AT91CAP9_BASE_TC1 0xfff7c040 +#define AT91CAP9_BASE_TC2 0xfff7c080 +#define AT91CAP9_BASE_MCI0 0xfff80000 +#define AT91CAP9_BASE_MCI1 0xfff84000 +#define AT91CAP9_BASE_TWI 0xfff88000 +#define AT91CAP9_BASE_US0 0xfff8c000 +#define AT91CAP9_BASE_US1 0xfff90000 +#define AT91CAP9_BASE_US2 0xfff94000 +#define AT91CAP9_BASE_SSC0 0xfff98000 +#define AT91CAP9_BASE_SSC1 0xfff9c000 +#define AT91CAP9_BASE_AC97C 0xfffa0000 +#define AT91CAP9_BASE_SPI0 0xfffa4000 +#define AT91CAP9_BASE_SPI1 0xfffa8000 +#define AT91CAP9_BASE_CAN 0xfffac000 +#define AT91CAP9_BASE_PWMC 0xfffb8000 +#define AT91CAP9_BASE_EMAC 0xfffbc000 +#define AT91CAP9_BASE_ADC 0xfffc0000 +#define AT91CAP9_BASE_ISI 0xfffc4000 +#define AT91_BASE_SYS 0xffffe200 + +/* + * System Peripherals (offset from AT91_BASE_SYS) + */ +#define AT91_ECC (0xffffe200 - AT91_BASE_SYS) +#define AT91_BCRAMC (0xffffe400 - AT91_BASE_SYS) +#define AT91_DDRSDRC (0xffffe600 - AT91_BASE_SYS) +#define AT91_SMC (0xffffe800 - AT91_BASE_SYS) +#define AT91_MATRIX (0xffffea00 - AT91_BASE_SYS) +#define AT91_CCFG (0xffffeb10 - AT91_BASE_SYS) +#define AT91_DMA (0xffffec00 - AT91_BASE_SYS) +#define AT91_DBGU (0xffffee00 - AT91_BASE_SYS) +#define AT91_AIC (0xfffff000 - AT91_BASE_SYS) +#define AT91_PIOA (0xfffff200 - AT91_BASE_SYS) +#define AT91_PIOB (0xfffff400 - AT91_BASE_SYS) +#define AT91_PIOC (0xfffff600 - AT91_BASE_SYS) +#define AT91_PIOD (0xfffff800 - AT91_BASE_SYS) +#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS) +#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS) +#define AT91_SHDC (0xfffffd10 - AT91_BASE_SYS) +#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS) +#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS) +#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS) +#define AT91_GPBR (0xfffffd50 - AT91_BASE_SYS) + +/* + * Internal Memory. + */ +#define AT91CAP9_SRAM_BASE 0x00100000 /* Internal SRAM base address */ +#define AT91CAP9_SRAM_SIZE (32 * SZ_1K) /* Internal SRAM size (32Kb) */ + +#define AT91CAP9_ROM_BASE 0x00400000 /* Internal ROM base address */ +#define AT91CAP9_ROM_SIZE (32 * SZ_1K) /* Internal ROM size (32Kb) */ + +#define AT91CAP9_LCDC_BASE 0x00500000 /* LCD Controller */ +#define AT91CAP9_UDPHS_BASE 0x00600000 /* USB High Speed Device Port */ +#define AT91CAP9_UHP_BASE 0x00700000 /* USB Host controller */ + +#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6 + +#endif diff --git a/include/asm-arm/arch-at91/at91cap9_matrix.h b/include/asm-arm/arch-at91/at91cap9_matrix.h new file mode 100644 index 000000000000..a641686b6c3d --- /dev/null +++ b/include/asm-arm/arch-at91/at91cap9_matrix.h @@ -0,0 +1,132 @@ +/* + * include/asm-arm/arch-at91/at91cap9_matrix.h + * + * Copyright (C) 2007 Stelian Pop <stelian.pop@leadtechdesign.com> + * Copyright (C) 2007 Lead Tech Design <www.leadtechdesign.com> + * Copyright (C) 2006 Atmel Corporation. + * + * Memory Controllers (MATRIX, EBI) - System peripherals registers. + * Based on AT91CAP9 datasheet revision B (Preliminary). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef AT91CAP9_MATRIX_H +#define AT91CAP9_MATRIX_H + +#define AT91_MATRIX_MCFG0 (AT91_MATRIX + 0x00) /* Master Configuration Register 0 */ +#define AT91_MATRIX_MCFG1 (AT91_MATRIX + 0x04) /* Master Configuration Register 1 */ +#define AT91_MATRIX_MCFG2 (AT91_MATRIX + 0x08) /* Master Configuration Register 2 */ +#define AT91_MATRIX_MCFG3 (AT91_MATRIX + 0x0C) /* Master Configuration Register 3 */ +#define AT91_MATRIX_MCFG4 (AT91_MATRIX + 0x10) /* Master Configuration Register 4 */ +#define AT91_MATRIX_MCFG5 (AT91_MATRIX + 0x14) /* Master Configuration Register 5 */ +#define AT91_MATRIX_MCFG6 (AT91_MATRIX + 0x18) /* Master Configuration Register 6 */ +#define AT91_MATRIX_MCFG7 (AT91_MATRIX + 0x1C) /* Master Configuration Register 7 */ +#define AT91_MATRIX_MCFG8 (AT91_MATRIX + 0x20) /* Master Configuration Register 8 */ +#define AT91_MATRIX_MCFG9 (AT91_MATRIX + 0x24) /* Master Configuration Register 9 */ +#define AT91_MATRIX_MCFG10 (AT91_MATRIX + 0x28) /* Master Configuration Register 10 */ +#define AT91_MATRIX_MCFG11 (AT91_MATRIX + 0x2C) /* Master Configuration Register 11 */ +#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */ +#define AT91_MATRIX_ULBT_INFINITE (0 << 0) +#define AT91_MATRIX_ULBT_SINGLE (1 << 0) +#define AT91_MATRIX_ULBT_FOUR (2 << 0) +#define AT91_MATRIX_ULBT_EIGHT (3 << 0) +#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0) + +#define AT91_MATRIX_SCFG0 (AT91_MATRIX + 0x40) /* Slave Configuration Register 0 */ +#define AT91_MATRIX_SCFG1 (AT91_MATRIX + 0x44) /* Slave Configuration Register 1 */ +#define AT91_MATRIX_SCFG2 (AT91_MATRIX + 0x48) /* Slave Configuration Register 2 */ +#define AT91_MATRIX_SCFG3 (AT91_MATRIX + 0x4C) /* Slave Configuration Register 3 */ +#define AT91_MATRIX_SCFG4 (AT91_MATRIX + 0x50) /* Slave Configuration Register 4 */ +#define AT91_MATRIX_SCFG5 (AT91_MATRIX + 0x54) /* Slave Configuration Register 5 */ +#define AT91_MATRIX_SCFG6 (AT91_MATRIX + 0x58) /* Slave Configuration Register 6 */ +#define AT91_MATRIX_SCFG7 (AT91_MATRIX + 0x5C) /* Slave Configuration Register 7 */ +#define AT91_MATRIX_SCFG8 (AT91_MATRIX + 0x60) /* Slave Configuration Register 8 */ +#define AT91_MATRIX_SCFG9 (AT91_MATRIX + 0x64) /* Slave Configuration Register 9 */ +#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */ +#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */ +#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16) +#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16) +#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16) +#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */ +#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */ +#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24) +#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24) + +#define AT91_MATRIX_PRAS0 (AT91_MATRIX + 0x80) /* Priority Register A for Slave 0 */ +#define AT91_MATRIX_PRBS0 (AT91_MATRIX + 0x84) /* Priority Register B for Slave 0 */ +#define AT91_MATRIX_PRAS1 (AT91_MATRIX + 0x88) /* Priority Register A for Slave 1 */ +#define AT91_MATRIX_PRBS1 (AT91_MATRIX + 0x8C) /* Priority Register B for Slave 1 */ +#define AT91_MATRIX_PRAS2 (AT91_MATRIX + 0x90) /* Priority Register A for Slave 2 */ +#define AT91_MATRIX_PRBS2 (AT91_MATRIX + 0x94) /* Priority Register B for Slave 2 */ +#define AT91_MATRIX_PRAS3 (AT91_MATRIX + 0x98) /* Priority Register A for Slave 3 */ +#define AT91_MATRIX_PRBS3 (AT91_MATRIX + 0x9C) /* Priority Register B for Slave 3 */ +#define AT91_MATRIX_PRAS4 (AT91_MATRIX + 0xA0) /* Priority Register A for Slave 
4 */ +#define AT91_MATRIX_PRBS4 (AT91_MATRIX + 0xA4) /* Priority Register B for Slave 4 */ +#define AT91_MATRIX_PRAS5 (AT91_MATRIX + 0xA8) /* Priority Register A for Slave 5 */ +#define AT91_MATRIX_PRBS5 (AT91_MATRIX + 0xAC) /* Priority Register B for Slave 5 */ +#define AT91_MATRIX_PRAS6 (AT91_MATRIX + 0xB0) /* Priority Register A for Slave 6 */ +#define AT91_MATRIX_PRBS6 (AT91_MATRIX + 0xB4) /* Priority Register B for Slave 6 */ +#define AT91_MATRIX_PRAS7 (AT91_MATRIX + 0xB8) /* Priority Register A for Slave 7 */ +#define AT91_MATRIX_PRBS7 (AT91_MATRIX + 0xBC) /* Priority Register B for Slave 7 */ +#define AT91_MATRIX_PRAS8 (AT91_MATRIX + 0xC0) /* Priority Register A for Slave 8 */ +#define AT91_MATRIX_PRBS8 (AT91_MATRIX + 0xC4) /* Priority Register B for Slave 8 */ +#define AT91_MATRIX_PRAS9 (AT91_MATRIX + 0xC8) /* Priority Register A for Slave 9 */ +#define AT91_MATRIX_PRBS9 (AT91_MATRIX + 0xCC) /* Priority Register B for Slave 9 */ +#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */ +#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */ +#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */ +#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */ +#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */ +#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */ +#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */ +#define AT91_MATRIX_M7PR (3 << 28) /* Master 7 Priority */ +#define AT91_MATRIX_M8PR (3 << 0) /* Master 8 Priority (in Register B) */ +#define AT91_MATRIX_M9PR (3 << 4) /* Master 9 Priority (in Register B) */ +#define AT91_MATRIX_M10PR (3 << 8) /* Master 10 Priority (in Register B) */ +#define AT91_MATRIX_M11PR (3 << 12) /* Master 11 Priority (in Register B) */ + +#define AT91_MATRIX_MRCR (AT91_MATRIX + 0x100) /* Master Remap Control Register */ +#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */ +#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */ +#define AT91_MATRIX_RCB2 (1 << 2) +#define AT91_MATRIX_RCB3 (1 << 3) +#define AT91_MATRIX_RCB4 (1 << 4) +#define AT91_MATRIX_RCB5 (1 << 5) +#define AT91_MATRIX_RCB6 (1 << 6) +#define AT91_MATRIX_RCB7 (1 << 7) +#define AT91_MATRIX_RCB8 (1 << 8) +#define AT91_MATRIX_RCB9 (1 << 9) +#define AT91_MATRIX_RCB10 (1 << 10) +#define AT91_MATRIX_RCB11 (1 << 11) + +#define AT91_MPBS0_SFR (AT91_MATRIX + 0x114) /* MPBlock Slave 0 Special Function Register */ +#define AT91_MPBS1_SFR (AT91_MATRIX + 0x11C) /* MPBlock Slave 1 Special Function Register */ + +#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x120) /* EBI Chip Select Assignment Register */ +#define AT91_MATRIX_EBI_CS1A (1 << 1) /* Chip Select 1 Assignment */ +#define AT91_MATRIX_EBI_CS1A_SMC (0 << 1) +#define AT91_MATRIX_EBI_CS1A_BCRAMC (1 << 1) +#define AT91_MATRIX_EBI_CS3A (1 << 3) /* Chip Select 3 Assignment */ +#define AT91_MATRIX_EBI_CS3A_SMC (0 << 3) +#define AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA (1 << 3) +#define AT91_MATRIX_EBI_CS4A (1 << 4) /* Chip Select 4 Assignment */ +#define AT91_MATRIX_EBI_CS4A_SMC (0 << 4) +#define AT91_MATRIX_EBI_CS4A_SMC_CF1 (1 << 4) +#define AT91_MATRIX_EBI_CS5A (1 << 5) /* Chip Select 5 Assignment */ +#define AT91_MATRIX_EBI_CS5A_SMC (0 << 5) +#define AT91_MATRIX_EBI_CS5A_SMC_CF2 (1 << 5) +#define AT91_MATRIX_EBI_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */ +#define AT91_MATRIX_EBI_DQSPDC (1 << 9) /* Data Qualifier Strobe Pull-Down Configuration */ +#define AT91_MATRIX_EBI_VDDIOMSEL (1 << 16) /* Memory voltage selection */ 
+#define AT91_MATRIX_EBI_VDDIOMSEL_1_8V (0 << 16) +#define AT91_MATRIX_EBI_VDDIOMSEL_3_3V (1 << 16) + +#define AT91_MPBS2_SFR (AT91_MATRIX + 0x12C) /* MPBlock Slave 2 Special Function Register */ +#define AT91_MPBS3_SFR (AT91_MATRIX + 0x130) /* MPBlock Slave 3 Special Function Register */ +#define AT91_APB_SFR (AT91_MATRIX + 0x134) /* APB Bridge Special Function Register */ + +#endif diff --git a/include/asm-arm/arch-at91/at91sam9260_matrix.h b/include/asm-arm/arch-at91/at91sam9260_matrix.h index aacb1e976422..a8e9fec6c735 100644 --- a/include/asm-arm/arch-at91/at91sam9260_matrix.h +++ b/include/asm-arm/arch-at91/at91sam9260_matrix.h @@ -67,7 +67,7 @@ #define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */ #define AT91_MATRIX_CS4A_SMC (0 << 4) #define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4) -#define AT91_MATRIX_CS5A (1 << 5 ) /* Chip Select 5 Assignment */ +#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */ #define AT91_MATRIX_CS5A_SMC (0 << 5) #define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5) #define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */ diff --git a/include/asm-arm/arch-at91/at91sam9263_matrix.h b/include/asm-arm/arch-at91/at91sam9263_matrix.h index 6fc6e4be624e..72f6e668e414 100644 --- a/include/asm-arm/arch-at91/at91sam9263_matrix.h +++ b/include/asm-arm/arch-at91/at91sam9263_matrix.h @@ -44,7 +44,7 @@ #define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16) #define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16) #define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16) -#define AT91_MATRIX_FIXED_DEFMSTR (7 << 18) /* Fixed Index of Default Master */ +#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */ #define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */ #define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24) #define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24) diff --git a/include/asm-arm/arch-at91/at91sam9rl_matrix.h b/include/asm-arm/arch-at91/at91sam9rl_matrix.h index b15f11b7c08d..84224174e6a1 100644 --- a/include/asm-arm/arch-at91/at91sam9rl_matrix.h +++ b/include/asm-arm/arch-at91/at91sam9rl_matrix.h @@ -38,7 +38,7 @@ #define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16) #define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16) #define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16) -#define AT91_MATRIX_FIXED_DEFMSTR (7 << 18) /* Fixed Index of Default Master */ +#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */ #define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */ #define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24) #define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24) diff --git a/include/asm-arm/arch-at91/board.h b/include/asm-arm/arch-at91/board.h index 79054965baa6..55b07bd5316c 100644 --- a/include/asm-arm/arch-at91/board.h +++ b/include/asm-arm/arch-at91/board.h @@ -34,6 +34,7 @@ #include <linux/mtd/partitions.h> #include <linux/device.h> #include <linux/i2c.h> +#include <linux/leds.h> #include <linux/spi/spi.h> /* USB Device */ @@ -71,7 +72,7 @@ struct at91_eth_data { }; extern void __init at91_add_device_eth(struct at91_eth_data *data); -#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) +#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) #define eth_platform_data at91_eth_data #endif @@ -101,13 +102,23 @@ extern void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_de extern void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices); /* Serial */ +#define ATMEL_UART_CTS 0x01 +#define ATMEL_UART_RTS 0x02 +#define 
ATMEL_UART_DSR 0x04 +#define ATMEL_UART_DTR 0x08 +#define ATMEL_UART_DCD 0x10 +#define ATMEL_UART_RI 0x20 + +extern void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins); +extern void __init at91_set_serial_console(unsigned portnr); + struct at91_uart_config { unsigned short console_tty; /* tty number of serial console */ unsigned short nr_tty; /* number of serial tty's */ short tty_map[]; /* map UART to tty number */ }; extern struct platform_device *atmel_default_console_device; -extern void __init at91_init_serial(struct at91_uart_config *config); +extern void __init __deprecated at91_init_serial(struct at91_uart_config *config); struct atmel_uart_data { short use_dma_tx; /* use transmit DMA? */ @@ -116,6 +127,23 @@ struct atmel_uart_data { }; extern void __init at91_add_device_serial(void); +/* + * SSC -- accessed through ssc_request(id). Drivers don't bind to SSC + * platform devices. Their SSC ID is part of their configuration data, + * along with information about which SSC signals they should use. + */ +#define ATMEL_SSC_TK 0x01 +#define ATMEL_SSC_TF 0x02 +#define ATMEL_SSC_TD 0x04 +#define ATMEL_SSC_TX (ATMEL_SSC_TK | ATMEL_SSC_TF | ATMEL_SSC_TD) + +#define ATMEL_SSC_RK 0x10 +#define ATMEL_SSC_RF 0x20 +#define ATMEL_SSC_RD 0x40 +#define ATMEL_SSC_RX (ATMEL_SSC_RK | ATMEL_SSC_RF | ATMEL_SSC_RD) + +extern void __init at91_add_device_ssc(unsigned id, unsigned pins); + /* LCD Controller */ struct atmel_lcdfb_info; extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data); @@ -126,10 +154,12 @@ struct atmel_ac97_data { }; extern void __init at91_add_device_ac97(struct atmel_ac97_data *data); + /* ISI */ +extern void __init at91_add_device_isi(void); + /* LEDs */ -extern u8 at91_leds_cpu; -extern u8 at91_leds_timer; extern void __init at91_init_leds(u8 cpu_led, u8 timer_led); +extern void __init at91_gpio_leds(struct gpio_led *leds, int nr); /* FIXME: this needs a better location, but gets stuff building again */ extern int at91_suspend_entering_slow_clock(void); diff --git a/include/asm-arm/arch-at91/cpu.h b/include/asm-arm/arch-at91/cpu.h index 080cbb401a87..7145166826a2 100644 --- a/include/asm-arm/arch-at91/cpu.h +++ b/include/asm-arm/arch-at91/cpu.h @@ -21,13 +21,13 @@ #define ARCH_ID_AT91SAM9260 0x019803a0 #define ARCH_ID_AT91SAM9261 0x019703a0 #define ARCH_ID_AT91SAM9263 0x019607a0 +#define ARCH_ID_AT91SAM9RL64 0x019b03a0 +#define ARCH_ID_AT91CAP9 0x039A03A0 #define ARCH_ID_AT91SAM9XE128 0x329973a0 #define ARCH_ID_AT91SAM9XE256 0x329a93a0 #define ARCH_ID_AT91SAM9XE512 0x329aa3a0 -#define ARCH_ID_AT91SAM9RL64 0x019b03a0 - #define ARCH_ID_AT91M40800 0x14080044 #define ARCH_ID_AT91R40807 0x44080746 #define ARCH_ID_AT91M40807 0x14080745 @@ -81,6 +81,11 @@ static inline unsigned long at91_arch_identify(void) #define cpu_is_at91sam9rl() (0) #endif +#ifdef CONFIG_ARCH_AT91CAP9 +#define cpu_is_at91cap9() (at91_cpu_identify() == ARCH_ID_AT91CAP9) +#else +#define cpu_is_at91cap9() (0) +#endif /* * Since this is ARM, we will never run on any AVR32 CPU. 
But these diff --git a/include/asm-arm/arch-at91/entry-macro.S b/include/asm-arm/arch-at91/entry-macro.S index cc1d850a0788..1005eee6219b 100644 --- a/include/asm-arm/arch-at91/entry-macro.S +++ b/include/asm-arm/arch-at91/entry-macro.S @@ -17,13 +17,13 @@ .endm .macro get_irqnr_preamble, base, tmp + ldr \base, =(AT91_VA_BASE_SYS + AT91_AIC) @ base virtual address of AIC peripheral .endm .macro arch_ret_to_user, tmp1, tmp2 .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - ldr \base, =(AT91_VA_BASE_SYS + AT91_AIC) @ base virtual address of AIC peripheral ldr \irqnr, [\base, #(AT91_AIC_IVR - AT91_AIC)] @ read IRQ vector register: de-asserts nIRQ to processor (and clears interrupt) ldr \irqstat, [\base, #(AT91_AIC_ISR - AT91_AIC)] @ read interrupt source number teq \irqstat, #0 @ ISR is 0 when no current interrupt, or spurious interrupt diff --git a/include/asm-arm/arch-at91/hardware.h b/include/asm-arm/arch-at91/hardware.h index 8f1cdd38a969..2c826d8247a3 100644 --- a/include/asm-arm/arch-at91/hardware.h +++ b/include/asm-arm/arch-at91/hardware.h @@ -26,6 +26,8 @@ #include <asm/arch/at91sam9263.h> #elif defined(CONFIG_ARCH_AT91SAM9RL) #include <asm/arch/at91sam9rl.h> +#elif defined(CONFIG_ARCH_AT91CAP9) +#include <asm/arch/at91cap9.h> #elif defined(CONFIG_ARCH_AT91X40) #include <asm/arch/at91x40.h> #else diff --git a/include/asm-arm/arch-at91/timex.h b/include/asm-arm/arch-at91/timex.h index a310698fb4da..f1933b0fa43f 100644 --- a/include/asm-arm/arch-at91/timex.h +++ b/include/asm-arm/arch-at91/timex.h @@ -42,6 +42,11 @@ #define AT91SAM9_MASTER_CLOCK 100000000 #define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16) +#elif defined(CONFIG_ARCH_AT91CAP9) + +#define AT91CAP9_MASTER_CLOCK 100000000 +#define CLOCK_TICK_RATE (AT91CAP9_MASTER_CLOCK/16) + #elif defined(CONFIG_ARCH_AT91X40) #define AT91X40_MASTER_CLOCK 40000000 diff --git a/include/asm-arm/arch-ep93xx/gpio.h b/include/asm-arm/arch-ep93xx/gpio.h index 1ee14a14cba0..9b1864bbd9a8 100644 --- a/include/asm-arm/arch-ep93xx/gpio.h +++ b/include/asm-arm/arch-ep93xx/gpio.h @@ -5,16 +5,6 @@ #ifndef __ASM_ARCH_GPIO_H #define __ASM_ARCH_GPIO_H -#define GPIO_IN 0 -#define GPIO_OUT 1 - -#define EP93XX_GPIO_LOW 0 -#define EP93XX_GPIO_HIGH 1 - -extern void gpio_line_config(int line, int direction); -extern int gpio_line_get(int line); -extern void gpio_line_set(int line, int value); - /* GPIO port A. */ #define EP93XX_GPIO_LINE_A(x) ((x) + 0) #define EP93XX_GPIO_LINE_EGPIO0 EP93XX_GPIO_LINE_A(0) @@ -38,7 +28,7 @@ extern void gpio_line_set(int line, int value); #define EP93XX_GPIO_LINE_EGPIO15 EP93XX_GPIO_LINE_B(7) /* GPIO port C. */ -#define EP93XX_GPIO_LINE_C(x) ((x) + 16) +#define EP93XX_GPIO_LINE_C(x) ((x) + 40) #define EP93XX_GPIO_LINE_ROW0 EP93XX_GPIO_LINE_C(0) #define EP93XX_GPIO_LINE_ROW1 EP93XX_GPIO_LINE_C(1) #define EP93XX_GPIO_LINE_ROW2 EP93XX_GPIO_LINE_C(2) @@ -71,7 +61,7 @@ extern void gpio_line_set(int line, int value); #define EP93XX_GPIO_LINE_IDEDA2 EP93XX_GPIO_LINE_E(7) /* GPIO port F. 
*/ -#define EP93XX_GPIO_LINE_F(x) ((x) + 40) +#define EP93XX_GPIO_LINE_F(x) ((x) + 16) #define EP93XX_GPIO_LINE_WP EP93XX_GPIO_LINE_F(0) #define EP93XX_GPIO_LINE_MCCD1 EP93XX_GPIO_LINE_F(1) #define EP93XX_GPIO_LINE_MCCD2 EP93XX_GPIO_LINE_F(2) @@ -103,5 +93,49 @@ extern void gpio_line_set(int line, int value); #define EP93XX_GPIO_LINE_DD6 EP93XX_GPIO_LINE_H(6) #define EP93XX_GPIO_LINE_DD7 EP93XX_GPIO_LINE_H(7) +/* maximum value for gpio line identifiers */ +#define EP93XX_GPIO_LINE_MAX EP93XX_GPIO_LINE_H(7) + +/* maximum value for irq capable line identifiers */ +#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7) + +/* new generic GPIO API - see Documentation/gpio.txt */ + +static inline int gpio_request(unsigned gpio, const char *label) +{ + if (gpio > EP93XX_GPIO_LINE_MAX) + return -EINVAL; + return 0; +} + +static inline void gpio_free(unsigned gpio) +{ +} + +int gpio_direction_input(unsigned gpio); +int gpio_direction_output(unsigned gpio, int value); +int gpio_get_value(unsigned gpio); +void gpio_set_value(unsigned gpio, int value); + +#include <asm-generic/gpio.h> /* cansleep wrappers */ + +/* + * Map GPIO A0..A7 (0..7) to irq 64..71, + * B0..B7 (7..15) to irq 72..79, and + * F0..F7 (16..24) to irq 80..87. + */ + +static inline int gpio_to_irq(unsigned gpio) +{ + if (gpio <= EP93XX_GPIO_LINE_MAX_IRQ) + return 64 + gpio; + + return -EINVAL; +} + +static inline int irq_to_gpio(unsigned irq) +{ + return irq - gpio_to_irq(0); +} #endif diff --git a/include/asm-arm/arch-ep93xx/irqs.h b/include/asm-arm/arch-ep93xx/irqs.h index 2a8c63638c5e..53d4a68bfc88 100644 --- a/include/asm-arm/arch-ep93xx/irqs.h +++ b/include/asm-arm/arch-ep93xx/irqs.h @@ -67,12 +67,6 @@ #define IRQ_EP93XX_SAI 60 #define EP93XX_VIC2_VALID_IRQ_MASK 0x1fffffff -/* - * Map GPIO A0..A7 to irq 64..71, B0..B7 to 72..79, and - * F0..F7 to 80..87. 
- */ -#define IRQ_EP93XX_GPIO(x) (64 + (((x) + (((x) >> 2) & 8)) & 0x1f)) - #define NR_EP93XX_IRQS (64 + 24) #define EP93XX_BOARD_IRQ(x) (NR_EP93XX_IRQS + (x)) diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h index eeeea90cd5a9..9c5d2357aff3 100644 --- a/include/asm-arm/arch-ixp4xx/io.h +++ b/include/asm-arm/arch-ixp4xx/io.h @@ -61,13 +61,13 @@ __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype) if((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff)) return __arm_ioremap(addr, size, mtype); - return (void *)addr; + return (void __iomem *)addr; } static inline void __ixp4xx_iounmap(void __iomem *addr) { - if ((u32)addr >= VMALLOC_START) + if ((__force u32)addr >= VMALLOC_START) __iounmap(addr); } @@ -141,9 +141,9 @@ __ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count) static inline void __ixp4xx_writel(u32 value, volatile void __iomem *p) { - u32 addr = (u32)p; + u32 addr = (__force u32)p; if (addr >= VMALLOC_START) { - __raw_writel(value, addr); + __raw_writel(value, p); return; } @@ -208,11 +208,11 @@ __ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count) static inline unsigned long __ixp4xx_readl(const volatile void __iomem *p) { - u32 addr = (u32)p; + u32 addr = (__force u32)p; u32 data; if (addr >= VMALLOC_START) - return __raw_readl(addr); + return __raw_readl(p); if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data)) return 0xffffffff; @@ -438,7 +438,7 @@ __ixp4xx_ioread32(const void __iomem *addr) return (unsigned int)__ixp4xx_inl(port & PIO_MASK); else { #ifndef CONFIG_IXP4XX_INDIRECT_PCI - return le32_to_cpu(__raw_readl((u32)port)); + return le32_to_cpu((__force __le32)__raw_readl(addr)); #else return (unsigned int)__ixp4xx_readl(addr); #endif @@ -523,7 +523,7 @@ __ixp4xx_iowrite32(u32 value, void __iomem *addr) __ixp4xx_outl(value, port & PIO_MASK); else #ifndef CONFIG_IXP4XX_INDIRECT_PCI - __raw_writel(cpu_to_le32(value), port); + __raw_writel((u32 __force)cpu_to_le32(value), addr); #else __ixp4xx_writel(value, addr); #endif diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h index 2a44d3d67980..2ce28e3fd325 100644 --- a/include/asm-arm/arch-ixp4xx/platform.h +++ b/include/asm-arm/arch-ixp4xx/platform.h @@ -76,17 +76,6 @@ extern unsigned long ixp4xx_exp_bus_size; #define IXP4XX_UART_XTAL 14745600 /* - * The IXP4xx chips do not have an I2C unit, so GPIO lines are just - * used to - * Used as platform_data to provide GPIO pin information to the ixp42x - * I2C driver. - */ -struct ixp4xx_i2c_pins { - unsigned long sda_pin; - unsigned long scl_pin; -}; - -/* * This structure provide a means for the board setup code * to give information to th pata_ixp4xx driver. It is * passed as platform_data. diff --git a/include/asm-arm/arch-ks8695/regs-gpio.h b/include/asm-arm/arch-ks8695/regs-gpio.h index 57fcf9fc82e4..6b95d77aea19 100644 --- a/include/asm-arm/arch-ks8695/regs-gpio.h +++ b/include/asm-arm/arch-ks8695/regs-gpio.h @@ -49,5 +49,7 @@ #define IOPC_TM_FALLING (4) /* Falling Edge Detection */ #define IOPC_TM_EDGE (6) /* Both Edge Detection */ +/* Port Data Register */ +#define IOPD_(x) (1 << (x)) /* Signal Level of GPIO Pin x */ #endif diff --git a/include/asm-arm/arch-msm/board.h b/include/asm-arm/arch-msm/board.h new file mode 100644 index 000000000000..763051f8ba14 --- /dev/null +++ b/include/asm-arm/arch-msm/board.h @@ -0,0 +1,37 @@ +/* linux/include/asm-arm/arch-msm/board.h + * + * Copyright (C) 2007 Google, Inc. 
+ * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_BOARD_H +#define __ASM_ARCH_MSM_BOARD_H + +#include <linux/types.h> + +/* platform device data structures */ + +struct msm_mddi_platform_data +{ + void (*panel_power)(int on); + unsigned has_vsync_irq:1; +}; + +/* common init routines for use by arch/arm/mach-msm/board-*.c */ + +void __init msm_add_devices(void); +void __init msm_map_common_io(void); +void __init msm_init_irq(void); +void __init msm_init_gpio(void); + +#endif diff --git a/include/asm-arm/arch-msm/debug-macro.S b/include/asm-arm/arch-msm/debug-macro.S new file mode 100644 index 000000000000..393d5272e506 --- /dev/null +++ b/include/asm-arm/arch-msm/debug-macro.S @@ -0,0 +1,40 @@ +/* include/asm-arm/arch-msm7200/debug-macro.S + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/hardware.h> +#include <asm/arch/msm_iomap.h> + + .macro addruart,rx + @ see if the MMU is enabled and select appropriate base address + mrc p15, 0, \rx, c1, c0 + tst \rx, #1 + ldreq \rx, =MSM_UART1_PHYS + ldrne \rx, =MSM_UART1_BASE + .endm + + .macro senduart,rd,rx + str \rd, [\rx, #0x0C] + .endm + + .macro waituart,rd,rx + @ wait for TX_READY +1: ldr \rd, [\rx, #0x08] + tst \rd, #0x04 + beq 1b + .endm + + .macro busyuart,rd,rx + .endm diff --git a/include/asm-arm/arch-msm/dma.h b/include/asm-arm/arch-msm/dma.h new file mode 100644 index 000000000000..e4b565b27b35 --- /dev/null +++ b/include/asm-arm/arch-msm/dma.h @@ -0,0 +1,151 @@ +/* linux/include/asm-arm/arch-msm/dma.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ASM_ARCH_MSM_DMA_H + +#include <linux/list.h> +#include <asm/arch/msm_iomap.h> + +struct msm_dmov_cmd { + struct list_head list; + unsigned int cmdptr; + void (*complete_func)(struct msm_dmov_cmd *cmd, unsigned int result); +/* void (*user_result_func)(struct msm_dmov_cmd *cmd); */ +}; + +void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd); +void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd); +int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr); +/* int msm_dmov_exec_cmd_etc(unsigned id, unsigned int cmdptr, int timeout, int interruptible); */ + + + +#define DMOV_SD0(off, ch) (MSM_DMOV_BASE + 0x0000 + (off) + ((ch) << 2)) +#define DMOV_SD1(off, ch) (MSM_DMOV_BASE + 0x0400 + (off) + ((ch) << 2)) +#define DMOV_SD2(off, ch) (MSM_DMOV_BASE + 0x0800 + (off) + ((ch) << 2)) +#define DMOV_SD3(off, ch) (MSM_DMOV_BASE + 0x0C00 + (off) + ((ch) << 2)) + +/* only security domain 3 is available to the ARM11 + * SD0 -> mARM trusted, SD1 -> mARM nontrusted, SD2 -> aDSP, SD3 -> aARM + */ + +#define DMOV_CMD_PTR(ch) DMOV_SD3(0x000, ch) +#define DMOV_CMD_LIST (0 << 29) /* does not work */ +#define DMOV_CMD_PTR_LIST (1 << 29) /* works */ +#define DMOV_CMD_INPUT_CFG (2 << 29) /* untested */ +#define DMOV_CMD_OUTPUT_CFG (3 << 29) /* untested */ +#define DMOV_CMD_ADDR(addr) ((addr) >> 3) + +#define DMOV_RSLT(ch) DMOV_SD3(0x040, ch) +#define DMOV_RSLT_VALID (1 << 31) /* 0 == host has empties result fifo */ +#define DMOV_RSLT_ERROR (1 << 3) +#define DMOV_RSLT_FLUSH (1 << 2) +#define DMOV_RSLT_DONE (1 << 1) /* top pointer done */ +#define DMOV_RSLT_USER (1 << 0) /* command with FR force result */ + +#define DMOV_FLUSH0(ch) DMOV_SD3(0x080, ch) +#define DMOV_FLUSH1(ch) DMOV_SD3(0x0C0, ch) +#define DMOV_FLUSH2(ch) DMOV_SD3(0x100, ch) +#define DMOV_FLUSH3(ch) DMOV_SD3(0x140, ch) +#define DMOV_FLUSH4(ch) DMOV_SD3(0x180, ch) +#define DMOV_FLUSH5(ch) DMOV_SD3(0x1C0, ch) + +#define DMOV_STATUS(ch) DMOV_SD3(0x200, ch) +#define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29)) +#define DMOV_STATUS_CMD_COUNT(n) (((n) >> 27) & 3) +#define DMOV_STATUS_RSLT_VALID (1 << 1) +#define DMOV_STATUS_CMD_PTR_RDY (1 << 0) + +#define DMOV_ISR DMOV_SD3(0x380, 0) + +#define DMOV_CONFIG(ch) DMOV_SD3(0x300, ch) +#define DMOV_CONFIG_FORCE_TOP_PTR_RSLT (1 << 2) +#define DMOV_CONFIG_FORCE_FLUSH_RSLT (1 << 1) +#define DMOV_CONFIG_IRQ_EN (1 << 0) + +/* channel assignments */ + +#define DMOV_NAND_CHAN 7 +#define DMOV_NAND_CRCI_CMD 5 +#define DMOV_NAND_CRCI_DATA 4 + +#define DMOV_SDC1_CHAN 8 +#define DMOV_SDC1_CRCI 6 + +#define DMOV_SDC2_CHAN 8 +#define DMOV_SDC2_CRCI 7 + +#define DMOV_TSIF_CHAN 10 +#define DMOV_TSIF_CRCI 10 + +#define DMOV_USB_CHAN 11 + +/* no client rate control ifc (eg, ram) */ +#define DMOV_NONE_CRCI 0 + + +/* If the CMD_PTR register has CMD_PTR_LIST selected, the data mover + * is going to walk a list of 32bit pointers as described below. Each + * pointer points to a *array* of dmov_s, etc structs. The last pointer + * in the list is marked with CMD_PTR_LP. The last struct in each array + * is marked with CMD_LC (see below). + */ +#define CMD_PTR_ADDR(addr) ((addr) >> 3) +#define CMD_PTR_LP (1 << 31) /* last pointer */ +#define CMD_PTR_PT (3 << 29) /* ? 
*/ + +/* Single Item Mode */ +typedef struct { + unsigned cmd; + unsigned src; + unsigned dst; + unsigned len; +} dmov_s; + +/* Scatter/Gather Mode */ +typedef struct { + unsigned cmd; + unsigned src_dscr; + unsigned dst_dscr; + unsigned _reserved; +} dmov_sg; + +/* bits for the cmd field of the above structures */ + +#define CMD_LC (1 << 31) /* last command */ +#define CMD_FR (1 << 22) /* force result -- does not work? */ +#define CMD_OCU (1 << 21) /* other channel unblock */ +#define CMD_OCB (1 << 20) /* other channel block */ +#define CMD_TCB (1 << 19) /* ? */ +#define CMD_DAH (1 << 18) /* destination address hold -- does not work?*/ +#define CMD_SAH (1 << 17) /* source address hold -- does not work? */ + +#define CMD_MODE_SINGLE (0 << 0) /* dmov_s structure used */ +#define CMD_MODE_SG (1 << 0) /* untested */ +#define CMD_MODE_IND_SG (2 << 0) /* untested */ +#define CMD_MODE_BOX (3 << 0) /* untested */ + +#define CMD_DST_SWAP_BYTES (1 << 14) /* exchange each byte n with byte n+1 */ +#define CMD_DST_SWAP_SHORTS (1 << 15) /* exchange each short n with short n+1 */ +#define CMD_DST_SWAP_WORDS (1 << 16) /* exchange each word n with word n+1 */ + +#define CMD_SRC_SWAP_BYTES (1 << 11) /* exchange each byte n with byte n+1 */ +#define CMD_SRC_SWAP_SHORTS (1 << 12) /* exchange each short n with short n+1 */ +#define CMD_SRC_SWAP_WORDS (1 << 13) /* exchange each word n with word n+1 */ + +#define CMD_DST_CRCI(n) (((n) & 15) << 7) +#define CMD_SRC_CRCI(n) (((n) & 15) << 3) + +#endif diff --git a/include/asm-arm/arch-msm/entry-macro.S b/include/asm-arm/arch-msm/entry-macro.S new file mode 100644 index 000000000000..ee24aece4cb0 --- /dev/null +++ b/include/asm-arm/arch-msm/entry-macro.S @@ -0,0 +1,38 @@ +/* include/asm-arm/arch-msm7200/entry-macro.S + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/arch/msm_iomap.h> + + .macro disable_fiq + .endm + + .macro get_irqnr_preamble, base, tmp + @ enable imprecise aborts + cpsie a + mov \base, #MSM_VIC_BASE + .endm + + .macro arch_ret_to_user, tmp1, tmp2 + .endm + + .macro get_irqnr_and_base, irqnr, irqstat, base, tmp + @ 0xD0 has irq# or old irq# if the irq has been handled + @ 0xD4 has irq# or -1 if none pending *but* if you just + @ read 0xD4 you never get the first irq for some reason + ldr \irqnr, [\base, #0xD0] + ldr \irqnr, [\base, #0xD4] + cmp \irqnr, #0xffffffff + .endm diff --git a/include/asm-arm/arch-msm/hardware.h b/include/asm-arm/arch-msm/hardware.h new file mode 100644 index 000000000000..89af2b70182f --- /dev/null +++ b/include/asm-arm/arch-msm/hardware.h @@ -0,0 +1,18 @@ +/* linux/include/asm-arm/arch-msm/hardware.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_HARDWARE_H + +#endif diff --git a/include/asm-arm/arch-msm/io.h b/include/asm-arm/arch-msm/io.h new file mode 100644 index 000000000000..4645ae26b62a --- /dev/null +++ b/include/asm-arm/arch-msm/io.h @@ -0,0 +1,33 @@ +/* include/asm-arm/arch-msm/io.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARM_ARCH_IO_H +#define __ASM_ARM_ARCH_IO_H + +#define IO_SPACE_LIMIT 0xffffffff + +#define __arch_ioremap __msm_ioremap +#define __arch_iounmap __iounmap + +void __iomem *__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype); + +static inline void __iomem *__io(unsigned long addr) +{ + return (void __iomem *)addr; +} +#define __io(a) __io(a) +#define __mem_pci(a) (a) + +#endif diff --git a/include/asm-arm/arch-msm/irqs.h b/include/asm-arm/arch-msm/irqs.h new file mode 100644 index 000000000000..565430cfaa7e --- /dev/null +++ b/include/asm-arm/arch-msm/irqs.h @@ -0,0 +1,89 @@ +/* linux/include/asm-arm/arch-msm/irqs.h + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ASM_ARCH_MSM_IRQS_H + +/* MSM ARM11 Interrupt Numbers */ +/* See 80-VE113-1 A, pp219-221 */ + +#define INT_A9_M2A_0 0 +#define INT_A9_M2A_1 1 +#define INT_A9_M2A_2 2 +#define INT_A9_M2A_3 3 +#define INT_A9_M2A_4 4 +#define INT_A9_M2A_5 5 +#define INT_A9_M2A_6 6 +#define INT_GP_TIMER_EXP 7 +#define INT_DEBUG_TIMER_EXP 8 +#define INT_UART1 9 +#define INT_UART2 10 +#define INT_UART3 11 +#define INT_UART1_RX 12 +#define INT_UART2_RX 13 +#define INT_UART3_RX 14 +#define INT_USB_OTG 15 +#define INT_MDDI_PRI 16 +#define INT_MDDI_EXT 17 +#define INT_MDDI_CLIENT 18 +#define INT_MDP 19 +#define INT_GRAPHICS 20 +#define INT_ADM_AARM 21 +#define INT_ADSP_A11 22 +#define INT_ADSP_A9_A11 23 +#define INT_SDC1_0 24 +#define INT_SDC1_1 25 +#define INT_SDC2_0 26 +#define INT_SDC2_1 27 +#define INT_KEYSENSE 28 +#define INT_TCHSCRN_SSBI 29 +#define INT_TCHSCRN1 30 +#define INT_TCHSCRN2 31 + +#define INT_GPIO_GROUP1 (32 + 0) +#define INT_GPIO_GROUP2 (32 + 1) +#define INT_PWB_I2C (32 + 2) +#define INT_SOFTRESET (32 + 3) +#define INT_NAND_WR_ER_DONE (32 + 4) +#define INT_NAND_OP_DONE (32 + 5) +#define INT_PBUS_ARM11 (32 + 6) +#define INT_AXI_MPU_SMI (32 + 7) +#define INT_AXI_MPU_EBI1 (32 + 8) +#define INT_AD_HSSD (32 + 9) +#define INT_ARM11_PMU (32 + 10) +#define INT_ARM11_DMA (32 + 11) +#define INT_TSIF_IRQ (32 + 12) +#define INT_UART1DM_IRQ (32 + 13) +#define INT_UART1DM_RX (32 + 14) +#define INT_USB_HS (32 + 15) +#define INT_SDC3_0 (32 + 16) +#define INT_SDC3_1 (32 + 17) +#define INT_SDC4_0 (32 + 18) +#define INT_SDC4_1 (32 + 19) +#define INT_UART2DM_RX (32 + 20) +#define INT_UART2DM_IRQ (32 + 21) + +/* 22-31 are reserved */ + +#define MSM_IRQ_BIT(irq) (1 << ((irq) & 31)) + +#define NR_MSM_IRQS 64 +#define NR_GPIO_IRQS 122 +#define NR_BOARD_IRQS 64 +#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS) + +#define MSM_GPIO_TO_INT(n) (NR_MSM_IRQS + (n)) + +#endif diff --git a/include/asm-arm/arch-msm/memory.h b/include/asm-arm/arch-msm/memory.h new file mode 100644 index 000000000000..b5ce0e9ac86d --- /dev/null +++ b/include/asm-arm/arch-msm/memory.h @@ -0,0 +1,27 @@ +/* linux/include/asm-arm/arch-msm/memory.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MEMORY_H +#define __ASM_ARCH_MEMORY_H + +/* physical offset of RAM */ +#define PHYS_OFFSET UL(0x10000000) + +/* bus address and physical addresses are identical */ +#define __virt_to_bus(x) __virt_to_phys(x) +#define __bus_to_virt(x) __phys_to_virt(x) + +#endif + diff --git a/include/asm-arm/arch-msm/msm_iomap.h b/include/asm-arm/arch-msm/msm_iomap.h new file mode 100644 index 000000000000..b8955cc26fec --- /dev/null +++ b/include/asm-arm/arch-msm/msm_iomap.h @@ -0,0 +1,104 @@ +/* linux/include/asm-arm/arch-msm/msm_iomap.h + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * + * The MSM peripherals are spread all over across 768MB of physical + * space, which makes just having a simple IO_ADDRESS macro to slide + * them into the right virtual location rough. Instead, we will + * provide a master phys->virt mapping for peripherals here. + * + */ + +#ifndef __ASM_ARCH_MSM_IOMAP_H +#define __ASM_ARCH_MSM_IOMAP_H + +#include <asm/sizes.h> + +/* Physical base address and size of peripherals. + * Ordered by the virtual base addresses they will be mapped at. + * + * MSM_VIC_BASE must be an value that can be loaded via a "mov" + * instruction, otherwise entry-macro.S will not compile. + * + * If you add or remove entries here, you'll want to edit the + * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your + * changes. + * + */ + +#define MSM_VIC_BASE 0xE0000000 +#define MSM_VIC_PHYS 0xC0000000 +#define MSM_VIC_SIZE SZ_4K + +#define MSM_CSR_BASE 0xE0001000 +#define MSM_CSR_PHYS 0xC0100000 +#define MSM_CSR_SIZE SZ_4K + +#define MSM_GPT_PHYS MSM_CSR_PHYS +#define MSM_GPT_BASE MSM_CSR_BASE +#define MSM_GPT_SIZE SZ_4K + +#define MSM_DMOV_BASE 0xE0002000 +#define MSM_DMOV_PHYS 0xA9700000 +#define MSM_DMOV_SIZE SZ_4K + +#define MSM_UART1_BASE 0xE0003000 +#define MSM_UART1_PHYS 0xA9A00000 +#define MSM_UART1_SIZE SZ_4K + +#define MSM_UART2_BASE 0xE0004000 +#define MSM_UART2_PHYS 0xA9B00000 +#define MSM_UART2_SIZE SZ_4K + +#define MSM_UART3_BASE 0xE0005000 +#define MSM_UART3_PHYS 0xA9C00000 +#define MSM_UART3_SIZE SZ_4K + +#define MSM_I2C_BASE 0xE0006000 +#define MSM_I2C_PHYS 0xA9900000 +#define MSM_I2C_SIZE SZ_4K + +#define MSM_GPIO1_BASE 0xE0007000 +#define MSM_GPIO1_PHYS 0xA9200000 +#define MSM_GPIO1_SIZE SZ_4K + +#define MSM_GPIO2_BASE 0xE0008000 +#define MSM_GPIO2_PHYS 0xA9300000 +#define MSM_GPIO2_SIZE SZ_4K + +#define MSM_HSUSB_BASE 0xE0009000 +#define MSM_HSUSB_PHYS 0xA0800000 +#define MSM_HSUSB_SIZE SZ_4K + +#define MSM_CLK_CTL_BASE 0xE000A000 +#define MSM_CLK_CTL_PHYS 0xA8600000 +#define MSM_CLK_CTL_SIZE SZ_4K + +#define MSM_PMDH_BASE 0xE000B000 +#define MSM_PMDH_PHYS 0xAA600000 +#define MSM_PMDH_SIZE SZ_4K + +#define MSM_EMDH_BASE 0xE000C000 +#define MSM_EMDH_PHYS 0xAA700000 +#define MSM_EMDH_SIZE SZ_4K + +#define MSM_MDP_BASE 0xE0010000 +#define MSM_MDP_PHYS 0xAA200000 +#define MSM_MDP_SIZE 0x000F0000 + +#define MSM_SHARED_RAM_BASE 0xE0100000 +#define MSM_SHARED_RAM_PHYS 0x01F00000 +#define MSM_SHARED_RAM_SIZE SZ_1M + +#endif diff --git a/include/asm-arm/arch-msm/system.h b/include/asm-arm/arch-msm/system.h new file mode 100644 index 000000000000..7c5544bdd0c7 --- /dev/null +++ b/include/asm-arm/arch-msm/system.h @@ -0,0 +1,23 @@ +/* linux/include/asm-arm/arch-msm/system.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <asm/hardware.h> + +void arch_idle(void); + +static inline void arch_reset(char mode) +{ + for (;;) ; /* depends on IPC w/ other core */ +} diff --git a/include/asm-arm/arch-msm/timex.h b/include/asm-arm/arch-msm/timex.h new file mode 100644 index 000000000000..154b23fb3599 --- /dev/null +++ b/include/asm-arm/arch-msm/timex.h @@ -0,0 +1,20 @@ +/* linux/include/asm-arm/arch-msm/timex.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_TIMEX_H + +#define CLOCK_TICK_RATE 1000000 + +#endif diff --git a/include/asm-arm/arch-msm/uncompress.h b/include/asm-arm/arch-msm/uncompress.h new file mode 100644 index 000000000000..e91ed786ffec --- /dev/null +++ b/include/asm-arm/arch-msm/uncompress.h @@ -0,0 +1,36 @@ +/* linux/include/asm-arm/arch-msm/uncompress.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_UNCOMPRESS_H + +#include "hardware.h" + +static void putc(int c) +{ +} + +static inline void flush(void) +{ +} + +static inline void arch_decomp_setup(void) +{ +} + +static inline void arch_decomp_wdog(void) +{ +} + +#endif diff --git a/include/asm-arm/arch-msm/vmalloc.h b/include/asm-arm/arch-msm/vmalloc.h new file mode 100644 index 000000000000..60f8d910e825 --- /dev/null +++ b/include/asm-arm/arch-msm/vmalloc.h @@ -0,0 +1,22 @@ +/* linux/include/asm-arm/arch-msm/vmalloc.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_VMALLOC_H +#define __ASM_ARCH_MSM_VMALLOC_H + +#define VMALLOC_END (PAGE_OFFSET + 0x10000000) + +#endif + diff --git a/include/asm-arm/arch-orion/debug-macro.S b/include/asm-arm/arch-orion/debug-macro.S new file mode 100644 index 000000000000..e2a80641f214 --- /dev/null +++ b/include/asm-arm/arch-orion/debug-macro.S @@ -0,0 +1,17 @@ +/* + * linux/include/asm-arm/arch-orion/debug-macro.S + * + * Debugging macro include header + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + + .macro addruart,rx + mov \rx, #0xf1000000 + orr \rx, \rx, #0x00012000 + .endm + +#define UART_SHIFT 2 +#include <asm/hardware/debug-8250.S> diff --git a/include/asm-arm/arch-orion/dma.h b/include/asm-arm/arch-orion/dma.h new file mode 100644 index 000000000000..40a8c178f10d --- /dev/null +++ b/include/asm-arm/arch-orion/dma.h @@ -0,0 +1 @@ +/* empty */ diff --git a/include/asm-arm/arch-orion/entry-macro.S b/include/asm-arm/arch-orion/entry-macro.S new file mode 100644 index 000000000000..b76075a7e44b --- /dev/null +++ b/include/asm-arm/arch-orion/entry-macro.S @@ -0,0 +1,31 @@ +/* + * include/asm-arm/arch-orion/entry-macro.S + * + * Low-level IRQ helper macros for Orion platforms + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <asm/arch/orion.h> + + .macro disable_fiq + .endm + + .macro arch_ret_to_user, tmp1, tmp2 + .endm + + .macro get_irqnr_preamble, base, tmp + ldr \base, =MAIN_IRQ_CAUSE + .endm + + .macro get_irqnr_and_base, irqnr, irqstat, base, tmp + ldr \irqstat, [\base, #0] @ main cause + ldr \tmp, [\base, #(MAIN_IRQ_MASK - MAIN_IRQ_CAUSE)] @ main mask + mov \irqnr, #0 @ default irqnr + @ find cause bits that are unmasked + ands \irqstat, \irqstat, \tmp @ clear Z flag if any + clzne \irqnr, \irqstat @ calc irqnr + rsbne \irqnr, \irqnr, #31 + .endm diff --git a/include/asm-arm/arch-orion/gpio.h b/include/asm-arm/arch-orion/gpio.h new file mode 100644 index 000000000000..d66284f9a14c --- /dev/null +++ b/include/asm-arm/arch-orion/gpio.h @@ -0,0 +1,28 @@ +/* + * include/asm-arm/arch-orion/gpio.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +extern int gpio_request(unsigned pin, const char *label); +extern void gpio_free(unsigned pin); +extern int gpio_direction_input(unsigned pin); +extern int gpio_direction_output(unsigned pin, int value); +extern int gpio_get_value(unsigned pin); +extern void gpio_set_value(unsigned pin, int value); +extern void orion_gpio_set_blink(unsigned pin, int blink); +extern void gpio_display(void); /* debug */ + +static inline int gpio_to_irq(int pin) +{ + return pin + IRQ_ORION_GPIO_START; +} + +static inline int irq_to_gpio(int irq) +{ + return irq - IRQ_ORION_GPIO_START; +} + +#include <asm-generic/gpio.h> /* cansleep wrappers */ diff --git a/include/asm-arm/arch-orion/hardware.h b/include/asm-arm/arch-orion/hardware.h new file mode 100644 index 000000000000..8a12d213fbdc --- /dev/null +++ b/include/asm-arm/arch-orion/hardware.h @@ -0,0 +1,24 @@ +/* + * include/asm-arm/arch-orion/hardware.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __ASM_ARCH_HARDWARE_H__ +#define __ASM_ARCH_HARDWARE_H__ + +#include "orion.h" + +#define PCI_MEMORY_VADDR ORION_PCI_SYS_MEM_BASE +#define PCI_IO_VADDR ORION_PCI_SYS_IO_BASE + +#define pcibios_assign_all_busses() 1 + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x01000000 +#define PCIMEM_BASE PCI_MEMORY_VADDR /* mem base for VGA */ + +#endif /* _ASM_ARCH_HARDWARE_H */ diff --git a/include/asm-arm/arch-orion/io.h b/include/asm-arm/arch-orion/io.h new file mode 100644 index 000000000000..e0b8c39b9167 --- /dev/null +++ b/include/asm-arm/arch-orion/io.h @@ -0,0 +1,27 @@ +/* + * include/asm-arm/arch-orion/io.h + * + * Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __ASM_ARM_ARCH_IO_H +#define __ASM_ARM_ARCH_IO_H + +#include "orion.h" + +#define IO_SPACE_LIMIT 0xffffffff +#define IO_SPACE_REMAP ORION_PCI_SYS_IO_BASE + +static inline void __iomem *__io(unsigned long addr) +{ + return (void __iomem *)addr; +} + +#define __io(a) __io(a) +#define __mem_pci(a) (a) + +#endif diff --git a/include/asm-arm/arch-orion/irqs.h b/include/asm-arm/arch-orion/irqs.h new file mode 100644 index 000000000000..eea65ca6076a --- /dev/null +++ b/include/asm-arm/arch-orion/irqs.h @@ -0,0 +1,61 @@ +/* + * include/asm-arm/arch-orion/irqs.h + * + * IRQ definitions for Orion SoC + * + * Maintainer: Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __ASM_ARCH_IRQS_H__ +#define __ASM_ARCH_IRQS_H__ + +#include "orion.h" /* need GPIO_MAX */ + +/* + * Orion Main Interrupt Controller + */ +#define IRQ_ORION_BRIDGE 0 +#define IRQ_ORION_DOORBELL_H2C 1 +#define IRQ_ORION_DOORBELL_C2H 2 +#define IRQ_ORION_UART0 3 +#define IRQ_ORION_UART1 4 +#define IRQ_ORION_I2C 5 +#define IRQ_ORION_GPIO_0_7 6 +#define IRQ_ORION_GPIO_8_15 7 +#define IRQ_ORION_GPIO_16_23 8 +#define IRQ_ORION_GPIO_24_31 9 +#define IRQ_ORION_PCIE0_ERR 10 +#define IRQ_ORION_PCIE0_INT 11 +#define IRQ_ORION_USB1_CTRL 12 +#define IRQ_ORION_DEV_BUS_ERR 14 +#define IRQ_ORION_PCI_ERR 15 +#define IRQ_ORION_USB_BR_ERR 16 +#define IRQ_ORION_USB0_CTRL 17 +#define IRQ_ORION_ETH_RX 18 +#define IRQ_ORION_ETH_TX 19 +#define IRQ_ORION_ETH_MISC 20 +#define IRQ_ORION_ETH_SUM 21 +#define IRQ_ORION_ETH_ERR 22 +#define IRQ_ORION_IDMA_ERR 23 +#define IRQ_ORION_IDMA_0 24 +#define IRQ_ORION_IDMA_1 25 +#define IRQ_ORION_IDMA_2 26 +#define IRQ_ORION_IDMA_3 27 +#define IRQ_ORION_CESA 28 +#define IRQ_ORION_SATA 29 +#define IRQ_ORION_XOR0 30 +#define IRQ_ORION_XOR1 31 + +/* + * Orion General Purpose Pins + */ +#define IRQ_ORION_GPIO_START 32 +#define NR_GPIO_IRQS GPIO_MAX + +#define NR_IRQS (IRQ_ORION_GPIO_START + NR_GPIO_IRQS) + +#endif /* __ASM_ARCH_IRQS_H__ */ diff --git a/include/asm-arm/arch-orion/memory.h b/include/asm-arm/arch-orion/memory.h new file mode 100644 index 000000000000..d954dba87ced --- /dev/null +++ b/include/asm-arm/arch-orion/memory.h @@ -0,0 +1,15 @@ +/* + * include/asm-arm/arch-orion/memory.h + * + * Marvell Orion memory definitions + */ + +#ifndef __ASM_ARCH_MMU_H +#define __ASM_ARCH_MMU_H + +#define PHYS_OFFSET UL(0x00000000) + +#define __virt_to_bus(x) __virt_to_phys(x) +#define __bus_to_virt(x) __phys_to_virt(x) + +#endif diff --git 
a/include/asm-arm/arch-orion/orion.h b/include/asm-arm/arch-orion/orion.h new file mode 100644 index 000000000000..f787f752e58c --- /dev/null +++ b/include/asm-arm/arch-orion/orion.h @@ -0,0 +1,143 @@ +/* + * include/asm-arm/arch-orion/orion.h + * + * Generic definitions of Orion SoC flavors: + * Orion-1, Orion-NAS, Orion-VoIP, and Orion-2. + * + * Maintainer: Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __ASM_ARCH_ORION_H__ +#define __ASM_ARCH_ORION_H__ + +/******************************************************************************* + * Orion Address Map + * Use the same mapping (1:1 virtual:physical) of internal registers and + * PCI system (PCI+PCIE) for all machines. + * Each machine defines the rest of its mapping (e.g. device bus flashes) + ******************************************************************************/ +#define ORION_REGS_BASE 0xf1000000 +#define ORION_REGS_SIZE SZ_1M + +#define ORION_PCI_SYS_MEM_BASE 0xe0000000 +#define ORION_PCIE_MEM_BASE ORION_PCI_SYS_MEM_BASE +#define ORION_PCIE_MEM_SIZE SZ_128M +#define ORION_PCI_MEM_BASE (ORION_PCIE_MEM_BASE + ORION_PCIE_MEM_SIZE) +#define ORION_PCI_MEM_SIZE SZ_128M + +#define ORION_PCI_SYS_IO_BASE 0xf2000000 +#define ORION_PCIE_IO_BASE ORION_PCI_SYS_IO_BASE +#define ORION_PCIE_IO_SIZE SZ_1M +#define ORION_PCIE_IO_REMAP (ORION_PCIE_IO_BASE - ORION_PCI_SYS_IO_BASE) +#define ORION_PCI_IO_BASE (ORION_PCIE_IO_BASE + ORION_PCIE_IO_SIZE) +#define ORION_PCI_IO_SIZE SZ_1M +#define ORION_PCI_IO_REMAP (ORION_PCI_IO_BASE - ORION_PCI_SYS_IO_BASE) +/* Relevant only for Orion-NAS */ +#define ORION_PCIE_WA_BASE 0xf0000000 +#define ORION_PCIE_WA_SIZE SZ_16M + +/******************************************************************************* + * Supported Devices & Revisions + ******************************************************************************/ +/* Orion-1 (88F5181) */ +#define MV88F5181_DEV_ID 0x5181 +#define MV88F5181_REV_B1 3 +/* Orion-NAS (88F5182) */ +#define MV88F5182_DEV_ID 0x5182 +#define MV88F5182_REV_A2 2 +/* Orion-2 (88F5281) */ +#define MV88F5281_DEV_ID 0x5281 +#define MV88F5281_REV_D1 5 +#define MV88F5281_REV_D2 6 + +/******************************************************************************* + * Orion Registers Map + ******************************************************************************/ +#define ORION_DDR_REG_BASE (ORION_REGS_BASE | 0x00000) +#define ORION_DEV_BUS_REG_BASE (ORION_REGS_BASE | 0x10000) +#define ORION_BRIDGE_REG_BASE (ORION_REGS_BASE | 0x20000) +#define ORION_PCI_REG_BASE (ORION_REGS_BASE | 0x30000) +#define ORION_PCIE_REG_BASE (ORION_REGS_BASE | 0x40000) +#define ORION_USB0_REG_BASE (ORION_REGS_BASE | 0x50000) +#define ORION_ETH_REG_BASE (ORION_REGS_BASE | 0x70000) +#define ORION_SATA_REG_BASE (ORION_REGS_BASE | 0x80000) +#define ORION_USB1_REG_BASE (ORION_REGS_BASE | 0xa0000) + +#define ORION_DDR_REG(x) (ORION_DDR_REG_BASE | (x)) +#define ORION_DEV_BUS_REG(x) (ORION_DEV_BUS_REG_BASE | (x)) +#define ORION_BRIDGE_REG(x) (ORION_BRIDGE_REG_BASE | (x)) +#define ORION_PCI_REG(x) (ORION_PCI_REG_BASE | (x)) +#define ORION_PCIE_REG(x) (ORION_PCIE_REG_BASE | (x)) +#define ORION_USB0_REG(x) (ORION_USB0_REG_BASE | (x)) +#define ORION_USB1_REG(x) (ORION_USB1_REG_BASE | (x)) +#define ORION_ETH_REG(x) (ORION_ETH_REG_BASE | (x)) +#define ORION_SATA_REG(x) (ORION_SATA_REG_BASE | (x)) + 
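
As an aside on the device/revision identifiers defined above: platform code typically keys chip-specific behaviour off these IDs. A minimal sketch follows; the helper name is hypothetical, and how the dev/rev values are actually obtained (e.g. from PCIe configuration space) is outside this header.

#include <linux/types.h>
#include <asm/arch/orion.h>	/* MV88F5182_DEV_ID, MV88F5182_REV_A2 */

/* Sketch only: true for an Orion-NAS (88F5182) at revision A2 or later. */
static inline int orion_is_88f5182_a2_plus(u32 dev, u32 rev)
{
	return dev == MV88F5182_DEV_ID && rev >= MV88F5182_REV_A2;
}
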
+/******************************************************************************* + * Device Bus Registers + ******************************************************************************/ +#define MPP_0_7_CTRL ORION_DEV_BUS_REG(0x000) +#define MPP_8_15_CTRL ORION_DEV_BUS_REG(0x004) +#define MPP_16_19_CTRL ORION_DEV_BUS_REG(0x050) +#define MPP_DEV_CTRL ORION_DEV_BUS_REG(0x008) +#define MPP_RESET_SAMPLE ORION_DEV_BUS_REG(0x010) +#define GPIO_OUT ORION_DEV_BUS_REG(0x100) +#define GPIO_IO_CONF ORION_DEV_BUS_REG(0x104) +#define GPIO_BLINK_EN ORION_DEV_BUS_REG(0x108) +#define GPIO_IN_POL ORION_DEV_BUS_REG(0x10c) +#define GPIO_DATA_IN ORION_DEV_BUS_REG(0x110) +#define GPIO_EDGE_CAUSE ORION_DEV_BUS_REG(0x114) +#define GPIO_EDGE_MASK ORION_DEV_BUS_REG(0x118) +#define GPIO_LEVEL_MASK ORION_DEV_BUS_REG(0x11c) +#define DEV_BANK_0_PARAM ORION_DEV_BUS_REG(0x45c) +#define DEV_BANK_1_PARAM ORION_DEV_BUS_REG(0x460) +#define DEV_BANK_2_PARAM ORION_DEV_BUS_REG(0x464) +#define DEV_BANK_BOOT_PARAM ORION_DEV_BUS_REG(0x46c) +#define DEV_BUS_CTRL ORION_DEV_BUS_REG(0x4c0) +#define DEV_BUS_INT_CAUSE ORION_DEV_BUS_REG(0x4d0) +#define DEV_BUS_INT_MASK ORION_DEV_BUS_REG(0x4d4) +#define I2C_BASE ORION_DEV_BUS_REG(0x1000) +#define UART0_BASE ORION_DEV_BUS_REG(0x2000) +#define UART1_BASE ORION_DEV_BUS_REG(0x2100) +#define GPIO_MAX 32 + +/*************************************************************************** + * Orion CPU Bridge Registers + **************************************************************************/ +#define CPU_CONF ORION_BRIDGE_REG(0x100) +#define CPU_CTRL ORION_BRIDGE_REG(0x104) +#define CPU_RESET_MASK ORION_BRIDGE_REG(0x108) +#define CPU_SOFT_RESET ORION_BRIDGE_REG(0x10c) +#define POWER_MNG_CTRL_REG ORION_BRIDGE_REG(0x11C) +#define BRIDGE_CAUSE ORION_BRIDGE_REG(0x110) +#define BRIDGE_MASK ORION_BRIDGE_REG(0x114) +#define MAIN_IRQ_CAUSE ORION_BRIDGE_REG(0x200) +#define MAIN_IRQ_MASK ORION_BRIDGE_REG(0x204) +#define TIMER_CTRL ORION_BRIDGE_REG(0x300) +#define TIMER_VAL(x) ORION_BRIDGE_REG(0x314 + ((x) * 8)) +#define TIMER_VAL_RELOAD(x) ORION_BRIDGE_REG(0x310 + ((x) * 8)) + +#ifndef __ASSEMBLY__ + +/******************************************************************************* + * Helpers to access Orion registers + ******************************************************************************/ +#include <asm/types.h> +#include <asm/io.h> + +#define orion_read(r) __raw_readl(r) +#define orion_write(r, val) __raw_writel(val, r) + +/* + * These are not preempt safe. Locks, if needed, must be taken care by caller. + */ +#define orion_setbits(r, mask) orion_write((r), orion_read(r) | (mask)) +#define orion_clrbits(r, mask) orion_write((r), orion_read(r) & ~(mask)) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARCH_ORION_H__ */ diff --git a/include/asm-arm/arch-orion/platform.h b/include/asm-arm/arch-orion/platform.h new file mode 100644 index 000000000000..143c38e2fa0b --- /dev/null +++ b/include/asm-arm/arch-orion/platform.h @@ -0,0 +1,25 @@ +/* + * asm-arm/arch-orion/platform.h + * + * Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __ASM_ARCH_PLATFORM_H__ +#define __ASM_ARCH_PLATFORM_H__ + +/* + * Device bus NAND private data + */ +struct orion_nand_data { + struct mtd_partition *parts; + u32 nr_parts; + u8 ale; /* address line number connected to ALE */ + u8 cle; /* address line number connected to CLE */ + u8 width; /* buswidth */ +}; + +#endif diff --git a/include/asm-arm/arch-orion/system.h b/include/asm-arm/arch-orion/system.h new file mode 100644 index 000000000000..17704c68f90e --- /dev/null +++ b/include/asm-arm/arch-orion/system.h @@ -0,0 +1,31 @@ +/* + * include/asm-arm/arch-orion/system.h + * + * Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __ASM_ARCH_SYSTEM_H +#define __ASM_ARCH_SYSTEM_H + +#include <asm/arch/hardware.h> +#include <asm/arch/orion.h> + +static inline void arch_idle(void) +{ + cpu_do_idle(); +} + +static inline void arch_reset(char mode) +{ + /* + * Enable and issue soft reset + */ + orion_setbits(CPU_RESET_MASK, (1 << 2)); + orion_setbits(CPU_SOFT_RESET, 1); +} + +#endif diff --git a/include/asm-arm/arch-orion/timex.h b/include/asm-arm/arch-orion/timex.h new file mode 100644 index 000000000000..26c2c91eecf0 --- /dev/null +++ b/include/asm-arm/arch-orion/timex.h @@ -0,0 +1,12 @@ +/* + * include/asm-arm/arch-orion/timex.h + * + * Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#define ORION_TCLK 166666667 +#define CLOCK_TICK_RATE ORION_TCLK diff --git a/include/asm-arm/arch-orion/uncompress.h b/include/asm-arm/arch-orion/uncompress.h new file mode 100644 index 000000000000..a1a222fb438c --- /dev/null +++ b/include/asm-arm/arch-orion/uncompress.h @@ -0,0 +1,44 @@ +/* + * include/asm-arm/arch-orion/uncompress.h + * + * Tzachi Perelstein <tzachi@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <asm/arch/orion.h> + +#define MV_UART_LSR ((volatile unsigned char *)(UART0_BASE + 0x14)) +#define MV_UART_THR ((volatile unsigned char *)(UART0_BASE + 0x0)) + +#define LSR_THRE 0x20 + +static void putc(const char c) +{ + int j = 0x1000; + while (--j && !(*MV_UART_LSR & LSR_THRE)) + barrier(); + *MV_UART_THR = c; +} + +static void flush(void) +{ +} + +static void orion_early_putstr(const char *ptr) +{ + char c; + while ((c = *ptr++) != '\0') { + if (c == '\n') + putc('\r'); + putc(c); + } +} + +/* + * nothing to do + */ +#define arch_decomp_setup() +#define arch_decomp_wdog() diff --git a/include/asm-arm/arch-orion/vmalloc.h b/include/asm-arm/arch-orion/vmalloc.h new file mode 100644 index 000000000000..23e2a102fe0c --- /dev/null +++ b/include/asm-arm/arch-orion/vmalloc.h @@ -0,0 +1,5 @@ +/* + * include/asm-arm/arch-orion/vmalloc.h + */ + +#define VMALLOC_END 0xf0000000 diff --git a/include/asm-arm/arch-pxa/colibri.h b/include/asm-arm/arch-pxa/colibri.h new file mode 100644 index 000000000000..2ae373fb5675 --- /dev/null +++ b/include/asm-arm/arch-pxa/colibri.h @@ -0,0 +1,19 @@ +#ifndef _COLIBRI_H_ +#define _COLIBRI_H_ + +/* physical memory regions */ +#define COLIBRI_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ +#define COLIBRI_ETH_PHYS (PXA_CS2_PHYS) /* Ethernet DM9000 region */ +#define COLIBRI_SDRAM_BASE 0xa0000000 /* SDRAM region */ + +/* virtual memory regions */ +#define COLIBRI_DISK_VIRT 0xF0000000 /* Disk On Chip region */ + +/* size of flash */ +#define COLIBRI_FLASH_SIZE 0x02000000 /* Flash size 32 MB */ + +/* Ethernet Controller Davicom DM9000 */ +#define GPIO_DM9000 114 +#define COLIBRI_ETH_IRQ IRQ_GPIO(GPIO_DM9000) + +#endif /* _COLIBRI_H_ */ diff --git a/include/asm-arm/arch-pxa/corgi.h b/include/asm-arm/arch-pxa/corgi.h index e554caa0d18b..bf856503baf6 100644 --- a/include/asm-arm/arch-pxa/corgi.h +++ b/include/asm-arm/arch-pxa/corgi.h @@ -104,7 +104,6 @@ */ extern struct platform_device corgiscoop_device; extern struct platform_device corgissp_device; -extern struct platform_device corgifb_device; #endif /* __ASM_ARCH_CORGI_H */ diff --git a/include/asm-arm/arch-pxa/i2c.h b/include/asm-arm/arch-pxa/i2c.h index e404b233d8a8..80596b013443 100644 --- a/include/asm-arm/arch-pxa/i2c.h +++ b/include/asm-arm/arch-pxa/i2c.h @@ -65,7 +65,13 @@ struct i2c_pxa_platform_data { unsigned int slave_addr; struct i2c_slave_client *slave; unsigned int class; + int use_pio; }; extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info); + +#ifdef CONFIG_PXA27x +extern void pxa_set_i2c_power_info(struct i2c_pxa_platform_data *info); +#endif + #endif diff --git a/include/asm-arm/arch-pxa/irqs.h b/include/asm-arm/arch-pxa/irqs.h index b76ee6d1f5b4..c562b972a4a6 100644 --- a/include/asm-arm/arch-pxa/irqs.h +++ b/include/asm-arm/arch-pxa/irqs.h @@ -180,7 +180,8 @@ #define NR_IRQS (IRQ_LOCOMO_SPI_TEND + 1) #elif defined(CONFIG_ARCH_LUBBOCK) || \ defined(CONFIG_MACH_LOGICPD_PXA270) || \ - defined(CONFIG_MACH_MAINSTONE) + defined(CONFIG_MACH_MAINSTONE) || \ + defined(CONFIG_MACH_PCM027) #define NR_IRQS (IRQ_BOARD_END) #else #define NR_IRQS (IRQ_BOARD_START) @@ -227,6 +228,13 @@ #define IRQ_LOCOMO_LT_BASE (IRQ_BOARD_START + 2) #define IRQ_LOCOMO_SPI_BASE (IRQ_BOARD_START + 3) +/* phyCORE-PXA270 (PCM027) Interrupts */ +#define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) +#define PCM027_BTDET_IRQ PCM027_IRQ(0) +#define PCM027_FF_RI_IRQ PCM027_IRQ(1) +#define PCM027_MMCDET_IRQ PCM027_IRQ(2) +#define PCM027_PM_5V_IRQ PCM027_IRQ(3) + /* ITE8152 irqs */ /* add IT8152 IRQs beyond BOARD_END */ 
#ifdef CONFIG_PCI_HOST_ITE8152 diff --git a/include/asm-arm/arch-pxa/littleton.h b/include/asm-arm/arch-pxa/littleton.h new file mode 100644 index 000000000000..79d209b826f4 --- /dev/null +++ b/include/asm-arm/arch-pxa/littleton.h @@ -0,0 +1,6 @@ +#ifndef __ASM_ARCH_ZYLONITE_H +#define __ASM_ARCH_ZYLONITE_H + +#define LITTLETON_ETH_PHYS 0x30000000 + +#endif /* __ASM_ARCH_ZYLONITE_H */ diff --git a/include/asm-arm/arch-pxa/magician.h b/include/asm-arm/arch-pxa/magician.h new file mode 100644 index 000000000000..337f51f06b3a --- /dev/null +++ b/include/asm-arm/arch-pxa/magician.h @@ -0,0 +1,111 @@ +/* + * GPIO and IRQ definitions for HTC Magician PDA phones + * + * Copyright (c) 2007 Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef _MAGICIAN_H_ +#define _MAGICIAN_H_ + +#include <asm/arch/pxa-regs.h> + +/* + * PXA GPIOs + */ + +#define GPIO0_MAGICIAN_KEY_POWER 0 +#define GPIO9_MAGICIAN_UNKNOWN 9 +#define GPIO10_MAGICIAN_GSM_IRQ 10 +#define GPIO11_MAGICIAN_GSM_OUT1 11 +#define GPIO13_MAGICIAN_CPLD_IRQ 13 +#define GPIO18_MAGICIAN_UNKNOWN 18 +#define GPIO22_MAGICIAN_VIBRA_EN 22 +#define GPIO26_MAGICIAN_GSM_POWER 26 +#define GPIO27_MAGICIAN_USBC_PUEN 27 +#define GPIO30_MAGICIAN_nCHARGE_EN 30 +#define GPIO37_MAGICIAN_KEY_HANGUP 37 +#define GPIO38_MAGICIAN_KEY_CONTACTS 38 +#define GPIO40_MAGICIAN_GSM_OUT2 40 +#define GPIO48_MAGICIAN_UNKNOWN 48 +#define GPIO56_MAGICIAN_UNKNOWN 56 +#define GPIO57_MAGICIAN_CAM_RESET 57 +#define GPIO83_MAGICIAN_nIR_EN 83 +#define GPIO86_MAGICIAN_GSM_RESET 86 +#define GPIO87_MAGICIAN_GSM_SELECT 87 +#define GPIO90_MAGICIAN_KEY_CALENDAR 90 +#define GPIO91_MAGICIAN_KEY_CAMERA 91 +#define GPIO93_MAGICIAN_KEY_UP 93 +#define GPIO94_MAGICIAN_KEY_DOWN 94 +#define GPIO95_MAGICIAN_KEY_LEFT 95 +#define GPIO96_MAGICIAN_KEY_RIGHT 96 +#define GPIO97_MAGICIAN_KEY_ENTER 97 +#define GPIO98_MAGICIAN_KEY_RECORD 98 +#define GPIO99_MAGICIAN_HEADPHONE_IN 99 +#define GPIO100_MAGICIAN_KEY_VOL_UP 100 +#define GPIO101_MAGICIAN_KEY_VOL_DOWN 101 +#define GPIO102_MAGICIAN_KEY_PHONE 102 +#define GPIO103_MAGICIAN_LED_KP 103 +#define GPIO104_MAGICIAN_LCD_POWER_1 104 +#define GPIO105_MAGICIAN_LCD_POWER_2 105 +#define GPIO106_MAGICIAN_LCD_POWER_3 106 +#define GPIO107_MAGICIAN_DS1WM_IRQ 107 +#define GPIO108_MAGICIAN_GSM_READY 108 +#define GPIO114_MAGICIAN_UNKNOWN 114 +#define GPIO115_MAGICIAN_nPEN_IRQ 115 +#define GPIO116_MAGICIAN_nCAM_EN 116 +#define GPIO119_MAGICIAN_UNKNOWN 119 +#define GPIO120_MAGICIAN_UNKNOWN 120 + +/* + * PXA GPIO alternate function mode & direction + */ + +#define GPIO0_MAGICIAN_KEY_POWER_MD (0 | GPIO_IN) +#define GPIO9_MAGICIAN_UNKNOWN_MD (9 | GPIO_IN) +#define GPIO10_MAGICIAN_GSM_IRQ_MD (10 | GPIO_IN) +#define GPIO11_MAGICIAN_GSM_OUT1_MD (11 | GPIO_OUT) +#define GPIO13_MAGICIAN_CPLD_IRQ_MD (13 | GPIO_IN) +#define GPIO18_MAGICIAN_UNKNOWN_MD (18 | GPIO_OUT) +#define GPIO22_MAGICIAN_VIBRA_EN_MD (22 | GPIO_OUT) +#define GPIO26_MAGICIAN_GSM_POWER_MD (26 | GPIO_OUT) +#define GPIO27_MAGICIAN_USBC_PUEN_MD (27 | GPIO_OUT) +#define GPIO30_MAGICIAN_nCHARGE_EN_MD (30 | GPIO_OUT) +#define GPIO37_MAGICIAN_KEY_HANGUP_MD (37 | GPIO_OUT) +#define GPIO38_MAGICIAN_KEY_CONTACTS_MD (38 | GPIO_OUT) +#define GPIO40_MAGICIAN_GSM_OUT2_MD (40 | GPIO_OUT) +#define GPIO48_MAGICIAN_UNKNOWN_MD (48 | GPIO_OUT) +#define GPIO56_MAGICIAN_UNKNOWN_MD (56 | GPIO_OUT) +#define GPIO57_MAGICIAN_CAM_RESET_MD (57 | GPIO_OUT) +#define 
GPIO83_MAGICIAN_nIR_EN_MD (83 | GPIO_OUT) +#define GPIO86_MAGICIAN_GSM_RESET_MD (86 | GPIO_OUT) +#define GPIO87_MAGICIAN_GSM_SELECT_MD (87 | GPIO_OUT) +#define GPIO90_MAGICIAN_KEY_CALENDAR_MD (90 | GPIO_OUT) +#define GPIO91_MAGICIAN_KEY_CAMERA_MD (91 | GPIO_OUT) +#define GPIO93_MAGICIAN_KEY_UP_MD (93 | GPIO_IN) +#define GPIO94_MAGICIAN_KEY_DOWN_MD (94 | GPIO_IN) +#define GPIO95_MAGICIAN_KEY_LEFT_MD (95 | GPIO_IN) +#define GPIO96_MAGICIAN_KEY_RIGHT_MD (96 | GPIO_IN) +#define GPIO97_MAGICIAN_KEY_ENTER_MD (97 | GPIO_IN) +#define GPIO98_MAGICIAN_KEY_RECORD_MD (98 | GPIO_IN) +#define GPIO99_MAGICIAN_HEADPHONE_IN_MD (99 | GPIO_IN) +#define GPIO100_MAGICIAN_KEY_VOL_UP_MD (100 | GPIO_IN) +#define GPIO101_MAGICIAN_KEY_VOL_DOWN_MD (101 | GPIO_IN) +#define GPIO102_MAGICIAN_KEY_PHONE_MD (102 | GPIO_IN) +#define GPIO103_MAGICIAN_LED_KP_MD (103 | GPIO_OUT) +#define GPIO104_MAGICIAN_LCD_POWER_1_MD (104 | GPIO_OUT) +#define GPIO105_MAGICIAN_LCD_POWER_2_MD (105 | GPIO_OUT) +#define GPIO106_MAGICIAN_LCD_POWER_3_MD (106 | GPIO_OUT) +#define GPIO107_MAGICIAN_DS1WM_IRQ_MD (107 | GPIO_IN) +#define GPIO108_MAGICIAN_GSM_READY_MD (108 | GPIO_IN) +#define GPIO114_MAGICIAN_UNKNOWN_MD (114 | GPIO_OUT) +#define GPIO115_MAGICIAN_nPEN_IRQ_MD (115 | GPIO_IN) +#define GPIO116_MAGICIAN_nCAM_EN_MD (116 | GPIO_OUT) +#define GPIO119_MAGICIAN_UNKNOWN_MD (119 | GPIO_OUT) +#define GPIO120_MAGICIAN_UNKNOWN_MD (120 | GPIO_OUT) + +#endif /* _MAGICIAN_H_ */ diff --git a/include/asm-arm/arch-pxa/mfp-pxa300.h b/include/asm-arm/arch-pxa/mfp-pxa300.h index a20996649889..bb410313556f 100644 --- a/include/asm-arm/arch-pxa/mfp-pxa300.h +++ b/include/asm-arm/arch-pxa/mfp-pxa300.h @@ -16,6 +16,7 @@ #define __ASM_ARCH_MFP_PXA300_H #include <asm/arch/mfp.h> +#include <asm/arch/mfp-pxa3xx.h> /* GPIO */ #define GPIO46_GPIO MFP_CFG(GPIO46, AF1) diff --git a/include/asm-arm/arch-pxa/mfp-pxa320.h b/include/asm-arm/arch-pxa/mfp-pxa320.h index 52deedcaf3bd..576aa46d90fc 100644 --- a/include/asm-arm/arch-pxa/mfp-pxa320.h +++ b/include/asm-arm/arch-pxa/mfp-pxa320.h @@ -16,6 +16,7 @@ #define __ASM_ARCH_MFP_PXA320_H #include <asm/arch/mfp.h> +#include <asm/arch/mfp-pxa3xx.h> /* GPIO */ #define GPIO46_GPIO MFP_CFG(GPIO46, AF0) diff --git a/include/asm-arm/arch-pxa/mfp-pxa3xx.h b/include/asm-arm/arch-pxa/mfp-pxa3xx.h new file mode 100644 index 000000000000..1f6b35c015d0 --- /dev/null +++ b/include/asm-arm/arch-pxa/mfp-pxa3xx.h @@ -0,0 +1,252 @@ +#ifndef __ASM_ARCH_MFP_PXA3XX_H +#define __ASM_ARCH_MFP_PXA3XX_H + +#define MFPR_BASE (0x40e10000) +#define MFPR_SIZE (PAGE_SIZE) + +/* MFPR register bit definitions */ +#define MFPR_PULL_SEL (0x1 << 15) +#define MFPR_PULLUP_EN (0x1 << 14) +#define MFPR_PULLDOWN_EN (0x1 << 13) +#define MFPR_SLEEP_SEL (0x1 << 9) +#define MFPR_SLEEP_OE_N (0x1 << 7) +#define MFPR_EDGE_CLEAR (0x1 << 6) +#define MFPR_EDGE_FALL_EN (0x1 << 5) +#define MFPR_EDGE_RISE_EN (0x1 << 4) + +#define MFPR_SLEEP_DATA(x) ((x) << 8) +#define MFPR_DRIVE(x) (((x) & 0x7) << 10) +#define MFPR_AF_SEL(x) (((x) & 0x7) << 0) + +#define MFPR_EDGE_NONE (0) +#define MFPR_EDGE_RISE (MFPR_EDGE_RISE_EN) +#define MFPR_EDGE_FALL (MFPR_EDGE_FALL_EN) +#define MFPR_EDGE_BOTH (MFPR_EDGE_RISE | MFPR_EDGE_FALL) + +/* + * Table that determines the low power modes outputs, with actual settings + * used in parentheses for don't-care values. Except for the float output, + * the configured driven and pulled levels match, so if there is a need for + * non-LPM pulled output, the same configuration could probably be used. 
+ * + * Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel + * (bit 7) (bit 8) (bit 14) (bit 13) (bit 15) + * + * Input 0 X(0) X(0) X(0) 0 + * Drive 0 0 0 0 X(1) 0 + * Drive 1 0 1 X(1) 0 0 + * Pull hi (1) 1 X(1) 1 0 0 + * Pull lo (0) 1 X(0) 0 1 0 + * Z (float) 1 X(0) 0 0 0 + */ +#define MFPR_LPM_INPUT (0) +#define MFPR_LPM_DRIVE_LOW (MFPR_SLEEP_DATA(0) | MFPR_PULLDOWN_EN) +#define MFPR_LPM_DRIVE_HIGH (MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN) +#define MFPR_LPM_PULL_LOW (MFPR_LPM_DRIVE_LOW | MFPR_SLEEP_OE_N) +#define MFPR_LPM_PULL_HIGH (MFPR_LPM_DRIVE_HIGH | MFPR_SLEEP_OE_N) +#define MFPR_LPM_FLOAT (MFPR_SLEEP_OE_N) +#define MFPR_LPM_MASK (0xe080) + +/* + * The pullup and pulldown state of the MFP pin at run mode is by default + * determined by the selected alternate function. In case that some buggy + * devices need to override this default behavior, the definitions below + * indicates the setting of corresponding MFPR bits + * + * Definition pull_sel pullup_en pulldown_en + * MFPR_PULL_NONE 0 0 0 + * MFPR_PULL_LOW 1 0 1 + * MFPR_PULL_HIGH 1 1 0 + * MFPR_PULL_BOTH 1 1 1 + */ +#define MFPR_PULL_NONE (0) +#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN) +#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN) +#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN) + +/* PXA3xx common MFP configurations - processor specific ones defined + * in mfp-pxa300.h and mfp-pxa320.h + */ +#define GPIO0_GPIO MFP_CFG(GPIO0, AF0) +#define GPIO1_GPIO MFP_CFG(GPIO1, AF0) +#define GPIO2_GPIO MFP_CFG(GPIO2, AF0) +#define GPIO3_GPIO MFP_CFG(GPIO3, AF0) +#define GPIO4_GPIO MFP_CFG(GPIO4, AF0) +#define GPIO5_GPIO MFP_CFG(GPIO5, AF0) +#define GPIO6_GPIO MFP_CFG(GPIO6, AF0) +#define GPIO7_GPIO MFP_CFG(GPIO7, AF0) +#define GPIO8_GPIO MFP_CFG(GPIO8, AF0) +#define GPIO9_GPIO MFP_CFG(GPIO9, AF0) +#define GPIO10_GPIO MFP_CFG(GPIO10, AF0) +#define GPIO11_GPIO MFP_CFG(GPIO11, AF0) +#define GPIO12_GPIO MFP_CFG(GPIO12, AF0) +#define GPIO13_GPIO MFP_CFG(GPIO13, AF0) +#define GPIO14_GPIO MFP_CFG(GPIO14, AF0) +#define GPIO15_GPIO MFP_CFG(GPIO15, AF0) +#define GPIO16_GPIO MFP_CFG(GPIO16, AF0) +#define GPIO17_GPIO MFP_CFG(GPIO17, AF0) +#define GPIO18_GPIO MFP_CFG(GPIO18, AF0) +#define GPIO19_GPIO MFP_CFG(GPIO19, AF0) +#define GPIO20_GPIO MFP_CFG(GPIO20, AF0) +#define GPIO21_GPIO MFP_CFG(GPIO21, AF0) +#define GPIO22_GPIO MFP_CFG(GPIO22, AF0) +#define GPIO23_GPIO MFP_CFG(GPIO23, AF0) +#define GPIO24_GPIO MFP_CFG(GPIO24, AF0) +#define GPIO25_GPIO MFP_CFG(GPIO25, AF0) +#define GPIO26_GPIO MFP_CFG(GPIO26, AF0) +#define GPIO27_GPIO MFP_CFG(GPIO27, AF0) +#define GPIO28_GPIO MFP_CFG(GPIO28, AF0) +#define GPIO29_GPIO MFP_CFG(GPIO29, AF0) +#define GPIO30_GPIO MFP_CFG(GPIO30, AF0) +#define GPIO31_GPIO MFP_CFG(GPIO31, AF0) +#define GPIO32_GPIO MFP_CFG(GPIO32, AF0) +#define GPIO33_GPIO MFP_CFG(GPIO33, AF0) +#define GPIO34_GPIO MFP_CFG(GPIO34, AF0) +#define GPIO35_GPIO MFP_CFG(GPIO35, AF0) +#define GPIO36_GPIO MFP_CFG(GPIO36, AF0) +#define GPIO37_GPIO MFP_CFG(GPIO37, AF0) +#define GPIO38_GPIO MFP_CFG(GPIO38, AF0) +#define GPIO39_GPIO MFP_CFG(GPIO39, AF0) +#define GPIO40_GPIO MFP_CFG(GPIO40, AF0) +#define GPIO41_GPIO MFP_CFG(GPIO41, AF0) +#define GPIO42_GPIO MFP_CFG(GPIO42, AF0) +#define GPIO43_GPIO MFP_CFG(GPIO43, AF0) +#define GPIO44_GPIO MFP_CFG(GPIO44, AF0) +#define GPIO45_GPIO MFP_CFG(GPIO45, AF0) + +#define GPIO47_GPIO MFP_CFG(GPIO47, AF0) +#define GPIO48_GPIO MFP_CFG(GPIO48, AF0) + +#define GPIO53_GPIO MFP_CFG(GPIO53, AF0) +#define GPIO54_GPIO MFP_CFG(GPIO54, AF0) +#define GPIO55_GPIO MFP_CFG(GPIO55, AF0) + +#define 
GPIO57_GPIO MFP_CFG(GPIO57, AF0) + +#define GPIO63_GPIO MFP_CFG(GPIO63, AF0) +#define GPIO64_GPIO MFP_CFG(GPIO64, AF0) +#define GPIO65_GPIO MFP_CFG(GPIO65, AF0) +#define GPIO66_GPIO MFP_CFG(GPIO66, AF0) +#define GPIO67_GPIO MFP_CFG(GPIO67, AF0) +#define GPIO68_GPIO MFP_CFG(GPIO68, AF0) +#define GPIO69_GPIO MFP_CFG(GPIO69, AF0) +#define GPIO70_GPIO MFP_CFG(GPIO70, AF0) +#define GPIO71_GPIO MFP_CFG(GPIO71, AF0) +#define GPIO72_GPIO MFP_CFG(GPIO72, AF0) +#define GPIO73_GPIO MFP_CFG(GPIO73, AF0) +#define GPIO74_GPIO MFP_CFG(GPIO74, AF0) +#define GPIO75_GPIO MFP_CFG(GPIO75, AF0) +#define GPIO76_GPIO MFP_CFG(GPIO76, AF0) +#define GPIO77_GPIO MFP_CFG(GPIO77, AF0) +#define GPIO78_GPIO MFP_CFG(GPIO78, AF0) +#define GPIO79_GPIO MFP_CFG(GPIO79, AF0) +#define GPIO80_GPIO MFP_CFG(GPIO80, AF0) +#define GPIO81_GPIO MFP_CFG(GPIO81, AF0) +#define GPIO82_GPIO MFP_CFG(GPIO82, AF0) +#define GPIO83_GPIO MFP_CFG(GPIO83, AF0) +#define GPIO84_GPIO MFP_CFG(GPIO84, AF0) +#define GPIO85_GPIO MFP_CFG(GPIO85, AF0) +#define GPIO86_GPIO MFP_CFG(GPIO86, AF0) +#define GPIO87_GPIO MFP_CFG(GPIO87, AF0) +#define GPIO88_GPIO MFP_CFG(GPIO88, AF0) +#define GPIO89_GPIO MFP_CFG(GPIO89, AF0) +#define GPIO90_GPIO MFP_CFG(GPIO90, AF0) +#define GPIO91_GPIO MFP_CFG(GPIO91, AF0) +#define GPIO92_GPIO MFP_CFG(GPIO92, AF0) +#define GPIO93_GPIO MFP_CFG(GPIO93, AF0) +#define GPIO94_GPIO MFP_CFG(GPIO94, AF0) +#define GPIO95_GPIO MFP_CFG(GPIO95, AF0) +#define GPIO96_GPIO MFP_CFG(GPIO96, AF0) +#define GPIO97_GPIO MFP_CFG(GPIO97, AF0) +#define GPIO98_GPIO MFP_CFG(GPIO98, AF0) +#define GPIO99_GPIO MFP_CFG(GPIO99, AF0) +#define GPIO100_GPIO MFP_CFG(GPIO100, AF0) +#define GPIO101_GPIO MFP_CFG(GPIO101, AF0) +#define GPIO102_GPIO MFP_CFG(GPIO102, AF0) +#define GPIO103_GPIO MFP_CFG(GPIO103, AF0) +#define GPIO104_GPIO MFP_CFG(GPIO104, AF0) +#define GPIO105_GPIO MFP_CFG(GPIO105, AF0) +#define GPIO106_GPIO MFP_CFG(GPIO106, AF0) +#define GPIO107_GPIO MFP_CFG(GPIO107, AF0) +#define GPIO108_GPIO MFP_CFG(GPIO108, AF0) +#define GPIO109_GPIO MFP_CFG(GPIO109, AF0) +#define GPIO110_GPIO MFP_CFG(GPIO110, AF0) +#define GPIO111_GPIO MFP_CFG(GPIO111, AF0) +#define GPIO112_GPIO MFP_CFG(GPIO112, AF0) +#define GPIO113_GPIO MFP_CFG(GPIO113, AF0) +#define GPIO114_GPIO MFP_CFG(GPIO114, AF0) +#define GPIO115_GPIO MFP_CFG(GPIO115, AF0) +#define GPIO116_GPIO MFP_CFG(GPIO116, AF0) +#define GPIO117_GPIO MFP_CFG(GPIO117, AF0) +#define GPIO118_GPIO MFP_CFG(GPIO118, AF0) +#define GPIO119_GPIO MFP_CFG(GPIO119, AF0) +#define GPIO120_GPIO MFP_CFG(GPIO120, AF0) +#define GPIO121_GPIO MFP_CFG(GPIO121, AF0) +#define GPIO122_GPIO MFP_CFG(GPIO122, AF0) +#define GPIO123_GPIO MFP_CFG(GPIO123, AF0) +#define GPIO124_GPIO MFP_CFG(GPIO124, AF0) +#define GPIO125_GPIO MFP_CFG(GPIO125, AF0) +#define GPIO126_GPIO MFP_CFG(GPIO126, AF0) +#define GPIO127_GPIO MFP_CFG(GPIO127, AF0) + +#define GPIO0_2_GPIO MFP_CFG(GPIO0_2, AF0) +#define GPIO1_2_GPIO MFP_CFG(GPIO1_2, AF0) +#define GPIO2_2_GPIO MFP_CFG(GPIO2_2, AF0) +#define GPIO3_2_GPIO MFP_CFG(GPIO3_2, AF0) +#define GPIO4_2_GPIO MFP_CFG(GPIO4_2, AF0) +#define GPIO5_2_GPIO MFP_CFG(GPIO5_2, AF0) +#define GPIO6_2_GPIO MFP_CFG(GPIO6_2, AF0) + +/* + * each MFP pin will have a MFPR register, since the offset of the + * register varies between processors, the processor specific code + * should initialize the pin offsets by pxa3xx_mfp_init_addr() + * + * pxa3xx_mfp_init_addr - accepts a table of "pxa3xx_mfp_addr_map" + * structure, which represents a range of MFP pins from "start" to + * "end", with the offset begining at "offset", to define a single + * pin, 
let "end" = -1 + * + * use + * + * MFP_ADDR_X() to define a range of pins + * MFP_ADDR() to define a single pin + * MFP_ADDR_END to signal the end of pin offset definitions + */ +struct pxa3xx_mfp_addr_map { + unsigned int start; + unsigned int end; + unsigned long offset; +}; + +#define MFP_ADDR_X(start, end, offset) \ + { MFP_PIN_##start, MFP_PIN_##end, offset } + +#define MFP_ADDR(pin, offset) \ + { MFP_PIN_##pin, -1, offset } + +#define MFP_ADDR_END { MFP_PIN_INVALID, 0 } + +/* + * pxa3xx_mfp_read()/pxa3xx_mfp_write() - for direct read/write access + * to the MFPR register + */ +unsigned long pxa3xx_mfp_read(int mfp); +void pxa3xx_mfp_write(int mfp, unsigned long mfpr_val); + +/* + * pxa3xx_mfp_config - configure the MFPR registers + * + * used by board specific initialization code + */ +void pxa3xx_mfp_config(unsigned long *mfp_cfgs, int num); + +/* + * pxa3xx_mfp_init_addr() - initialize the mapping between mfp pin + * index and MFPR register offset + * + * used by processor specific code + */ +void __init pxa3xx_mfp_init_addr(struct pxa3xx_mfp_addr_map *); +void __init pxa3xx_init_mfp(void); +#endif /* __ASM_ARCH_MFP_PXA3XX_H */ diff --git a/include/asm-arm/arch-pxa/mfp.h b/include/asm-arm/arch-pxa/mfp.h index 03c508d94f0e..02f6157396d3 100644 --- a/include/asm-arm/arch-pxa/mfp.h +++ b/include/asm-arm/arch-pxa/mfp.h @@ -16,9 +16,6 @@ #ifndef __ASM_ARCH_MFP_H #define __ASM_ARCH_MFP_H -#define MFPR_BASE (0x40e10000) -#define MFPR_SIZE (PAGE_SIZE) - #define mfp_to_gpio(m) ((m) % 128) /* list of all the configurable MFP pins */ @@ -217,114 +214,21 @@ enum { }; /* - * Table that determines the low power modes outputs, with actual settings - * used in parentheses for don't-care values. Except for the float output, - * the configured driven and pulled levels match, so if there is a need for - * non-LPM pulled output, the same configuration could probably be used. - * - * Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel - * (bit 7) (bit 8) (bit 14d) (bit 13d) - * - * Drive 0 0 0 0 X (1) 0 - * Drive 1 0 1 X (1) 0 0 - * Pull hi (1) 1 X(1) 1 0 0 - * Pull lo (0) 1 X(0) 0 1 0 - * Z (float) 1 X(0) 0 0 0 - */ -#define MFP_LPM_DRIVE_LOW 0x8 -#define MFP_LPM_DRIVE_HIGH 0x6 -#define MFP_LPM_PULL_HIGH 0x7 -#define MFP_LPM_PULL_LOW 0x9 -#define MFP_LPM_FLOAT 0x1 -#define MFP_LPM_PULL_NEITHER 0x0 - -/* - * The pullup and pulldown state of the MFP pin is by default determined by - * selected alternate function. In case some buggy devices need to override - * this default behavior, pxa3xx_mfp_set_pull() can be invoked with one of - * the following definition as the parameter. 
- * - * Definition pull_sel pullup_en pulldown_en - * MFP_PULL_HIGH 1 1 0 - * MFP_PULL_LOW 1 0 1 - * MFP_PULL_BOTH 1 1 1 - * MFP_PULL_NONE 1 0 0 - * MFP_PULL_DEFAULT 0 X X - * - * NOTE: pxa3xx_mfp_set_pull() will modify the PULLUP_EN and PULLDOWN_EN - * bits, which will cause potential conflicts with the low power mode - * setting, device drivers should take care of this - */ -#define MFP_PULL_BOTH (0x7u) -#define MFP_PULL_HIGH (0x6u) -#define MFP_PULL_LOW (0x5u) -#define MFP_PULL_NONE (0x4u) -#define MFP_PULL_DEFAULT (0x0u) - -#define MFP_AF0 (0) -#define MFP_AF1 (1) -#define MFP_AF2 (2) -#define MFP_AF3 (3) -#define MFP_AF4 (4) -#define MFP_AF5 (5) -#define MFP_AF6 (6) -#define MFP_AF7 (7) - -#define MFP_DS01X (0) -#define MFP_DS02X (1) -#define MFP_DS03X (2) -#define MFP_DS04X (3) -#define MFP_DS06X (4) -#define MFP_DS08X (5) -#define MFP_DS10X (6) -#define MFP_DS12X (7) - -#define MFP_EDGE_BOTH 0x3 -#define MFP_EDGE_RISE 0x2 -#define MFP_EDGE_FALL 0x1 -#define MFP_EDGE_NONE 0x0 - -#define MFPR_AF_MASK 0x0007 -#define MFPR_DRV_MASK 0x1c00 -#define MFPR_RDH_MASK 0x0200 -#define MFPR_LPM_MASK 0xe180 -#define MFPR_PULL_MASK 0xe000 -#define MFPR_EDGE_MASK 0x0070 - -#define MFPR_ALT_OFFSET 0 -#define MFPR_ERE_OFFSET 4 -#define MFPR_EFE_OFFSET 5 -#define MFPR_EC_OFFSET 6 -#define MFPR_SON_OFFSET 7 -#define MFPR_SD_OFFSET 8 -#define MFPR_SS_OFFSET 9 -#define MFPR_DRV_OFFSET 10 -#define MFPR_PD_OFFSET 13 -#define MFPR_PU_OFFSET 14 -#define MFPR_PS_OFFSET 15 - -#define MFPR(af, drv, rdh, lpm, edge) \ - (((af) & 0x7) | (((drv) & 0x7) << 10) |\ - (((rdh) & 0x1) << 9) |\ - (((lpm) & 0x3) << 7) |\ - (((lpm) & 0x4) << 12)|\ - (((lpm) & 0x8) << 10)|\ - ((!(edge)) << 6) |\ - (((edge) & 0x1) << 5) |\ - (((edge) & 0x2) << 3)) - -/* * a possible MFP configuration is represented by a 32-bit integer - * bit 0..15 - MFPR value (16-bit) - * bit 16..31 - mfp pin index (used to obtain the MFPR offset) + * + * bit 0.. 
9 - MFP Pin Number (1024 Pins Maximum) + * bit 10..12 - Alternate Function Selection + * bit 13..15 - Drive Strength + * bit 16..18 - Low Power Mode State + * bit 19..20 - Low Power Mode Edge Detection + * bit 21..22 - Run Mode Pull State * * to facilitate the definition, the following macros are provided * - * MFPR_DEFAULT - default MFPR value, with + * MFP_CFG_DEFAULT - default MFP configuration value, with * alternate function = 0, - * drive strength = fast 1mA (MFP_DS01X) + * drive strength = fast 3mA (MFP_DS03X) * low power mode = default - * release dalay hold = false (RDH bit) * edge detection = none * * MFP_CFG - default MFPR value with alternate function @@ -334,251 +238,74 @@ enum { * low power mode * MFP_CFG_X - default MFPR value with alternate function, * pin drive strength and low power mode - * - * use - * - * MFP_CFG_PIN - to get the MFP pin index - * MFP_CFG_VAL - to get the corresponding MFPR value */ -typedef uint32_t mfp_cfg_t; - -#define MFP_CFG_PIN(mfp_cfg) (((mfp_cfg) >> 16) & 0xffff) -#define MFP_CFG_VAL(mfp_cfg) ((mfp_cfg) & 0xffff) - -/* - * MFP register defaults to - * drive strength fast 3mA (010'b) - * edge detection logic disabled - * alternate function 0 - */ -#define MFPR_DEFAULT (0x0840) +typedef unsigned long mfp_cfg_t; + +#define MFP_PIN(x) ((x) & 0x3ff) + +#define MFP_AF0 (0x0 << 10) +#define MFP_AF1 (0x1 << 10) +#define MFP_AF2 (0x2 << 10) +#define MFP_AF3 (0x3 << 10) +#define MFP_AF4 (0x4 << 10) +#define MFP_AF5 (0x5 << 10) +#define MFP_AF6 (0x6 << 10) +#define MFP_AF7 (0x7 << 10) +#define MFP_AF_MASK (0x7 << 10) +#define MFP_AF(x) (((x) >> 10) & 0x7) + +#define MFP_DS01X (0x0 << 13) +#define MFP_DS02X (0x1 << 13) +#define MFP_DS03X (0x2 << 13) +#define MFP_DS04X (0x3 << 13) +#define MFP_DS06X (0x4 << 13) +#define MFP_DS08X (0x5 << 13) +#define MFP_DS10X (0x6 << 13) +#define MFP_DS13X (0x7 << 13) +#define MFP_DS_MASK (0x7 << 13) +#define MFP_DS(x) (((x) >> 13) & 0x7) + +#define MFP_LPM_INPUT (0x0 << 16) +#define MFP_LPM_DRIVE_LOW (0x1 << 16) +#define MFP_LPM_DRIVE_HIGH (0x2 << 16) +#define MFP_LPM_PULL_LOW (0x3 << 16) +#define MFP_LPM_PULL_HIGH (0x4 << 16) +#define MFP_LPM_FLOAT (0x5 << 16) +#define MFP_LPM_STATE_MASK (0x7 << 16) +#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7) + +#define MFP_LPM_EDGE_NONE (0x0 << 19) +#define MFP_LPM_EDGE_RISE (0x1 << 19) +#define MFP_LPM_EDGE_FALL (0x2 << 19) +#define MFP_LPM_EDGE_BOTH (0x3 << 19) +#define MFP_LPM_EDGE_MASK (0x3 << 19) +#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3) + +#define MFP_PULL_NONE (0x0 << 21) +#define MFP_PULL_LOW (0x1 << 21) +#define MFP_PULL_HIGH (0x2 << 21) +#define MFP_PULL_BOTH (0x3 << 21) +#define MFP_PULL_MASK (0x3 << 21) +#define MFP_PULL(x) (((x) >> 21) & 0x3) + +#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_INPUT |\ + MFP_LPM_EDGE_NONE | MFP_PULL_NONE) #define MFP_CFG(pin, af) \ - ((MFP_PIN_##pin << 16) | MFPR_DEFAULT | (MFP_##af)) + ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af)) #define MFP_CFG_DRV(pin, af, drv) \ - ((MFP_PIN_##pin << 16) | (MFPR_DEFAULT & ~MFPR_DRV_MASK) |\ - ((MFP_##drv) << 10) | (MFP_##af)) + ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv)) #define MFP_CFG_LPM(pin, af, lpm) \ - ((MFP_PIN_##pin << 16) | (MFPR_DEFAULT & ~MFPR_LPM_MASK) |\ - (((MFP_LPM_##lpm) & 0x3) << 7) |\ - (((MFP_LPM_##lpm) & 0x4) << 12) |\ - (((MFP_LPM_##lpm) & 0x8) << 10) |\ - (MFP_##af)) + ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm)) 
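
The MFP_CFG*() encodings above are consumed by board code through pxa3xx_mfp_config(), declared in mfp-pxa3xx.h earlier in this diff. A minimal sketch of that usage follows; the board function and the particular pin choices are hypothetical, but the macro and pin names come from the definitions above.

#include <linux/init.h>
#include <linux/kernel.h>		/* ARRAY_SIZE */
#include <asm/arch/mfp.h>
#include <asm/arch/mfp-pxa3xx.h>	/* pxa3xx_mfp_config() */

/* Hypothetical board pin setup using the mfp_cfg_t encoding above. */
static mfp_cfg_t example_board_mfp_cfg[] __initdata = {
	GPIO47_GPIO,				/* AF0, default drive and LPM state */
	MFP_CFG_DRV(GPIO48, AF0, DS08X),	/* stronger drive strength */
	MFP_CFG_LPM(GPIO53, AF0, PULL_HIGH),	/* pulled high in low-power mode */
};

static void __init example_board_init_mfp(void)
{
	pxa3xx_mfp_config(example_board_mfp_cfg, ARRAY_SIZE(example_board_mfp_cfg));
}
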
#define MFP_CFG_X(pin, af, drv, lpm) \ - ((MFP_PIN_##pin << 16) |\ - (MFPR_DEFAULT & ~(MFPR_DRV_MASK | MFPR_LPM_MASK)) |\ - ((MFP_##drv) << 10) | (MFP_##af) |\ - (((MFP_LPM_##lpm) & 0x3) << 7) |\ - (((MFP_LPM_##lpm) & 0x4) << 12) |\ - (((MFP_LPM_##lpm) & 0x8) << 10)) - -/* common MFP configurations - processor specific ones defined - * in mfp-pxa3xx.h - */ -#define GPIO0_GPIO MFP_CFG(GPIO0, AF0) -#define GPIO1_GPIO MFP_CFG(GPIO1, AF0) -#define GPIO2_GPIO MFP_CFG(GPIO2, AF0) -#define GPIO3_GPIO MFP_CFG(GPIO3, AF0) -#define GPIO4_GPIO MFP_CFG(GPIO4, AF0) -#define GPIO5_GPIO MFP_CFG(GPIO5, AF0) -#define GPIO6_GPIO MFP_CFG(GPIO6, AF0) -#define GPIO7_GPIO MFP_CFG(GPIO7, AF0) -#define GPIO8_GPIO MFP_CFG(GPIO8, AF0) -#define GPIO9_GPIO MFP_CFG(GPIO9, AF0) -#define GPIO10_GPIO MFP_CFG(GPIO10, AF0) -#define GPIO11_GPIO MFP_CFG(GPIO11, AF0) -#define GPIO12_GPIO MFP_CFG(GPIO12, AF0) -#define GPIO13_GPIO MFP_CFG(GPIO13, AF0) -#define GPIO14_GPIO MFP_CFG(GPIO14, AF0) -#define GPIO15_GPIO MFP_CFG(GPIO15, AF0) -#define GPIO16_GPIO MFP_CFG(GPIO16, AF0) -#define GPIO17_GPIO MFP_CFG(GPIO17, AF0) -#define GPIO18_GPIO MFP_CFG(GPIO18, AF0) -#define GPIO19_GPIO MFP_CFG(GPIO19, AF0) -#define GPIO20_GPIO MFP_CFG(GPIO20, AF0) -#define GPIO21_GPIO MFP_CFG(GPIO21, AF0) -#define GPIO22_GPIO MFP_CFG(GPIO22, AF0) -#define GPIO23_GPIO MFP_CFG(GPIO23, AF0) -#define GPIO24_GPIO MFP_CFG(GPIO24, AF0) -#define GPIO25_GPIO MFP_CFG(GPIO25, AF0) -#define GPIO26_GPIO MFP_CFG(GPIO26, AF0) -#define GPIO27_GPIO MFP_CFG(GPIO27, AF0) -#define GPIO28_GPIO MFP_CFG(GPIO28, AF0) -#define GPIO29_GPIO MFP_CFG(GPIO29, AF0) -#define GPIO30_GPIO MFP_CFG(GPIO30, AF0) -#define GPIO31_GPIO MFP_CFG(GPIO31, AF0) -#define GPIO32_GPIO MFP_CFG(GPIO32, AF0) -#define GPIO33_GPIO MFP_CFG(GPIO33, AF0) -#define GPIO34_GPIO MFP_CFG(GPIO34, AF0) -#define GPIO35_GPIO MFP_CFG(GPIO35, AF0) -#define GPIO36_GPIO MFP_CFG(GPIO36, AF0) -#define GPIO37_GPIO MFP_CFG(GPIO37, AF0) -#define GPIO38_GPIO MFP_CFG(GPIO38, AF0) -#define GPIO39_GPIO MFP_CFG(GPIO39, AF0) -#define GPIO40_GPIO MFP_CFG(GPIO40, AF0) -#define GPIO41_GPIO MFP_CFG(GPIO41, AF0) -#define GPIO42_GPIO MFP_CFG(GPIO42, AF0) -#define GPIO43_GPIO MFP_CFG(GPIO43, AF0) -#define GPIO44_GPIO MFP_CFG(GPIO44, AF0) -#define GPIO45_GPIO MFP_CFG(GPIO45, AF0) - -#define GPIO47_GPIO MFP_CFG(GPIO47, AF0) -#define GPIO48_GPIO MFP_CFG(GPIO48, AF0) - -#define GPIO53_GPIO MFP_CFG(GPIO53, AF0) -#define GPIO54_GPIO MFP_CFG(GPIO54, AF0) -#define GPIO55_GPIO MFP_CFG(GPIO55, AF0) - -#define GPIO57_GPIO MFP_CFG(GPIO57, AF0) - -#define GPIO63_GPIO MFP_CFG(GPIO63, AF0) -#define GPIO64_GPIO MFP_CFG(GPIO64, AF0) -#define GPIO65_GPIO MFP_CFG(GPIO65, AF0) -#define GPIO66_GPIO MFP_CFG(GPIO66, AF0) -#define GPIO67_GPIO MFP_CFG(GPIO67, AF0) -#define GPIO68_GPIO MFP_CFG(GPIO68, AF0) -#define GPIO69_GPIO MFP_CFG(GPIO69, AF0) -#define GPIO70_GPIO MFP_CFG(GPIO70, AF0) -#define GPIO71_GPIO MFP_CFG(GPIO71, AF0) -#define GPIO72_GPIO MFP_CFG(GPIO72, AF0) -#define GPIO73_GPIO MFP_CFG(GPIO73, AF0) -#define GPIO74_GPIO MFP_CFG(GPIO74, AF0) -#define GPIO75_GPIO MFP_CFG(GPIO75, AF0) -#define GPIO76_GPIO MFP_CFG(GPIO76, AF0) -#define GPIO77_GPIO MFP_CFG(GPIO77, AF0) -#define GPIO78_GPIO MFP_CFG(GPIO78, AF0) -#define GPIO79_GPIO MFP_CFG(GPIO79, AF0) -#define GPIO80_GPIO MFP_CFG(GPIO80, AF0) -#define GPIO81_GPIO MFP_CFG(GPIO81, AF0) -#define GPIO82_GPIO MFP_CFG(GPIO82, AF0) -#define GPIO83_GPIO MFP_CFG(GPIO83, AF0) -#define GPIO84_GPIO MFP_CFG(GPIO84, AF0) -#define GPIO85_GPIO MFP_CFG(GPIO85, AF0) -#define GPIO86_GPIO MFP_CFG(GPIO86, AF0) -#define 
GPIO87_GPIO MFP_CFG(GPIO87, AF0) -#define GPIO88_GPIO MFP_CFG(GPIO88, AF0) -#define GPIO89_GPIO MFP_CFG(GPIO89, AF0) -#define GPIO90_GPIO MFP_CFG(GPIO90, AF0) -#define GPIO91_GPIO MFP_CFG(GPIO91, AF0) -#define GPIO92_GPIO MFP_CFG(GPIO92, AF0) -#define GPIO93_GPIO MFP_CFG(GPIO93, AF0) -#define GPIO94_GPIO MFP_CFG(GPIO94, AF0) -#define GPIO95_GPIO MFP_CFG(GPIO95, AF0) -#define GPIO96_GPIO MFP_CFG(GPIO96, AF0) -#define GPIO97_GPIO MFP_CFG(GPIO97, AF0) -#define GPIO98_GPIO MFP_CFG(GPIO98, AF0) -#define GPIO99_GPIO MFP_CFG(GPIO99, AF0) -#define GPIO100_GPIO MFP_CFG(GPIO100, AF0) -#define GPIO101_GPIO MFP_CFG(GPIO101, AF0) -#define GPIO102_GPIO MFP_CFG(GPIO102, AF0) -#define GPIO103_GPIO MFP_CFG(GPIO103, AF0) -#define GPIO104_GPIO MFP_CFG(GPIO104, AF0) -#define GPIO105_GPIO MFP_CFG(GPIO105, AF0) -#define GPIO106_GPIO MFP_CFG(GPIO106, AF0) -#define GPIO107_GPIO MFP_CFG(GPIO107, AF0) -#define GPIO108_GPIO MFP_CFG(GPIO108, AF0) -#define GPIO109_GPIO MFP_CFG(GPIO109, AF0) -#define GPIO110_GPIO MFP_CFG(GPIO110, AF0) -#define GPIO111_GPIO MFP_CFG(GPIO111, AF0) -#define GPIO112_GPIO MFP_CFG(GPIO112, AF0) -#define GPIO113_GPIO MFP_CFG(GPIO113, AF0) -#define GPIO114_GPIO MFP_CFG(GPIO114, AF0) -#define GPIO115_GPIO MFP_CFG(GPIO115, AF0) -#define GPIO116_GPIO MFP_CFG(GPIO116, AF0) -#define GPIO117_GPIO MFP_CFG(GPIO117, AF0) -#define GPIO118_GPIO MFP_CFG(GPIO118, AF0) -#define GPIO119_GPIO MFP_CFG(GPIO119, AF0) -#define GPIO120_GPIO MFP_CFG(GPIO120, AF0) -#define GPIO121_GPIO MFP_CFG(GPIO121, AF0) -#define GPIO122_GPIO MFP_CFG(GPIO122, AF0) -#define GPIO123_GPIO MFP_CFG(GPIO123, AF0) -#define GPIO124_GPIO MFP_CFG(GPIO124, AF0) -#define GPIO125_GPIO MFP_CFG(GPIO125, AF0) -#define GPIO126_GPIO MFP_CFG(GPIO126, AF0) -#define GPIO127_GPIO MFP_CFG(GPIO127, AF0) - -#define GPIO0_2_GPIO MFP_CFG(GPIO0_2, AF0) -#define GPIO1_2_GPIO MFP_CFG(GPIO1_2, AF0) -#define GPIO2_2_GPIO MFP_CFG(GPIO2_2, AF0) -#define GPIO3_2_GPIO MFP_CFG(GPIO3_2, AF0) -#define GPIO4_2_GPIO MFP_CFG(GPIO4_2, AF0) -#define GPIO5_2_GPIO MFP_CFG(GPIO5_2, AF0) -#define GPIO6_2_GPIO MFP_CFG(GPIO6_2, AF0) - -/* - * each MFP pin will have a MFPR register, since the offset of the - * register varies between processors, the processor specific code - * should initialize the pin offsets by pxa3xx_mfp_init_addr() - * - * pxa3xx_mfp_init_addr - accepts a table of "pxa3xx_mfp_addr_map" - * structure, which represents a range of MFP pins from "start" to - * "end", with the offset begining at "offset", to define a single - * pin, let "end" = -1 - * - * use - * - * MFP_ADDR_X() to define a range of pins - * MFP_ADDR() to define a single pin - * MFP_ADDR_END to signal the end of pin offset definitions - */ -struct pxa3xx_mfp_addr_map { - unsigned int start; - unsigned int end; - unsigned long offset; -}; - -#define MFP_ADDR_X(start, end, offset) \ - { MFP_PIN_##start, MFP_PIN_##end, offset } - -#define MFP_ADDR(pin, offset) \ - { MFP_PIN_##pin, -1, offset } - -#define MFP_ADDR_END { MFP_PIN_INVALID, 0 } - -struct pxa3xx_mfp_pin { - unsigned long mfpr_off; /* MFPRxx register offset */ - unsigned long mfpr_val; /* MFPRxx register value */ -}; - -/* - * pxa3xx_mfp_read()/pxa3xx_mfp_write() - for direct read/write access - * to the MFPR register - */ -unsigned long pxa3xx_mfp_read(int mfp); -void pxa3xx_mfp_write(int mfp, unsigned long mfpr_val); - -/* - * pxa3xx_mfp_set_afds - set MFP alternate function and drive strength - * pxa3xx_mfp_set_rdh - set MFP release delay hold on/off - * pxa3xx_mfp_set_lpm - set MFP low power mode state - * pxa3xx_mfp_set_edge - set MFP 
edge detection in low power mode - * - * use these functions to override/change the default configuration - * done by pxa3xx_mfp_set_config(s) - */ -void pxa3xx_mfp_set_afds(int mfp, int af, int ds); -void pxa3xx_mfp_set_rdh(int mfp, int rdh); -void pxa3xx_mfp_set_lpm(int mfp, int lpm); -void pxa3xx_mfp_set_edge(int mfp, int edge); - -/* - * pxa3xx_mfp_config - configure the MFPR registers - * - * used by board specific initialization code - */ -void pxa3xx_mfp_config(mfp_cfg_t *mfp_cfgs, int num); - -/* - * pxa3xx_mfp_init_addr() - initialize the mapping between mfp pin - * index and MFPR register offset - * - * used by processor specific code - */ -void __init pxa3xx_mfp_init_addr(struct pxa3xx_mfp_addr_map *); -void __init pxa3xx_init_mfp(void); + ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\ + (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm)) #endif /* __ASM_ARCH_MFP_H */ diff --git a/include/asm-arm/arch-pxa/mmc.h b/include/asm-arm/arch-pxa/mmc.h index ef4f570381d1..6d1304c9270f 100644 --- a/include/asm-arm/arch-pxa/mmc.h +++ b/include/asm-arm/arch-pxa/mmc.h @@ -17,5 +17,7 @@ struct pxamci_platform_data { }; extern void pxa_set_mci_info(struct pxamci_platform_data *info); +extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info); +extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info); #endif diff --git a/include/asm-arm/arch-pxa/pcm027.h b/include/asm-arm/arch-pxa/pcm027.h new file mode 100644 index 000000000000..7beae1472c3e --- /dev/null +++ b/include/asm-arm/arch-pxa/pcm027.h @@ -0,0 +1,75 @@ +/* + * linux/include/asm-arm/arch-pxa/pcm027.h + * + * (c) 2003 Phytec Messtechnik GmbH <armlinux@phytec.de> + * (c) 2007 Juergen Beisert <j.beisert@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * Definitions of CPU card resources only + */ + +/* I2C RTC */ +#define PCM027_RTC_IRQ_GPIO 0 +#define PCM027_RTC_IRQ IRQ_GPIO(PCM027_RTC_IRQ_GPIO) +#define PCM027_RTC_IRQ_EDGE IRQ_TYPE_EDGE_FALLING +#define ADR_PCM027_RTC 0x51 /* I2C address */ + +/* I2C EEPROM */ +#define ADR_PCM027_EEPROM 0x54 /* I2C address */ + +/* Ethernet chip (SMSC91C111) */ +#define PCM027_ETH_IRQ_GPIO 52 +#define PCM027_ETH_IRQ IRQ_GPIO(PCM027_ETH_IRQ_GPIO) +#define PCM027_ETH_IRQ_EDGE IRQ_TYPE_EDGE_RISING +#define PCM027_ETH_PHYS PXA_CS5_PHYS +#define PCM027_ETH_SIZE (1*1024*1024) + +/* CAN controller SJA1000 (unsupported yet) */ +#define PCM027_CAN_IRQ_GPIO 114 +#define PCM027_CAN_IRQ IRQ_GPIO(PCM027_CAN_IRQ_GPIO) +#define PCM027_CAN_IRQ_EDGE IRQ_TYPE_EDGE_FALLING +#define PCM027_CAN_PHYS 0x22000000 +#define PCM027_CAN_SIZE 0x100 + +/* SPI GPIO expander (unsupported yet) */ +#define PCM027_EGPIO_IRQ_GPIO 27 +#define PCM027_EGPIO_IRQ IRQ_GPIO(PCM027_EGPIO_IRQ_GPIO) +#define PCM027_EGPIO_IRQ_EDGE IRQ_TYPE_EDGE_FALLING +#define PCM027_EGPIO_CS 24 +/* + * TODO: Switch this pin from dedicated usage to GPIO if + * more than the MAX7301 device is connected to this SPI bus + */ +#define PCM027_EGPIO_CS_MODE GPIO24_SFRM_MD + +/* Flash memory */ +#define PCM027_FLASH_PHYS 0x00000000 +#define PCM027_FLASH_SIZE 0x02000000 + +/* onboard LEDs connected to GPIO */ +#define PCM027_LED_CPU 90 +#define PCM027_LED_HEARD_BEAT 91 + +/* + * This CPU module needs a baseboard to work. After basic initializing + * its own devices, it calls baseboard's init function. + * TODO: Add your own basebaord init function and call it from + * inside pcm027_init(). This example here is for the developmen board. + * Refer pcm990-baseboard.c + */ +extern void pcm990_baseboard_init(void); diff --git a/include/asm-arm/arch-pxa/pcm990_baseboard.h b/include/asm-arm/arch-pxa/pcm990_baseboard.h new file mode 100644 index 000000000000..b699d0d7bdb2 --- /dev/null +++ b/include/asm-arm/arch-pxa/pcm990_baseboard.h @@ -0,0 +1,275 @@ +/* + * include/asm-arm/arch-pxa/pcm990_baseboard.h + * + * (c) 2003 Phytec Messtechnik GmbH <armlinux@phytec.de> + * (c) 2007 Juergen Beisert <j.beisert@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <asm/arch/pcm027.h> + +/* + * definitions relevant only when the PCM-990 + * development base board is in use + */ + +/* CPLD's interrupt controller is connected to PCM-027 GPIO 9 */ +#define PCM990_CTRL_INT_IRQ_GPIO 9 +#define PCM990_CTRL_INT_IRQ IRQ_GPIO(PCM990_CTRL_INT_IRQ_GPIO) +#define PCM990_CTRL_INT_IRQ_EDGE IRQT_RISING +#define PCM990_CTRL_PHYS PXA_CS1_PHYS /* 16-Bit */ +#define PCM990_CTRL_BASE 0xea000000 +#define PCM990_CTRL_SIZE (1*1024*1024) + +#define PCM990_CTRL_PWR_IRQ_GPIO 14 +#define PCM990_CTRL_PWR_IRQ IRQ_GPIO(PCM990_CTRL_PWR_IRQ_GPIO) +#define PCM990_CTRL_PWR_IRQ_EDGE IRQT_RISING + +/* visible CPLD (U7) registers */ +#define PCM990_CTRL_REG0 0x0000 /* RESET REGISTER */ +#define PCM990_CTRL_SYSRES 0x0001 /* System RESET REGISTER */ +#define PCM990_CTRL_RESOUT 0x0002 /* RESETOUT Enable REGISTER */ +#define PCM990_CTRL_RESGPIO 0x0004 /* RESETGPIO Enable REGISTER */ + +#define PCM990_CTRL_REG1 0x0002 /* Power REGISTER */ +#define PCM990_CTRL_5VOFF 0x0001 /* Disable 5V Regulators */ +#define PCM990_CTRL_CANPWR 0x0004 /* Enable CANPWR ADUM */ +#define PCM990_CTRL_PM_5V 0x0008 /* Read 5V OK */ + +#define PCM990_CTRL_REG2 0x0004 /* LED REGISTER */ +#define PCM990_CTRL_LEDPWR 0x0001 /* POWER LED enable */ +#define PCM990_CTRL_LEDBAS 0x0002 /* BASIS LED enable */ +#define PCM990_CTRL_LEDUSR 0x0004 /* USER LED enable */ + +#define PCM990_CTRL_REG3 0x0006 /* LCD CTRL REGISTER 3 */ +#define PCM990_CTRL_LCDPWR 0x0001 /* RW LCD Power on */ +#define PCM990_CTRL_LCDON 0x0002 /* RW LCD Latch on */ +#define PCM990_CTRL_LCDPOS1 0x0004 /* RW POS 1 */ +#define PCM990_CTRL_LCDPOS2 0x0008 /* RW POS 2 */ + +#define PCM990_CTRL_REG4 0x0008 /* MMC1 CTRL REGISTER 4 */ +#define PCM990_CTRL_MMC1PWR 0x0001 /* RW MMC1 Power on */ + +#define PCM990_CTRL_REG5 0x000A /* MMC2 CTRL REGISTER 5 */ +#define PCM990_CTRL_MMC2PWR 0x0001 /* RW MMC2 Power on */ +#define PCM990_CTRL_MMC2LED 0x0002 /* RW MMC2 LED */ +#define PCM990_CTRL_MMC2DE 0x0004 /* R MMC2 Card detect */ +#define PCM990_CTRL_MMC2WP 0x0008 /* R MMC2 Card write protect */ + +#define PCM990_CTRL_REG6 0x000C /* Interrupt Clear REGISTER */ +#define PCM990_CTRL_INTC0 0x0001 /* Clear Reg BT Detect */ +#define PCM990_CTRL_INTC1 0x0002 /* Clear Reg FR RI */ +#define PCM990_CTRL_INTC2 0x0004 /* Clear Reg MMC1 Detect */ +#define PCM990_CTRL_INTC3 0x0008 /* Clear Reg PM_5V off */ + +#define PCM990_CTRL_REG7 0x000E /* Interrupt Enable REGISTER */ +#define PCM990_CTRL_ENAINT0 0x0001 /* Enable Int BT Detect */ +#define PCM990_CTRL_ENAINT1 0x0002 /* Enable Int FR RI */ +#define PCM990_CTRL_ENAINT2 0x0004 /* Enable Int MMC1 Detect */ +#define PCM990_CTRL_ENAINT3 0x0008 /* Enable Int PM_5V off */ + +#define PCM990_CTRL_REG8 0x0014 /* Uart REGISTER */ +#define PCM990_CTRL_FFSD 0x0001 /* BT Uart Enable */ +#define PCM990_CTRL_BTSD 0x0002 /* FF Uart Enable */ +#define PCM990_CTRL_FFRI 0x0004 /* FF Uart RI detect */ +#define PCM990_CTRL_BTRX 0x0008 /* BT Uart Rx detect */ + +#define PCM990_CTRL_REG9 0x0010 /* AC97 Flash REGISTER */ +#define PCM990_CTRL_FLWP 0x0001 /* pC Flash Write Protect */ +#define PCM990_CTRL_FLDIS 0x0002 /* pC Flash Disable */ +#define PCM990_CTRL_AC97ENA 0x0004 /* Enable AC97 Expansion */ + +#define PCM990_CTRL_REG10 0x0012 /* GPS-REGISTER */ +#define PCM990_CTRL_GPSPWR 0x0004 /* GPS-Modul Power on */ +#define 
PCM990_CTRL_GPSENA 0x0008 /* GPS-Modul Enable */ + +#define PCM990_CTRL_REG11 0x0014 /* Accu REGISTER */ +#define PCM990_CTRL_ACENA 0x0001 /* Charge Enable */ +#define PCM990_CTRL_ACSEL 0x0002 /* Charge Akku -> DC Enable */ +#define PCM990_CTRL_ACPRES 0x0004 /* DC Present */ +#define PCM990_CTRL_ACALARM 0x0008 /* Error Akku */ + +#define PCM990_CTRL_P2V(x) ((x) - PCM990_CTRL_PHYS + PCM990_CTRL_BASE) +#define PCM990_CTRL_V2P(x) ((x) - PCM990_CTRL_BASE + PCM990_CTRL_PHYS) + +#ifndef __ASSEMBLY__ +# define __PCM990_CTRL_REG(x) \ + (*((volatile unsigned char *)PCM990_CTRL_P2V(x))) +#else +# define __PCM990_CTRL_REG(x) PCM990_CTRL_P2V(x) +#endif + +#define PCM990_INTMSKENA __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG7) +#define PCM990_INTSETCLR __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG6) +#define PCM990_CTRL0 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG0) +#define PCM990_CTRL1 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG1) +#define PCM990_CTRL2 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG2) +#define PCM990_CTRL3 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) +#define PCM990_CTRL4 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG4) +#define PCM990_CTRL5 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) +#define PCM990_CTRL6 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG6) +#define PCM990_CTRL7 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG7) +#define PCM990_CTRL8 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG8) +#define PCM990_CTRL9 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG9) +#define PCM990_CTRL10 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG10) +#define PCM990_CTRL11 __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG11) + + +/* + * IDE + */ +#define PCM990_IDE_IRQ_GPIO 13 +#define PCM990_IDE_IRQ IRQ_GPIO(PCM990_IDE_IRQ_GPIO) +#define PCM990_IDE_IRQ_EDGE IRQT_RISING +#define PCM990_IDE_PLD_PHYS 0x20000000 /* 16 bit wide */ +#define PCM990_IDE_PLD_BASE 0xee000000 +#define PCM990_IDE_PLD_SIZE (1*1024*1024) + +/* visible CPLD (U6) registers */ +#define PCM990_IDE_PLD_REG0 0x1000 /* OFFSET IDE REGISTER 0 */ +#define PCM990_IDE_PM5V 0x0004 /* R System VCC_5V */ +#define PCM990_IDE_STBY 0x0008 /* R System StandBy */ + +#define PCM990_IDE_PLD_REG1 0x1002 /* OFFSET IDE REGISTER 1 */ +#define PCM990_IDE_IDEMODE 0x0001 /* R TrueIDE Mode */ +#define PCM990_IDE_DMAENA 0x0004 /* RW DMA Enable */ +#define PCM990_IDE_DMA1_0 0x0008 /* RW 1=DREQ1 0=DREQ0 */ + +#define PCM990_IDE_PLD_REG2 0x1004 /* OFFSET IDE REGISTER 2 */ +#define PCM990_IDE_RESENA 0x0001 /* RW IDE Reset Bit enable */ +#define PCM990_IDE_RES 0x0002 /* RW IDE Reset Bit */ +#define PCM990_IDE_RDY 0x0008 /* RDY */ + +#define PCM990_IDE_PLD_REG3 0x1006 /* OFFSET IDE REGISTER 3 */ +#define PCM990_IDE_IDEOE 0x0001 /* RW Latch on Databus */ +#define PCM990_IDE_IDEON 0x0002 /* RW Latch on Control Address */ +#define PCM990_IDE_IDEIN 0x0004 /* RW Latch on Interrupt usw. 
*/ + +#define PCM990_IDE_PLD_REG4 0x1008 /* OFFSET IDE REGISTER 4 */ +#define PCM990_IDE_PWRENA 0x0001 /* RW IDE Power enable */ +#define PCM990_IDE_5V 0x0002 /* R IDE Power 5V */ +#define PCM990_IDE_PWG 0x0008 /* R IDE Power is on */ + +#define PCM990_IDE_PLD_P2V(x) ((x) - PCM990_IDE_PLD_PHYS + PCM990_IDE_PLD_BASE) +#define PCM990_IDE_PLD_V2P(x) ((x) - PCM990_IDE_PLD_BASE + PCM990_IDE_PLD_PHYS) + +#ifndef __ASSEMBLY__ +# define __PCM990_IDE_PLD_REG(x) \ + (*((volatile unsigned char *)PCM990_IDE_PLD_P2V(x))) +#else +# define __PCM990_IDE_PLD_REG(x) PCM990_IDE_PLD_P2V(x) +#endif + +#define PCM990_IDE0 \ + __PCM990_IDE_PLD_REG(PCM990_IDE_PLD_PHYS + PCM990_IDE_PLD_REG0) +#define PCM990_IDE1 \ + __PCM990_IDE_PLD_REG(PCM990_IDE_PLD_PHYS + PCM990_IDE_PLD_REG1) +#define PCM990_IDE2 \ + __PCM990_IDE_PLD_REG(PCM990_IDE_PLD_PHYS + PCM990_IDE_PLD_REG2) +#define PCM990_IDE3 \ + __PCM990_IDE_PLD_REG(PCM990_IDE_PLD_PHYS + PCM990_IDE_PLD_REG3) +#define PCM990_IDE4 \ + __PCM990_IDE_PLD_REG(PCM990_IDE_PLD_PHYS + PCM990_IDE_PLD_REG4) + +/* + * Compact Flash + */ +#define PCM990_CF_IRQ_GPIO 11 +#define PCM990_CF_IRQ IRQ_GPIO(PCM990_CF_IRQ_GPIO) +#define PCM990_CF_IRQ_EDGE IRQT_RISING + +#define PCM990_CF_CD_GPIO 12 +#define PCM990_CF_CD IRQ_GPIO(PCM990_CF_CD_GPIO) +#define PCM990_CF_CD_EDGE IRQT_RISING + +#define PCM990_CF_PLD_PHYS 0x30000000 /* 16 bit wide */ +#define PCM990_CF_PLD_BASE 0xef000000 +#define PCM990_CF_PLD_SIZE (1*1024*1024) +#define PCM990_CF_PLD_P2V(x) ((x) - PCM990_CF_PLD_PHYS + PCM990_CF_PLD_BASE) +#define PCM990_CF_PLD_V2P(x) ((x) - PCM990_CF_PLD_BASE + PCM990_CF_PLD_PHYS) + +/* visible CPLD (U6) registers */ +#define PCM990_CF_PLD_REG0 0x1000 /* OFFSET CF REGISTER 0 */ +#define PCM990_CF_REG0_LED 0x0001 /* RW LED on */ +#define PCM990_CF_REG0_BLK 0x0002 /* RW LED flash when access */ +#define PCM990_CF_REG0_PM5V 0x0004 /* R System VCC_5V enable */ +#define PCM990_CF_REG0_STBY 0x0008 /* R System StandBy */ + +#define PCM990_CF_PLD_REG1 0x1002 /* OFFSET CF REGISTER 1 */ +#define PCM990_CF_REG1_IDEMODE 0x0001 /* RW CF card run as TrueIDE */ +#define PCM990_CF_REG1_CF0 0x0002 /* RW CF card at ADDR 0x28000000 */ + +#define PCM990_CF_PLD_REG2 0x1004 /* OFFSET CF REGISTER 2 */ +#define PCM990_CF_REG2_RES 0x0002 /* RW CF RESET BIT */ +#define PCM990_CF_REG2_RDYENA 0x0004 /* RW Enable CF_RDY */ +#define PCM990_CF_REG2_RDY 0x0008 /* R CF_RDY auf PWAIT */ + +#define PCM990_CF_PLD_REG3 0x1006 /* OFFSET CF REGISTER 3 */ +#define PCM990_CF_REG3_CFOE 0x0001 /* RW Latch on Databus */ +#define PCM990_CF_REG3_CFON 0x0002 /* RW Latch on Control Address */ +#define PCM990_CF_REG3_CFIN 0x0004 /* RW Latch on Interrupt usw. 
*/ +#define PCM990_CF_REG3_CFCD 0x0008 /* RW Latch on CD1/2 VS1/2 usw */ + +#define PCM990_CF_PLD_REG4 0x1008 /* OFFSET CF REGISTER 4 */ +#define PCM990_CF_REG4_PWRENA 0x0001 /* RW CF Power on (CD1/2 = "00") */ +#define PCM990_CF_REG4_5_3V 0x0002 /* RW 1 = 5V CF_VCC 0 = 3 V CF_VCC */ +#define PCM990_CF_REG4_3B 0x0004 /* RW 3.0V Backup from VCC (5_3V=0) */ +#define PCM990_CF_REG4_PWG 0x0008 /* R CF-Power is on */ + +#define PCM990_CF_PLD_REG5 0x100A /* OFFSET CF REGISTER 5 */ +#define PCM990_CF_REG5_BVD1 0x0001 /* R CF /BVD1 */ +#define PCM990_CF_REG5_BVD2 0x0002 /* R CF /BVD2 */ +#define PCM990_CF_REG5_VS1 0x0004 /* R CF /VS1 */ +#define PCM990_CF_REG5_VS2 0x0008 /* R CF /VS2 */ + +#define PCM990_CF_PLD_REG6 0x100C /* OFFSET CF REGISTER 6 */ +#define PCM990_CF_REG6_CD1 0x0001 /* R CF Card_Detect1 */ +#define PCM990_CF_REG6_CD2 0x0002 /* R CF Card_Detect2 */ + +#ifndef __ASSEMBLY__ +# define __PCM990_CF_PLD_REG(x) \ + (*((volatile unsigned char *)PCM990_CF_PLD_P2V(x))) +#else +# define __PCM990_CF_PLD_REG(x) PCM990_CF_PLD_P2V(x) +#endif + +#define PCM990_CF0 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG0) +#define PCM990_CF1 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG1) +#define PCM990_CF2 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG2) +#define PCM990_CF3 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG3) +#define PCM990_CF4 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG4) +#define PCM990_CF5 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG5) +#define PCM990_CF6 __PCM990_CF_PLD_REG(PCM990_CF_PLD_PHYS + PCM990_CF_PLD_REG6) + +/* + * Wolfson AC97 Touch + */ +#define PCM990_AC97_IRQ_GPIO 10 +#define PCM990_AC97_IRQ IRQ_GPIO(PCM990_AC97_IRQ_GPIO) +#define PCM990_AC97_IRQ_EDGE IRQT_RISING + +/* + * MMC phyCORE + */ +#define PCM990_MMC0_IRQ_GPIO 9 +#define PCM990_MMC0_IRQ IRQ_GPIO(PCM990_MMC0_IRQ_GPIO) +#define PCM990_MMC0_IRQ_EDGE IRQT_FALLING + +/* + * USB phyCore + */ +#define PCM990_USB_OVERCURRENT (88 | GPIO_ALT_FN_1_IN) +#define PCM990_USB_PWR_EN (89 | GPIO_ALT_FN_2_OUT) diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h index 1bd398da07da..442494d71f12 100644 --- a/include/asm-arm/arch-pxa/pxa-regs.h +++ b/include/asm-arm/arch-pxa/pxa-regs.h @@ -1597,176 +1597,10 @@ #define PWER_GPIO15 PWER_GPIO (15) /* GPIO [15] wake-up enable */ #define PWER_RTC 0x80000000 /* RTC alarm wake-up enable */ - /* - * SSP Serial Port Registers - * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different. - * PXA255, PXA26x and PXA27x have extra ports, registers and bits. 
+ * SSP Serial Port Registers - see include/asm-arm/arch-pxa/regs-ssp.h */ - /* Common PXA2xx bits first */ -#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ -#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ -#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */ -#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */ -#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */ -#define SSCR0_National (0x2 << 4) /* National Microwire */ -#define SSCR0_ECS (1 << 6) /* External clock select */ -#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ -#if defined(CONFIG_PXA25x) -#define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */ -#define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */ -#elif defined(CONFIG_PXA27x) -#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */ -#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */ -#define SSCR0_EDSS (1 << 20) /* Extended data size select */ -#define SSCR0_NCS (1 << 21) /* Network clock select */ -#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */ -#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */ -#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */ -#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ -#define SSCR0_ADC (1 << 30) /* Audio clock select */ -#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ -#endif - -#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ -#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ -#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ -#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */ -#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */ -#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */ -#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ -#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ -#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ -#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ - -#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */ -#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */ -#define SSSR_BSY (1 << 4) /* SSP Busy */ -#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */ -#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */ -#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */ - -#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Interrupt Mask */ -#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run interrupt Mask */ -#define SSCR0_NCS (1 << 21) /* Network Clock Select */ -#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */ - -/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ -#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ -#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ -#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */ -#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */ -#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */ -#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */ -#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */ -#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */ -#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */ -#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */ -#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */ -#define SSCR1_TRAIL (1 << 22) /* 
Trailing Byte */ -#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */ -#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */ -#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */ -#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interupt Enable */ -#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */ -#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */ - -#define SSSR_BCE (1 << 23) /* Bit Count Error */ -#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */ -#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */ -#define SSSR_EOC (1 << 20) /* End Of Chain */ -#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */ -#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */ - -#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */ -#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */ -#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */ -#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */ -#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */ -#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */ -#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */ -#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */ -#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */ - -#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ -#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ -#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ - -#define SSCR0_P1 __REG(0x41000000) /* SSP Port 1 Control Register 0 */ -#define SSCR1_P1 __REG(0x41000004) /* SSP Port 1 Control Register 1 */ -#define SSSR_P1 __REG(0x41000008) /* SSP Port 1 Status Register */ -#define SSITR_P1 __REG(0x4100000C) /* SSP Port 1 Interrupt Test Register */ -#define SSDR_P1 __REG(0x41000010) /* (Write / Read) SSP Port 1 Data Write Register/SSP Data Read Register */ - -/* Support existing PXA25x drivers */ -#define SSCR0 SSCR0_P1 /* SSP Control Register 0 */ -#define SSCR1 SSCR1_P1 /* SSP Control Register 1 */ -#define SSSR SSSR_P1 /* SSP Status Register */ -#define SSITR SSITR_P1 /* SSP Interrupt Test Register */ -#define SSDR SSDR_P1 /* (Write / Read) SSP Data Write Register/SSP Data Read Register */ - -/* PXA27x ports */ -#if defined (CONFIG_PXA27x) -#define SSTO_P1 __REG(0x41000028) /* SSP Port 1 Time Out Register */ -#define SSPSP_P1 __REG(0x4100002C) /* SSP Port 1 Programmable Serial Protocol */ -#define SSTSA_P1 __REG(0x41000030) /* SSP Port 1 Tx Timeslot Active */ -#define SSRSA_P1 __REG(0x41000034) /* SSP Port 1 Rx Timeslot Active */ -#define SSTSS_P1 __REG(0x41000038) /* SSP Port 1 Timeslot Status */ -#define SSACD_P1 __REG(0x4100003C) /* SSP Port 1 Audio Clock Divider */ -#define SSCR0_P2 __REG(0x41700000) /* SSP Port 2 Control Register 0 */ -#define SSCR1_P2 __REG(0x41700004) /* SSP Port 2 Control Register 1 */ -#define SSSR_P2 __REG(0x41700008) /* SSP Port 2 Status Register */ -#define SSITR_P2 __REG(0x4170000C) /* SSP Port 2 Interrupt Test Register */ -#define SSDR_P2 __REG(0x41700010) /* (Write / Read) SSP Port 2 Data Write Register/SSP Data Read Register */ -#define SSTO_P2 __REG(0x41700028) /* SSP Port 2 Time Out Register */ -#define SSPSP_P2 __REG(0x4170002C) /* SSP Port 2 Programmable Serial Protocol */ -#define SSTSA_P2 __REG(0x41700030) /* SSP Port 2 Tx Timeslot Active */ -#define SSRSA_P2 __REG(0x41700034) /* SSP Port 2 Rx Timeslot Active */ -#define SSTSS_P2 __REG(0x41700038) /* SSP Port 2 Timeslot Status */ -#define SSACD_P2 __REG(0x4170003C) /* SSP Port 2 
Audio Clock Divider */ -#define SSCR0_P3 __REG(0x41900000) /* SSP Port 3 Control Register 0 */ -#define SSCR1_P3 __REG(0x41900004) /* SSP Port 3 Control Register 1 */ -#define SSSR_P3 __REG(0x41900008) /* SSP Port 3 Status Register */ -#define SSITR_P3 __REG(0x4190000C) /* SSP Port 3 Interrupt Test Register */ -#define SSDR_P3 __REG(0x41900010) /* (Write / Read) SSP Port 3 Data Write Register/SSP Data Read Register */ -#define SSTO_P3 __REG(0x41900028) /* SSP Port 3 Time Out Register */ -#define SSPSP_P3 __REG(0x4190002C) /* SSP Port 3 Programmable Serial Protocol */ -#define SSTSA_P3 __REG(0x41900030) /* SSP Port 3 Tx Timeslot Active */ -#define SSRSA_P3 __REG(0x41900034) /* SSP Port 3 Rx Timeslot Active */ -#define SSTSS_P3 __REG(0x41900038) /* SSP Port 3 Timeslot Status */ -#define SSACD_P3 __REG(0x4190003C) /* SSP Port 3 Audio Clock Divider */ -#else /* PXA255 (only port 2) and PXA26x ports*/ -#define SSTO_P1 __REG(0x41000028) /* SSP Port 1 Time Out Register */ -#define SSPSP_P1 __REG(0x4100002C) /* SSP Port 1 Programmable Serial Protocol */ -#define SSCR0_P2 __REG(0x41400000) /* SSP Port 2 Control Register 0 */ -#define SSCR1_P2 __REG(0x41400004) /* SSP Port 2 Control Register 1 */ -#define SSSR_P2 __REG(0x41400008) /* SSP Port 2 Status Register */ -#define SSITR_P2 __REG(0x4140000C) /* SSP Port 2 Interrupt Test Register */ -#define SSDR_P2 __REG(0x41400010) /* (Write / Read) SSP Port 2 Data Write Register/SSP Data Read Register */ -#define SSTO_P2 __REG(0x41400028) /* SSP Port 2 Time Out Register */ -#define SSPSP_P2 __REG(0x4140002C) /* SSP Port 2 Programmable Serial Protocol */ -#define SSCR0_P3 __REG(0x41500000) /* SSP Port 3 Control Register 0 */ -#define SSCR1_P3 __REG(0x41500004) /* SSP Port 3 Control Register 1 */ -#define SSSR_P3 __REG(0x41500008) /* SSP Port 3 Status Register */ -#define SSITR_P3 __REG(0x4150000C) /* SSP Port 3 Interrupt Test Register */ -#define SSDR_P3 __REG(0x41500010) /* (Write / Read) SSP Port 3 Data Write Register/SSP Data Read Register */ -#define SSTO_P3 __REG(0x41500028) /* SSP Port 3 Time Out Register */ -#define SSPSP_P3 __REG(0x4150002C) /* SSP Port 3 Programmable Serial Protocol */ -#endif - -#define SSCR0_P(x) (*(((x) == 1) ? &SSCR0_P1 : ((x) == 2) ? &SSCR0_P2 : ((x) == 3) ? &SSCR0_P3 : NULL)) -#define SSCR1_P(x) (*(((x) == 1) ? &SSCR1_P1 : ((x) == 2) ? &SSCR1_P2 : ((x) == 3) ? &SSCR1_P3 : NULL)) -#define SSSR_P(x) (*(((x) == 1) ? &SSSR_P1 : ((x) == 2) ? &SSSR_P2 : ((x) == 3) ? &SSSR_P3 : NULL)) -#define SSITR_P(x) (*(((x) == 1) ? &SSITR_P1 : ((x) == 2) ? &SSITR_P2 : ((x) == 3) ? &SSITR_P3 : NULL)) -#define SSDR_P(x) (*(((x) == 1) ? &SSDR_P1 : ((x) == 2) ? &SSDR_P2 : ((x) == 3) ? &SSDR_P3 : NULL)) -#define SSTO_P(x) (*(((x) == 1) ? &SSTO_P1 : ((x) == 2) ? &SSTO_P2 : ((x) == 3) ? &SSTO_P3 : NULL)) -#define SSPSP_P(x) (*(((x) == 1) ? &SSPSP_P1 : ((x) == 2) ? &SSPSP_P2 : ((x) == 3) ? &SSPSP_P3 : NULL)) -#define SSTSA_P(x) (*(((x) == 1) ? &SSTSA_P1 : ((x) == 2) ? &SSTSA_P2 : ((x) == 3) ? &SSTSA_P3 : NULL)) -#define SSRSA_P(x) (*(((x) == 1) ? &SSRSA_P1 : ((x) == 2) ? &SSRSA_P2 : ((x) == 3) ? &SSRSA_P3 : NULL)) -#define SSTSS_P(x) (*(((x) == 1) ? &SSTSS_P1 : ((x) == 2) ? &SSTSS_P2 : ((x) == 3) ? &SSTSS_P3 : NULL)) -#define SSACD_P(x) (*(((x) == 1) ? &SSACD_P1 : ((x) == 2) ? &SSACD_P2 : ((x) == 3) ? 
&SSACD_P3 : NULL)) - /* * MultiMediaCard (MMC) controller - see drivers/mmc/host/pxamci.h */ @@ -2014,71 +1848,8 @@ #define LDCMD_PAL (1 << 26) /* instructs DMA to load palette buffer */ -/* - * Memory controller - */ - -#define MDCNFG __REG(0x48000000) /* SDRAM Configuration Register 0 */ -#define MDREFR __REG(0x48000004) /* SDRAM Refresh Control Register */ -#define MSC0 __REG(0x48000008) /* Static Memory Control Register 0 */ -#define MSC1 __REG(0x4800000C) /* Static Memory Control Register 1 */ -#define MSC2 __REG(0x48000010) /* Static Memory Control Register 2 */ -#define MECR __REG(0x48000014) /* Expansion Memory (PCMCIA/Compact Flash) Bus Configuration */ -#define SXLCR __REG(0x48000018) /* LCR value to be written to SDRAM-Timing Synchronous Flash */ -#define SXCNFG __REG(0x4800001C) /* Synchronous Static Memory Control Register */ -#define SXMRS __REG(0x48000024) /* MRS value to be written to Synchronous Flash or SMROM */ -#define MCMEM0 __REG(0x48000028) /* Card interface Common Memory Space Socket 0 Timing */ -#define MCMEM1 __REG(0x4800002C) /* Card interface Common Memory Space Socket 1 Timing */ -#define MCATT0 __REG(0x48000030) /* Card interface Attribute Space Socket 0 Timing Configuration */ -#define MCATT1 __REG(0x48000034) /* Card interface Attribute Space Socket 1 Timing Configuration */ -#define MCIO0 __REG(0x48000038) /* Card interface I/O Space Socket 0 Timing Configuration */ -#define MCIO1 __REG(0x4800003C) /* Card interface I/O Space Socket 1 Timing Configuration */ -#define MDMRS __REG(0x48000040) /* MRS value to be written to SDRAM */ -#define BOOT_DEF __REG(0x48000044) /* Read-Only Boot-Time Register. Contains BOOT_SEL and PKG_SEL */ - -/* - * More handy macros for PCMCIA - * - * Arg is socket number - */ -#define MCMEM(s) __REG2(0x48000028, (s)<<2 ) /* Card interface Common Memory Space Socket s Timing */ -#define MCATT(s) __REG2(0x48000030, (s)<<2 ) /* Card interface Attribute Space Socket s Timing Configuration */ -#define MCIO(s) __REG2(0x48000038, (s)<<2 ) /* Card interface I/O Space Socket s Timing Configuration */ - -/* MECR register defines */ -#define MECR_NOS (1 << 0) /* Number Of Sockets: 0 -> 1 sock, 1 -> 2 sock */ -#define MECR_CIT (1 << 1) /* Card Is There: 0 -> no card, 1 -> card inserted */ - -#define MDREFR_K0DB4 (1 << 29) /* SDCLK0 Divide by 4 Control/Status */ -#define MDREFR_K2FREE (1 << 25) /* SDRAM Free-Running Control */ -#define MDREFR_K1FREE (1 << 24) /* SDRAM Free-Running Control */ -#define MDREFR_K0FREE (1 << 23) /* SDRAM Free-Running Control */ -#define MDREFR_SLFRSH (1 << 22) /* SDRAM Self-Refresh Control/Status */ -#define MDREFR_APD (1 << 20) /* SDRAM/SSRAM Auto-Power-Down Enable */ -#define MDREFR_K2DB2 (1 << 19) /* SDCLK2 Divide by 2 Control/Status */ -#define MDREFR_K2RUN (1 << 18) /* SDCLK2 Run Control/Status */ -#define MDREFR_K1DB2 (1 << 17) /* SDCLK1 Divide by 2 Control/Status */ -#define MDREFR_K1RUN (1 << 16) /* SDCLK1 Run Control/Status */ -#define MDREFR_E1PIN (1 << 15) /* SDCKE1 Level Control/Status */ -#define MDREFR_K0DB2 (1 << 14) /* SDCLK0 Divide by 2 Control/Status */ -#define MDREFR_K0RUN (1 << 13) /* SDCLK0 Run Control/Status */ -#define MDREFR_E0PIN (1 << 12) /* SDCKE0 Level Control/Status */ - - #ifdef CONFIG_PXA27x -#define ARB_CNTRL __REG(0x48000048) /* Arbiter Control Register */ - -#define ARB_DMA_SLV_PARK (1<<31) /* Be parked with DMA slave when idle */ -#define ARB_CI_PARK (1<<30) /* Be parked with Camera Interface when idle */ -#define ARB_EX_MEM_PARK (1<<29) /* Be parked with external MEMC when idle */ 
-#define ARB_INT_MEM_PARK (1<<28) /* Be parked with internal MEMC when idle */ -#define ARB_USB_PARK (1<<27) /* Be parked with USB when idle */ -#define ARB_LCD_PARK (1<<26) /* Be parked with LCD when idle */ -#define ARB_DMA_PARK (1<<25) /* Be parked with DMA when idle */ -#define ARB_CORE_PARK (1<<24) /* Be parked with core when idle */ -#define ARB_LOCK_FLAG (1<<23) /* Only Locking masters gain access to the bus */ - /* * Keypad */ @@ -2135,74 +1906,6 @@ #define KPAS_SO (0x1 << 31) #define KPASMKPx_SO (0x1 << 31) -/* - * UHC: USB Host Controller (OHCI-like) register definitions - */ -#define UHC_BASE_PHYS (0x4C000000) -#define UHCREV __REG(0x4C000000) /* UHC HCI Spec Revision */ -#define UHCHCON __REG(0x4C000004) /* UHC Host Control Register */ -#define UHCCOMS __REG(0x4C000008) /* UHC Command Status Register */ -#define UHCINTS __REG(0x4C00000C) /* UHC Interrupt Status Register */ -#define UHCINTE __REG(0x4C000010) /* UHC Interrupt Enable */ -#define UHCINTD __REG(0x4C000014) /* UHC Interrupt Disable */ -#define UHCHCCA __REG(0x4C000018) /* UHC Host Controller Comm. Area */ -#define UHCPCED __REG(0x4C00001C) /* UHC Period Current Endpt Descr */ -#define UHCCHED __REG(0x4C000020) /* UHC Control Head Endpt Descr */ -#define UHCCCED __REG(0x4C000024) /* UHC Control Current Endpt Descr */ -#define UHCBHED __REG(0x4C000028) /* UHC Bulk Head Endpt Descr */ -#define UHCBCED __REG(0x4C00002C) /* UHC Bulk Current Endpt Descr */ -#define UHCDHEAD __REG(0x4C000030) /* UHC Done Head */ -#define UHCFMI __REG(0x4C000034) /* UHC Frame Interval */ -#define UHCFMR __REG(0x4C000038) /* UHC Frame Remaining */ -#define UHCFMN __REG(0x4C00003C) /* UHC Frame Number */ -#define UHCPERS __REG(0x4C000040) /* UHC Periodic Start */ -#define UHCLS __REG(0x4C000044) /* UHC Low Speed Threshold */ - -#define UHCRHDA __REG(0x4C000048) /* UHC Root Hub Descriptor A */ -#define UHCRHDA_NOCP (1 << 12) /* No over current protection */ - -#define UHCRHDB __REG(0x4C00004C) /* UHC Root Hub Descriptor B */ -#define UHCRHS __REG(0x4C000050) /* UHC Root Hub Status */ -#define UHCRHPS1 __REG(0x4C000054) /* UHC Root Hub Port 1 Status */ -#define UHCRHPS2 __REG(0x4C000058) /* UHC Root Hub Port 2 Status */ -#define UHCRHPS3 __REG(0x4C00005C) /* UHC Root Hub Port 3 Status */ - -#define UHCSTAT __REG(0x4C000060) /* UHC Status Register */ -#define UHCSTAT_UPS3 (1 << 16) /* USB Power Sense Port3 */ -#define UHCSTAT_SBMAI (1 << 15) /* System Bus Master Abort Interrupt*/ -#define UHCSTAT_SBTAI (1 << 14) /* System Bus Target Abort Interrupt*/ -#define UHCSTAT_UPRI (1 << 13) /* USB Port Resume Interrupt */ -#define UHCSTAT_UPS2 (1 << 12) /* USB Power Sense Port 2 */ -#define UHCSTAT_UPS1 (1 << 11) /* USB Power Sense Port 1 */ -#define UHCSTAT_HTA (1 << 10) /* HCI Target Abort */ -#define UHCSTAT_HBA (1 << 8) /* HCI Buffer Active */ -#define UHCSTAT_RWUE (1 << 7) /* HCI Remote Wake Up Event */ - -#define UHCHR __REG(0x4C000064) /* UHC Reset Register */ -#define UHCHR_SSEP3 (1 << 11) /* Sleep Standby Enable for Port3 */ -#define UHCHR_SSEP2 (1 << 10) /* Sleep Standby Enable for Port2 */ -#define UHCHR_SSEP1 (1 << 9) /* Sleep Standby Enable for Port1 */ -#define UHCHR_PCPL (1 << 7) /* Power control polarity low */ -#define UHCHR_PSPL (1 << 6) /* Power sense polarity low */ -#define UHCHR_SSE (1 << 5) /* Sleep Standby Enable */ -#define UHCHR_UIT (1 << 4) /* USB Interrupt Test */ -#define UHCHR_SSDC (1 << 3) /* Simulation Scale Down Clock */ -#define UHCHR_CGR (1 << 2) /* Clock Generation Reset */ -#define UHCHR_FHR (1 << 1) /* Force Host 
Controller Reset */ -#define UHCHR_FSBIR (1 << 0) /* Force System Bus Iface Reset */ - -#define UHCHIE __REG(0x4C000068) /* UHC Interrupt Enable Register*/ -#define UHCHIE_UPS3IE (1 << 14) /* Power Sense Port3 IntEn */ -#define UHCHIE_UPRIE (1 << 13) /* Port Resume IntEn */ -#define UHCHIE_UPS2IE (1 << 12) /* Power Sense Port2 IntEn */ -#define UHCHIE_UPS1IE (1 << 11) /* Power Sense Port1 IntEn */ -#define UHCHIE_TAIE (1 << 10) /* HCI Interface Transfer Abort - Interrupt Enable*/ -#define UHCHIE_HBAIE (1 << 8) /* HCI Buffer Active IntEn */ -#define UHCHIE_RWIE (1 << 7) /* Remote Wake-up IntEn */ - -#define UHCHIT __REG(0x4C00006C) /* UHC Interrupt Test register */ - /* Camera Interface */ #define CICR0 __REG(0x50000000) #define CICR1 __REG(0x50000004) @@ -2350,6 +2053,77 @@ #endif +#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) +/* + * UHC: USB Host Controller (OHCI-like) register definitions + */ +#define UHC_BASE_PHYS (0x4C000000) +#define UHCREV __REG(0x4C000000) /* UHC HCI Spec Revision */ +#define UHCHCON __REG(0x4C000004) /* UHC Host Control Register */ +#define UHCCOMS __REG(0x4C000008) /* UHC Command Status Register */ +#define UHCINTS __REG(0x4C00000C) /* UHC Interrupt Status Register */ +#define UHCINTE __REG(0x4C000010) /* UHC Interrupt Enable */ +#define UHCINTD __REG(0x4C000014) /* UHC Interrupt Disable */ +#define UHCHCCA __REG(0x4C000018) /* UHC Host Controller Comm. Area */ +#define UHCPCED __REG(0x4C00001C) /* UHC Period Current Endpt Descr */ +#define UHCCHED __REG(0x4C000020) /* UHC Control Head Endpt Descr */ +#define UHCCCED __REG(0x4C000024) /* UHC Control Current Endpt Descr */ +#define UHCBHED __REG(0x4C000028) /* UHC Bulk Head Endpt Descr */ +#define UHCBCED __REG(0x4C00002C) /* UHC Bulk Current Endpt Descr */ +#define UHCDHEAD __REG(0x4C000030) /* UHC Done Head */ +#define UHCFMI __REG(0x4C000034) /* UHC Frame Interval */ +#define UHCFMR __REG(0x4C000038) /* UHC Frame Remaining */ +#define UHCFMN __REG(0x4C00003C) /* UHC Frame Number */ +#define UHCPERS __REG(0x4C000040) /* UHC Periodic Start */ +#define UHCLS __REG(0x4C000044) /* UHC Low Speed Threshold */ + +#define UHCRHDA __REG(0x4C000048) /* UHC Root Hub Descriptor A */ +#define UHCRHDA_NOCP (1 << 12) /* No over current protection */ + +#define UHCRHDB __REG(0x4C00004C) /* UHC Root Hub Descriptor B */ +#define UHCRHS __REG(0x4C000050) /* UHC Root Hub Status */ +#define UHCRHPS1 __REG(0x4C000054) /* UHC Root Hub Port 1 Status */ +#define UHCRHPS2 __REG(0x4C000058) /* UHC Root Hub Port 2 Status */ +#define UHCRHPS3 __REG(0x4C00005C) /* UHC Root Hub Port 3 Status */ + +#define UHCSTAT __REG(0x4C000060) /* UHC Status Register */ +#define UHCSTAT_UPS3 (1 << 16) /* USB Power Sense Port3 */ +#define UHCSTAT_SBMAI (1 << 15) /* System Bus Master Abort Interrupt*/ +#define UHCSTAT_SBTAI (1 << 14) /* System Bus Target Abort Interrupt*/ +#define UHCSTAT_UPRI (1 << 13) /* USB Port Resume Interrupt */ +#define UHCSTAT_UPS2 (1 << 12) /* USB Power Sense Port 2 */ +#define UHCSTAT_UPS1 (1 << 11) /* USB Power Sense Port 1 */ +#define UHCSTAT_HTA (1 << 10) /* HCI Target Abort */ +#define UHCSTAT_HBA (1 << 8) /* HCI Buffer Active */ +#define UHCSTAT_RWUE (1 << 7) /* HCI Remote Wake Up Event */ + +#define UHCHR __REG(0x4C000064) /* UHC Reset Register */ +#define UHCHR_SSEP3 (1 << 11) /* Sleep Standby Enable for Port3 */ +#define UHCHR_SSEP2 (1 << 10) /* Sleep Standby Enable for Port2 */ +#define UHCHR_SSEP1 (1 << 9) /* Sleep Standby Enable for Port1 */ +#define UHCHR_PCPL (1 << 7) /* Power control polarity low */ +#define 
UHCHR_PSPL (1 << 6) /* Power sense polarity low */ +#define UHCHR_SSE (1 << 5) /* Sleep Standby Enable */ +#define UHCHR_UIT (1 << 4) /* USB Interrupt Test */ +#define UHCHR_SSDC (1 << 3) /* Simulation Scale Down Clock */ +#define UHCHR_CGR (1 << 2) /* Clock Generation Reset */ +#define UHCHR_FHR (1 << 1) /* Force Host Controller Reset */ +#define UHCHR_FSBIR (1 << 0) /* Force System Bus Iface Reset */ + +#define UHCHIE __REG(0x4C000068) /* UHC Interrupt Enable Register*/ +#define UHCHIE_UPS3IE (1 << 14) /* Power Sense Port3 IntEn */ +#define UHCHIE_UPRIE (1 << 13) /* Port Resume IntEn */ +#define UHCHIE_UPS2IE (1 << 12) /* Power Sense Port2 IntEn */ +#define UHCHIE_UPS1IE (1 << 11) /* Power Sense Port1 IntEn */ +#define UHCHIE_TAIE (1 << 10) /* HCI Interface Transfer Abort + Interrupt Enable*/ +#define UHCHIE_HBAIE (1 << 8) /* HCI Buffer Active IntEn */ +#define UHCHIE_RWIE (1 << 7) /* Remote Wake-up IntEn */ + +#define UHCHIT __REG(0x4C00006C) /* UHC Interrupt Test register */ + +#endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ + /* PWRMODE register M field values */ #define PWRMODE_IDLE 0x1 diff --git a/include/asm-arm/arch-pxa/pxa2xx-regs.h b/include/asm-arm/arch-pxa/pxa2xx-regs.h new file mode 100644 index 000000000000..9553b54fa5bc --- /dev/null +++ b/include/asm-arm/arch-pxa/pxa2xx-regs.h @@ -0,0 +1,84 @@ +/* + * linux/include/asm-arm/arch-pxa/pxa2xx-regs.h + * + * Taken from pxa-regs.h by Russell King + * + * Author: Nicolas Pitre + * Copyright: MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __PXA2XX_REGS_H +#define __PXA2XX_REGS_H + +/* + * Memory controller + */ + +#define MDCNFG __REG(0x48000000) /* SDRAM Configuration Register 0 */ +#define MDREFR __REG(0x48000004) /* SDRAM Refresh Control Register */ +#define MSC0 __REG(0x48000008) /* Static Memory Control Register 0 */ +#define MSC1 __REG(0x4800000C) /* Static Memory Control Register 1 */ +#define MSC2 __REG(0x48000010) /* Static Memory Control Register 2 */ +#define MECR __REG(0x48000014) /* Expansion Memory (PCMCIA/Compact Flash) Bus Configuration */ +#define SXLCR __REG(0x48000018) /* LCR value to be written to SDRAM-Timing Synchronous Flash */ +#define SXCNFG __REG(0x4800001C) /* Synchronous Static Memory Control Register */ +#define SXMRS __REG(0x48000024) /* MRS value to be written to Synchronous Flash or SMROM */ +#define MCMEM0 __REG(0x48000028) /* Card interface Common Memory Space Socket 0 Timing */ +#define MCMEM1 __REG(0x4800002C) /* Card interface Common Memory Space Socket 1 Timing */ +#define MCATT0 __REG(0x48000030) /* Card interface Attribute Space Socket 0 Timing Configuration */ +#define MCATT1 __REG(0x48000034) /* Card interface Attribute Space Socket 1 Timing Configuration */ +#define MCIO0 __REG(0x48000038) /* Card interface I/O Space Socket 0 Timing Configuration */ +#define MCIO1 __REG(0x4800003C) /* Card interface I/O Space Socket 1 Timing Configuration */ +#define MDMRS __REG(0x48000040) /* MRS value to be written to SDRAM */ +#define BOOT_DEF __REG(0x48000044) /* Read-Only Boot-Time Register. 
Contains BOOT_SEL and PKG_SEL */ + +/* + * More handy macros for PCMCIA + * + * Arg is socket number + */ +#define MCMEM(s) __REG2(0x48000028, (s)<<2 ) /* Card interface Common Memory Space Socket s Timing */ +#define MCATT(s) __REG2(0x48000030, (s)<<2 ) /* Card interface Attribute Space Socket s Timing Configuration */ +#define MCIO(s) __REG2(0x48000038, (s)<<2 ) /* Card interface I/O Space Socket s Timing Configuration */ + +/* MECR register defines */ +#define MECR_NOS (1 << 0) /* Number Of Sockets: 0 -> 1 sock, 1 -> 2 sock */ +#define MECR_CIT (1 << 1) /* Card Is There: 0 -> no card, 1 -> card inserted */ + +#define MDREFR_K0DB4 (1 << 29) /* SDCLK0 Divide by 4 Control/Status */ +#define MDREFR_K2FREE (1 << 25) /* SDRAM Free-Running Control */ +#define MDREFR_K1FREE (1 << 24) /* SDRAM Free-Running Control */ +#define MDREFR_K0FREE (1 << 23) /* SDRAM Free-Running Control */ +#define MDREFR_SLFRSH (1 << 22) /* SDRAM Self-Refresh Control/Status */ +#define MDREFR_APD (1 << 20) /* SDRAM/SSRAM Auto-Power-Down Enable */ +#define MDREFR_K2DB2 (1 << 19) /* SDCLK2 Divide by 2 Control/Status */ +#define MDREFR_K2RUN (1 << 18) /* SDCLK2 Run Control/Status */ +#define MDREFR_K1DB2 (1 << 17) /* SDCLK1 Divide by 2 Control/Status */ +#define MDREFR_K1RUN (1 << 16) /* SDCLK1 Run Control/Status */ +#define MDREFR_E1PIN (1 << 15) /* SDCKE1 Level Control/Status */ +#define MDREFR_K0DB2 (1 << 14) /* SDCLK0 Divide by 2 Control/Status */ +#define MDREFR_K0RUN (1 << 13) /* SDCLK0 Run Control/Status */ +#define MDREFR_E0PIN (1 << 12) /* SDCKE0 Level Control/Status */ + + +#ifdef CONFIG_PXA27x + +#define ARB_CNTRL __REG(0x48000048) /* Arbiter Control Register */ + +#define ARB_DMA_SLV_PARK (1<<31) /* Be parked with DMA slave when idle */ +#define ARB_CI_PARK (1<<30) /* Be parked with Camera Interface when idle */ +#define ARB_EX_MEM_PARK (1<<29) /* Be parked with external MEMC when idle */ +#define ARB_INT_MEM_PARK (1<<28) /* Be parked with internal MEMC when idle */ +#define ARB_USB_PARK (1<<27) /* Be parked with USB when idle */ +#define ARB_LCD_PARK (1<<26) /* Be parked with LCD when idle */ +#define ARB_DMA_PARK (1<<25) /* Be parked with DMA when idle */ +#define ARB_CORE_PARK (1<<24) /* Be parked with core when idle */ +#define ARB_LOCK_FLAG (1<<23) /* Only Locking masters gain access to the bus */ + +#endif + +#endif diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h index acc7ec7a84a1..3459fb26ce97 100644 --- a/include/asm-arm/arch-pxa/pxa2xx_spi.h +++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h @@ -22,32 +22,8 @@ #define PXA2XX_CS_ASSERT (0x01) #define PXA2XX_CS_DEASSERT (0x02) -#if defined(CONFIG_PXA25x) -#define CLOCK_SPEED_HZ 3686400 -#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00) -#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) -#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) -#elif defined(CONFIG_PXA27x) -#define CLOCK_SPEED_HZ 13000000 -#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) -#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) -#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) -#endif - -#define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1))))) -#define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2))))) -#define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3))))) - -enum pxa_ssp_type { - SSP_UNDEFINED = 0, - PXA25x_SSP, /* pxa 210, 250, 255, 26x */ - PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ - PXA27x_SSP, -}; - /* device.platform_data for SSP 
controller devices */ struct pxa2xx_spi_master { - enum pxa_ssp_type ssp_type; u32 clock_enable; u16 num_chipselect; u8 enable_dma; diff --git a/include/asm-arm/arch-pxa/pxa3xx-regs.h b/include/asm-arm/arch-pxa/pxa3xx-regs.h index 3900a0ca0bc0..66d54119757c 100644 --- a/include/asm-arm/arch-pxa/pxa3xx-regs.h +++ b/include/asm-arm/arch-pxa/pxa3xx-regs.h @@ -14,6 +14,92 @@ #define __ASM_ARCH_PXA3XX_REGS_H /* + * Slave Power Managment Unit + */ +#define ASCR __REG(0x40f40000) /* Application Subsystem Power Status/Configuration */ +#define ARSR __REG(0x40f40004) /* Application Subsystem Reset Status */ +#define AD3ER __REG(0x40f40008) /* Application Subsystem Wake-Up from D3 Enable */ +#define AD3SR __REG(0x40f4000c) /* Application Subsystem Wake-Up from D3 Status */ +#define AD2D0ER __REG(0x40f40010) /* Application Subsystem Wake-Up from D2 to D0 Enable */ +#define AD2D0SR __REG(0x40f40014) /* Application Subsystem Wake-Up from D2 to D0 Status */ +#define AD2D1ER __REG(0x40f40018) /* Application Subsystem Wake-Up from D2 to D1 Enable */ +#define AD2D1SR __REG(0x40f4001c) /* Application Subsystem Wake-Up from D2 to D1 Status */ +#define AD1D0ER __REG(0x40f40020) /* Application Subsystem Wake-Up from D1 to D0 Enable */ +#define AD1D0SR __REG(0x40f40024) /* Application Subsystem Wake-Up from D1 to D0 Status */ +#define AGENP __REG(0x40f4002c) /* Application Subsystem General Purpose */ +#define AD3R __REG(0x40f40030) /* Application Subsystem D3 Configuration */ +#define AD2R __REG(0x40f40034) /* Application Subsystem D2 Configuration */ +#define AD1R __REG(0x40f40038) /* Application Subsystem D1 Configuration */ + +/* + * Application Subsystem Configuration bits. + */ +#define ASCR_RDH (1 << 31) +#define ASCR_D1S (1 << 2) +#define ASCR_D2S (1 << 1) +#define ASCR_D3S (1 << 0) + +/* + * Application Reset Status bits. + */ +#define ARSR_GPR (1 << 3) +#define ARSR_LPMR (1 << 2) +#define ARSR_WDT (1 << 1) +#define ARSR_HWR (1 << 0) + +/* + * Application Subsystem Wake-Up bits. + */ +#define ADXER_WRTC (1 << 31) /* RTC */ +#define ADXER_WOST (1 << 30) /* OS Timer */ +#define ADXER_WTSI (1 << 29) /* Touchscreen */ +#define ADXER_WUSBH (1 << 28) /* USB host */ +#define ADXER_WUSB2 (1 << 26) /* USB client 2.0 */ +#define ADXER_WMSL0 (1 << 24) /* MSL port 0*/ +#define ADXER_WDMUX3 (1 << 23) /* USB EDMUX3 */ +#define ADXER_WDMUX2 (1 << 22) /* USB EDMUX2 */ +#define ADXER_WKP (1 << 21) /* Keypad */ +#define ADXER_WUSIM1 (1 << 20) /* USIM Port 1 */ +#define ADXER_WUSIM0 (1 << 19) /* USIM Port 0 */ +#define ADXER_WOTG (1 << 16) /* USBOTG input */ +#define ADXER_MFP_WFLASH (1 << 15) /* MFP: Data flash busy */ +#define ADXER_MFP_GEN12 (1 << 14) /* MFP: MMC3/GPIO/OST inputs */ +#define ADXER_MFP_WMMC2 (1 << 13) /* MFP: MMC2 */ +#define ADXER_MFP_WMMC1 (1 << 12) /* MFP: MMC1 */ +#define ADXER_MFP_WI2C (1 << 11) /* MFP: I2C */ +#define ADXER_MFP_WSSP4 (1 << 10) /* MFP: SSP4 */ +#define ADXER_MFP_WSSP3 (1 << 9) /* MFP: SSP3 */ +#define ADXER_MFP_WMAXTRIX (1 << 8) /* MFP: matrix keypad */ +#define ADXER_MFP_WUART3 (1 << 7) /* MFP: UART3 */ +#define ADXER_MFP_WUART2 (1 << 6) /* MFP: UART2 */ +#define ADXER_MFP_WUART1 (1 << 5) /* MFP: UART1 */ +#define ADXER_MFP_WSSP2 (1 << 4) /* MFP: SSP2 */ +#define ADXER_MFP_WSSP1 (1 << 3) /* MFP: SSP1 */ +#define ADXER_MFP_WAC97 (1 << 2) /* MFP: AC97 */ +#define ADXER_WEXTWAKE1 (1 << 1) /* External Wake 1 */ +#define ADXER_WEXTWAKE0 (1 << 0) /* External Wake 0 */ + +/* + * AD3R/AD2R/AD1R bits. R2-R5 are only defined for PXA320. 
+ */ +#define ADXR_L2 (1 << 8) +#define ADXR_R5 (1 << 5) +#define ADXR_R4 (1 << 4) +#define ADXR_R3 (1 << 3) +#define ADXR_R2 (1 << 2) +#define ADXR_R1 (1 << 1) +#define ADXR_R0 (1 << 0) + +/* + * Values for PWRMODE CP15 register + */ +#define PXA3xx_PM_S3D4C4 0x07 /* aka deep sleep */ +#define PXA3xx_PM_S2D3C4 0x06 /* aka sleep */ +#define PXA3xx_PM_S0D2C2 0x03 /* aka standby */ +#define PXA3xx_PM_S0D1C2 0x02 /* aka LCD refresh */ +#define PXA3xx_PM_S0D0C1 0x01 + +/* * Application Subsystem Clock */ #define ACCR __REG(0x41340000) /* Application Subsystem Clock Configuration Register */ diff --git a/include/asm-arm/arch-pxa/regs-ssp.h b/include/asm-arm/arch-pxa/regs-ssp.h new file mode 100644 index 000000000000..991cb688db75 --- /dev/null +++ b/include/asm-arm/arch-pxa/regs-ssp.h @@ -0,0 +1,112 @@ +#ifndef __ASM_ARCH_REGS_SSP_H +#define __ASM_ARCH_REGS_SSP_H + +/* + * SSP Serial Port Registers + * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different. + * PXA255, PXA26x and PXA27x have extra ports, registers and bits. + */ + +#define SSCR0 (0x00) /* SSP Control Register 0 */ +#define SSCR1 (0x04) /* SSP Control Register 1 */ +#define SSSR (0x08) /* SSP Status Register */ +#define SSITR (0x0C) /* SSP Interrupt Test Register */ +#define SSDR (0x10) /* SSP Data Write/Data Read Register */ + +#define SSTO (0x28) /* SSP Time Out Register */ +#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ +#define SSTSA (0x30) /* SSP Tx Timeslot Active */ +#define SSRSA (0x34) /* SSP Rx Timeslot Active */ +#define SSTSS (0x38) /* SSP Timeslot Status */ +#define SSACD (0x3C) /* SSP Audio Clock Divider */ + +/* Common PXA2xx bits first */ +#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ +#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ +#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */ +#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */ +#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */ +#define SSCR0_National (0x2 << 4) /* National Microwire */ +#define SSCR0_ECS (1 << 6) /* External clock select */ +#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ +#if defined(CONFIG_PXA25x) +#define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */ +#define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */ +#elif defined(CONFIG_PXA27x) +#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */ +#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */ +#define SSCR0_EDSS (1 << 20) /* Extended data size select */ +#define SSCR0_NCS (1 << 21) /* Network clock select */ +#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */ +#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */ +#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */ +#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ +#define SSCR0_ADC (1 << 30) /* Audio clock select */ +#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ +#endif + +#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ +#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ +#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ +#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */ +#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */ +#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */ +#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ +#define 
SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ +#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ + +#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */ +#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */ +#define SSSR_BSY (1 << 4) /* SSP Busy */ +#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */ +#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */ +#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */ + +#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Interrupt Mask */ +#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run interrupt Mask */ +#define SSCR0_NCS (1 << 21) /* Network Clock Select */ +#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */ + +/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ +#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ +#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ +#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */ +#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */ +#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */ +#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */ +#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */ +#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */ +#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */ +#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */ +#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */ +#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */ +#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */ +#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */ +#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */ +#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interupt Enable */ +#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */ +#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */ + +#define SSSR_BCE (1 << 23) /* Bit Count Error */ +#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */ +#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */ +#define SSSR_EOC (1 << 20) /* End Of Chain */ +#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */ +#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */ + +#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */ +#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */ +#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */ +#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */ +#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */ +#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */ +#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */ +#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */ +#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */ + +#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ +#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ +#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ + +#endif /* __ASM_ARCH_REGS_SSP_H */ diff --git a/include/asm-arm/arch-pxa/sharpsl.h b/include/asm-arm/arch-pxa/sharpsl.h index 2b0fe773213a..3b1d4a72d4d1 100644 --- a/include/asm-arm/arch-pxa/sharpsl.h +++ b/include/asm-arm/arch-pxa/sharpsl.h @@ -16,7 +16,7 @@ int corgi_ssp_max1111_get(unsigned long data); */ struct corgits_machinfo { - unsigned long (*get_hsync_len)(void); + unsigned long (*get_hsync_invperiod)(void); void (*put_hsync)(void); void (*wait_hsync)(void); }; 
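The regs-ssp.h hunk above turns the SSP register definitions into plain offsets from a port's register base, which pairs with the ioremap()'d mmio_base carried by the reworked struct ssp_device further below. As a rough illustration only — the function names, the base/bits/clk_div parameters and the busy-wait loop are invented for this sketch and are not part of the patch — a driver built on these headers might program a port like this:

#include <linux/types.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/arch/regs-ssp.h>

/*
 * Illustrative sketch, not from the patch: set up "bits"-wide Motorola SPI
 * framing with the given clock divisor on an already-mapped SSP port.
 */
static void ssp_setup_spi_mode(void __iomem *base, unsigned int bits,
			       unsigned int clk_div)
{
	u32 sscr0;

	/* Disable the port before changing its configuration. */
	__raw_writel(__raw_readl(base + SSCR0) & ~SSCR0_SSE, base + SSCR0);

	sscr0 = SSCR0_Motorola | SSCR0_DataSize(bits) | SSCR0_SerClkDiv(clk_div);
	__raw_writel(sscr0, base + SSCR0);

	/* Turn the synchronous serial port back on. */
	__raw_writel(sscr0 | SSCR0_SSE, base + SSCR0);
}

/* Busy-wait for transmit FIFO space, then queue one data word. */
static void ssp_write_word(void __iomem *base, u32 data)
{
	while (!(__raw_readl(base + SSSR) & SSSR_TNF))
		cpu_relax();
	__raw_writel(data, base + SSDR);
}
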
diff --git a/include/asm-arm/arch-pxa/spitz.h b/include/asm-arm/arch-pxa/spitz.h index 4953dd324d4d..bd14365f7ed5 100644 --- a/include/asm-arm/arch-pxa/spitz.h +++ b/include/asm-arm/arch-pxa/spitz.h @@ -156,5 +156,3 @@ extern struct platform_device spitzscoop_device; extern struct platform_device spitzscoop2_device; extern struct platform_device spitzssp_device; extern struct sharpsl_charger_machinfo spitz_pm_machinfo; - -extern void spitz_lcd_power(int on, struct fb_var_screeninfo *var); diff --git a/include/asm-arm/arch-pxa/ssp.h b/include/asm-arm/arch-pxa/ssp.h index ea200551a75f..a012882c9ee6 100644 --- a/include/asm-arm/arch-pxa/ssp.h +++ b/include/asm-arm/arch-pxa/ssp.h @@ -13,10 +13,37 @@ * PXA255 SSP, NSSP * PXA26x SSP, NSSP, ASSP * PXA27x SSP1, SSP2, SSP3 + * PXA3xx SSP1, SSP2, SSP3, SSP4 */ -#ifndef SSP_H -#define SSP_H +#ifndef __ASM_ARCH_SSP_H +#define __ASM_ARCH_SSP_H + +#include <linux/list.h> + +enum pxa_ssp_type { + SSP_UNDEFINED = 0, + PXA25x_SSP, /* pxa 210, 250, 255, 26x */ + PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ + PXA27x_SSP, +}; + +struct ssp_device { + struct platform_device *pdev; + struct list_head node; + + struct clk *clk; + void __iomem *mmio_base; + unsigned long phys_base; + + const char *label; + int port_id; + int type; + int use_count; + int irq; + int drcmr_rx; + int drcmr_tx; +}; /* * SSP initialisation flags @@ -31,6 +58,7 @@ struct ssp_state { }; struct ssp_dev { + struct ssp_device *ssp; u32 port; u32 mode; u32 flags; @@ -50,4 +78,6 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags); int ssp_config(struct ssp_dev *dev, u32 mode, u32 flags, u32 psp_flags, u32 speed); void ssp_exit(struct ssp_dev *dev); -#endif +struct ssp_device *ssp_request(int port, const char *label); +void ssp_free(struct ssp_device *); +#endif /* __ASM_ARCH_SSP_H */ diff --git a/include/asm-arm/arch-pxa/uncompress.h b/include/asm-arm/arch-pxa/uncompress.h index 178aa2e073ac..dadf4c20b622 100644 --- a/include/asm-arm/arch-pxa/uncompress.h +++ b/include/asm-arm/arch-pxa/uncompress.h @@ -9,19 +9,21 @@ * published by the Free Software Foundation. 
*/ -#define FFUART ((volatile unsigned long *)0x40100000) -#define BTUART ((volatile unsigned long *)0x40200000) -#define STUART ((volatile unsigned long *)0x40700000) -#define HWUART ((volatile unsigned long *)0x41600000) +#include <linux/serial_reg.h> +#include <asm/arch/pxa-regs.h> + +#define __REG(x) ((volatile unsigned long *)x) #define UART FFUART static inline void putc(char c) { - while (!(UART[5] & 0x20)) + if (!(UART[UART_IER] & IER_UUE)) + return; + while (!(UART[UART_LSR] & LSR_TDRQ)) barrier(); - UART[0] = c; + UART[UART_TX] = c; } /* diff --git a/include/asm-arm/arch-pxa/zylonite.h b/include/asm-arm/arch-pxa/zylonite.h index f58b59162b82..5f717d64ea7d 100644 --- a/include/asm-arm/arch-pxa/zylonite.h +++ b/include/asm-arm/arch-pxa/zylonite.h @@ -3,9 +3,18 @@ #define ZYLONITE_ETH_PHYS 0x14000000 +#define EXT_GPIO(x) (128 + (x)) + /* the following variables are processor specific and initialized * by the corresponding zylonite_pxa3xx_init() */ +struct platform_mmc_slot { + int gpio_cd; + int gpio_wp; +}; + +extern struct platform_mmc_slot zylonite_mmc_slot[]; + extern int gpio_backlight; extern int gpio_eth_irq; diff --git a/include/asm-arm/arch-s3c2410/debug-macro.S b/include/asm-arm/arch-s3c2410/debug-macro.S index 9c8cd9abb82b..89076c322726 100644 --- a/include/asm-arm/arch-s3c2410/debug-macro.S +++ b/include/asm-arm/arch-s3c2410/debug-macro.S @@ -92,11 +92,9 @@ #if defined(CONFIG_CPU_LLSERIAL_S3C2410_ONLY) #define fifo_full fifo_full_s3c2410 #define fifo_level fifo_level_s3c2410 -#warning 2410only #elif !defined(CONFIG_CPU_LLSERIAL_S3C2440_ONLY) #define fifo_full fifo_full_s3c24xx #define fifo_level fifo_level_s3c24xx -#warning generic #endif /* include the reset of the code which will do the work */ diff --git a/include/asm-arm/arch-s3c2410/dma.h b/include/asm-arm/arch-s3c2410/dma.h index c6e8d8f64938..4f291d9b7d93 100644 --- a/include/asm-arm/arch-s3c2410/dma.h +++ b/include/asm-arm/arch-s3c2410/dma.h @@ -214,6 +214,7 @@ struct s3c2410_dma_chan { unsigned long dev_addr; unsigned long load_timeout; unsigned int flags; /* channel flags */ + unsigned int hw_cfg; /* last hw config */ struct s3c24xx_dma_map *map; /* channel hw maps */ diff --git a/include/asm-arm/arch-s3c2410/hardware.h b/include/asm-arm/arch-s3c2410/hardware.h index 6dadf58ff984..29592c3ebf22 100644 --- a/include/asm-arm/arch-s3c2410/hardware.h +++ b/include/asm-arm/arch-s3c2410/hardware.h @@ -50,6 +50,17 @@ extern unsigned int s3c2410_gpio_getcfg(unsigned int pin); extern int s3c2410_gpio_getirq(unsigned int pin); +/* s3c2410_gpio_irq2pin + * + * turn the given irq number into the corresponding GPIO number + * + * returns: + * < 0 = no pin + * >=0 = gpio pin number +*/ + +extern int s3c2410_gpio_irq2pin(unsigned int irq); + #ifdef CONFIG_CPU_S3C2400 extern int s3c2400_gpio_getirq(unsigned int pin); @@ -87,6 +98,18 @@ extern int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on, extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to); +/* s3c2410_gpio_getpull + * + * Read the state of the pull-up on a given pin + * + * return: + * < 0 => error code + * 0 => enabled + * 1 => disabled +*/ + +extern int s3c2410_gpio_getpull(unsigned int pin); + extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to); extern unsigned int s3c2410_gpio_getpin(unsigned int pin); @@ -99,6 +122,11 @@ extern int s3c2440_set_dsc(unsigned int pin, unsigned int value); #endif /* CONFIG_CPU_S3C2440 */ +#ifdef CONFIG_CPU_S3C2412 + +extern int s3c2412_gpio_set_sleepcfg(unsigned int pin, unsigned int state); + 
+#endif /* CONFIG_CPU_S3C2412 */ #endif /* __ASSEMBLY__ */ diff --git a/include/asm-arm/arch-s3c2410/irqs.h b/include/asm-arm/arch-s3c2410/irqs.h index 996f65488d2d..d858b3eb5547 100644 --- a/include/asm-arm/arch-s3c2410/irqs.h +++ b/include/asm-arm/arch-s3c2410/irqs.h @@ -160,4 +160,7 @@ #define NR_IRQS (IRQ_S3C2440_AC97+1) #endif +/* Our FIQs are routable from IRQ_EINT0 to IRQ_ADCPARENT */ +#define FIQ_START IRQ_EINT0 + #endif /* __ASM_ARCH_IRQ_H */ diff --git a/include/asm-arm/arch-s3c2410/regs-clock.h b/include/asm-arm/arch-s3c2410/regs-clock.h index e39656b7a086..dba9df9d8713 100644 --- a/include/asm-arm/arch-s3c2410/regs-clock.h +++ b/include/asm-arm/arch-s3c2410/regs-clock.h @@ -138,6 +138,8 @@ s3c2410_get_pll(unsigned int pllval, unsigned int baseclk) #define S3C2412_CLKDIVN_PDIVN (1<<2) #define S3C2412_CLKDIVN_HDIVN_MASK (3<<0) #define S3C2421_CLKDIVN_ARMDIVN (1<<3) +#define S3C2412_CLKDIVN_DVSEN (1<<4) +#define S3C2412_CLKDIVN_HALFHCLK (1<<5) #define S3C2412_CLKDIVN_USB48DIV (1<<6) #define S3C2412_CLKDIVN_UARTDIV_MASK (15<<8) #define S3C2412_CLKDIVN_UARTDIV_SHIFT (8) diff --git a/include/asm-arm/arch-s3c2410/regs-dsc.h b/include/asm-arm/arch-s3c2410/regs-dsc.h index c0748511edbc..1235df70f34e 100644 --- a/include/asm-arm/arch-s3c2410/regs-dsc.h +++ b/include/asm-arm/arch-s3c2410/regs-dsc.h @@ -19,7 +19,7 @@ #define S3C2412_DSC1 S3C2410_GPIOREG(0xe0) #endif -#if defined(CONFIG_CPU_S3C2440) +#if defined(CONFIG_CPU_S3C244X) #define S3C2440_DSC0 S3C2410_GPIOREG(0xc4) #define S3C2440_DSC1 S3C2410_GPIOREG(0xc8) diff --git a/include/asm-arm/arch-s3c2410/regs-gpio.h b/include/asm-arm/arch-s3c2410/regs-gpio.h index b693158b2d3c..0ad75d716ded 100644 --- a/include/asm-arm/arch-s3c2410/regs-gpio.h +++ b/include/asm-arm/arch-s3c2410/regs-gpio.h @@ -1133,12 +1133,16 @@ #define S3C2412_GPBSLPCON S3C2410_GPIOREG(0x1C) #define S3C2412_GPCSLPCON S3C2410_GPIOREG(0x2C) #define S3C2412_GPDSLPCON S3C2410_GPIOREG(0x3C) -#define S3C2412_GPESLPCON S3C2410_GPIOREG(0x4C) #define S3C2412_GPFSLPCON S3C2410_GPIOREG(0x5C) #define S3C2412_GPGSLPCON S3C2410_GPIOREG(0x6C) #define S3C2412_GPHSLPCON S3C2410_GPIOREG(0x7C) /* definitions for each pin bit */ +#define S3C2412_GPIO_SLPCON_LOW ( 0x00 ) +#define S3C2412_GPIO_SLPCON_HIGH ( 0x01 ) +#define S3C2412_GPIO_SLPCON_IN ( 0x02 ) +#define S3C2412_GPIO_SLPCON_PULL ( 0x03 ) + #define S3C2412_SLPCON_LOW(x) ( 0x00 << ((x) * 2)) #define S3C2412_SLPCON_HIGH(x) ( 0x01 << ((x) * 2)) #define S3C2412_SLPCON_IN(x) ( 0x02 << ((x) * 2)) diff --git a/include/asm-arm/arch-s3c2410/regs-mem.h b/include/asm-arm/arch-s3c2410/regs-mem.h index e4d82341f7ba..312ff93b63c6 100644 --- a/include/asm-arm/arch-s3c2410/regs-mem.h +++ b/include/asm-arm/arch-s3c2410/regs-mem.h @@ -98,16 +98,19 @@ #define S3C2410_BANKCON_Tacp3 (0x1 << 2) #define S3C2410_BANKCON_Tacp4 (0x2 << 2) #define S3C2410_BANKCON_Tacp6 (0x3 << 2) +#define S3C2410_BANKCON_Tacp_SHIFT (2) #define S3C2410_BANKCON_Tcah0 (0x0 << 4) #define S3C2410_BANKCON_Tcah1 (0x1 << 4) #define S3C2410_BANKCON_Tcah2 (0x2 << 4) #define S3C2410_BANKCON_Tcah4 (0x3 << 4) +#define S3C2410_BANKCON_Tcah_SHIFT (4) #define S3C2410_BANKCON_Tcoh0 (0x0 << 6) #define S3C2410_BANKCON_Tcoh1 (0x1 << 6) #define S3C2410_BANKCON_Tcoh2 (0x2 << 6) #define S3C2410_BANKCON_Tcoh4 (0x3 << 6) +#define S3C2410_BANKCON_Tcoh_SHIFT (6) #define S3C2410_BANKCON_Tacc1 (0x0 << 8) #define S3C2410_BANKCON_Tacc2 (0x1 << 8) @@ -117,16 +120,19 @@ #define S3C2410_BANKCON_Tacc8 (0x5 << 8) #define S3C2410_BANKCON_Tacc10 (0x6 << 8) #define S3C2410_BANKCON_Tacc14 (0x7 << 8) +#define 
S3C2410_BANKCON_Tacc_SHIFT (8) #define S3C2410_BANKCON_Tcos0 (0x0 << 11) #define S3C2410_BANKCON_Tcos1 (0x1 << 11) #define S3C2410_BANKCON_Tcos2 (0x2 << 11) #define S3C2410_BANKCON_Tcos4 (0x3 << 11) +#define S3C2410_BANKCON_Tcos_SHIFT (11) #define S3C2410_BANKCON_Tacs0 (0x0 << 13) #define S3C2410_BANKCON_Tacs1 (0x1 << 13) #define S3C2410_BANKCON_Tacs2 (0x2 << 13) #define S3C2410_BANKCON_Tacs4 (0x3 << 13) +#define S3C2410_BANKCON_Tacs_SHIFT (13) #define S3C2410_BANKCON_SRAM (0x0 << 15) #define S3C2400_BANKCON_EDODRAM (0x2 << 15) diff --git a/include/asm-arm/arch-s3c2410/regs-power.h b/include/asm-arm/arch-s3c2410/regs-power.h index f79987be55e8..13d13b7cfe98 100644 --- a/include/asm-arm/arch-s3c2410/regs-power.h +++ b/include/asm-arm/arch-s3c2410/regs-power.h @@ -23,7 +23,8 @@ #define S3C2412_INFORM2 S3C24XX_PWRREG(0x78) #define S3C2412_INFORM3 S3C24XX_PWRREG(0x7C) -#define S3C2412_PWRCFG_BATF_IGNORE (0<<0) +#define S3C2412_PWRCFG_BATF_IRQ (1<<0) +#define S3C2412_PWRCFG_BATF_IGNORE (2<<0) #define S3C2412_PWRCFG_BATF_SLEEP (3<<0) #define S3C2412_PWRCFG_BATF_MASK (3<<0) diff --git a/include/asm-arm/arch-s3c2410/system.h b/include/asm-arm/arch-s3c2410/system.h index 63891786dfa0..14de4e596f87 100644 --- a/include/asm-arm/arch-s3c2410/system.h +++ b/include/asm-arm/arch-s3c2410/system.h @@ -20,6 +20,9 @@ #include <asm/plat-s3c/regs-watchdog.h> #include <asm/arch/regs-clock.h> +#include <linux/clk.h> +#include <linux/err.h> + void (*s3c24xx_idle)(void); void (*s3c24xx_reset_hook)(void); @@ -59,6 +62,8 @@ static void arch_idle(void) static void arch_reset(char mode) { + struct clk *wdtclk; + if (mode == 's') { cpu_reset(0); } @@ -70,19 +75,28 @@ arch_reset(char mode) __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */ + wdtclk = clk_get(NULL, "watchdog"); + if (!IS_ERR(wdtclk)) { + clk_enable(wdtclk); + } else + printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__); + /* put initial values into count and data */ - __raw_writel(0x100, S3C2410_WTCNT); - __raw_writel(0x100, S3C2410_WTDAT); + __raw_writel(0x80, S3C2410_WTCNT); + __raw_writel(0x80, S3C2410_WTDAT); /* set the watchdog to go and reset... */ __raw_writel(S3C2410_WTCON_ENABLE|S3C2410_WTCON_DIV16|S3C2410_WTCON_RSTEN | S3C2410_WTCON_PRESCALE(0x20), S3C2410_WTCON); /* wait for reset to assert... */ - mdelay(5000); + mdelay(500); printk(KERN_ERR "Watchdog reset failed to assert reset\n"); + /* delay to allow the serial port to show the message */ + mdelay(50); + /* we'll take a jump through zero as a poor second */ cpu_reset(0); } diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h index 47a6b086eee2..5c60bfc1a84d 100644 --- a/include/asm-arm/bitops.h +++ b/include/asm-arm/bitops.h @@ -310,6 +310,8 @@ static inline int constant_fls(int x) _find_first_zero_bit_le(p,sz) #define ext2_find_next_zero_bit(p,sz,off) \ _find_next_zero_bit_le(p,sz,off) +#define ext2_find_next_bit(p, sz, off) \ + _find_next_bit_le(p, sz, off) /* * Minix is defined to use little-endian byte ordering. 
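The ext2_find_next_bit() wrapper added to asm-arm/bitops.h above follows the usual find_next_bit() convention: it returns the index of the next set bit at or after the given offset, or a value no smaller than the size argument when no further bit is set. A minimal sketch (not part of the patch) of walking a little-endian on-disk bitmap with it; the function and bitmap pointer are hypothetical, and <linux/kernel.h> plus <asm/bitops.h> are assumed to be included:

static void example_walk_le_bitmap(const void *bitmap, unsigned long nbits)
{
	unsigned long bit = 0;

	while ((bit = ext2_find_next_bit(bitmap, nbits, bit)) < nbits) {
		printk("bit %lu is set\n", bit);
		bit++;			/* resume the search after this bit */
	}
}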
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index 6c1c968b2987..759a97b56eed 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h @@ -94,6 +94,14 @@ # endif #endif +#if defined(CONFIG_CPU_FEROCEON) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE feroceon +# endif +#endif + #if defined(CONFIG_CPU_V6) //# ifdef _CACHE # define MULTI_CACHE 1 diff --git a/include/asm-arm/fpstate.h b/include/asm-arm/fpstate.h index f31cda5a55ee..392eb5332323 100644 --- a/include/asm-arm/fpstate.h +++ b/include/asm-arm/fpstate.h @@ -17,14 +17,18 @@ /* * VFP storage area has: * - FPEXC, FPSCR, FPINST and FPINST2. - * - 16 double precision data registers - * - an implementation-dependant word of state for FLDMX/FSTMX + * - 16 or 32 double precision data registers + * - an implementation-dependant word of state for FLDMX/FSTMX (pre-ARMv6) * * FPEXC will always be non-zero once the VFP has been used in this process. */ struct vfp_hard_struct { +#ifdef CONFIG_VFPv3 + __u64 fpregs[32]; +#else __u64 fpregs[16]; +#endif #if __LINUX_ARM_ARCH__ < 6 __u32 fpmx_state; #endif @@ -35,6 +39,7 @@ struct vfp_hard_struct { */ __u32 fpinst; __u32 fpinst2; + #ifdef CONFIG_SMP __u32 cpu; #endif diff --git a/include/asm-arm/kprobes.h b/include/asm-arm/kprobes.h new file mode 100644 index 000000000000..4e7bd32288ae --- /dev/null +++ b/include/asm-arm/kprobes.h @@ -0,0 +1,79 @@ +/* + * include/asm-arm/kprobes.h + * + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _ARM_KPROBES_H +#define _ARM_KPROBES_H + +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/percpu.h> + +#define ARCH_SUPPORTS_KRETPROBES +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 2 +#define MAX_STACK_SIZE 64 /* 32 would probably be OK */ + +/* + * This undefined instruction must be unique and + * reserved solely for kprobes' use. + */ +#define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8 + +#define regs_return_value(regs) ((regs)->ARM_r0) +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +typedef u32 kprobe_opcode_t; + +struct kprobe; +typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); + +/* Architecture specific copy of original instruction. 
*/ +struct arch_specific_insn { + kprobe_opcode_t *insn; + kprobe_insn_handler_t *insn_handler; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + struct prev_kprobe prev_kprobe; + struct pt_regs jprobe_saved_regs; + char jprobes_stack[MAX_STACK_SIZE]; +}; + +void arch_remove_kprobe(struct kprobe *); + +int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); +int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); + +enum kprobe_insn { + INSN_REJECTED, + INSN_GOOD, + INSN_GOOD_NO_SLOT +}; + +enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, + struct arch_specific_insn *); +void __init arm_kprobe_decode_init(void); + +#endif /* _ARM_KPROBES_H */ diff --git a/include/asm-arm/plat-s3c24xx/dma.h b/include/asm-arm/plat-s3c24xx/dma.h index 2c59406435e5..c78efe316fc8 100644 --- a/include/asm-arm/plat-s3c24xx/dma.h +++ b/include/asm-arm/plat-s3c24xx/dma.h @@ -32,6 +32,7 @@ struct s3c24xx_dma_map { struct s3c24xx_dma_addr hw_addr; unsigned long channels[S3C2410_DMA_CHANNELS]; + unsigned long channels_rx[S3C2410_DMA_CHANNELS]; }; struct s3c24xx_dma_selection { @@ -41,6 +42,10 @@ struct s3c24xx_dma_selection { void (*select)(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map); + + void (*direction)(struct s3c2410_dma_chan *chan, + struct s3c24xx_dma_map *map, + enum s3c2410_dmasrc dir); }; extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel); diff --git a/include/asm-arm/plat-s3c24xx/irq.h b/include/asm-arm/plat-s3c24xx/irq.h index 8af6d9579b31..45746a995343 100644 --- a/include/asm-arm/plat-s3c24xx/irq.h +++ b/include/asm-arm/plat-s3c24xx/irq.h @@ -15,7 +15,9 @@ #define EXTINT_OFF (IRQ_EINT4 - 4) +/* these are exported for arch/arm/mach-* usage */ extern struct irq_chip s3c_irq_level_chip; +extern struct irq_chip s3c_irq_chip; static inline void s3c_irqsub_mask(unsigned int irqno, unsigned int parentbit, diff --git a/include/asm-arm/plat-s3c24xx/regs-s3c2412-iis.h b/include/asm-arm/plat-s3c24xx/regs-s3c2412-iis.h new file mode 100644 index 000000000000..25d4058bcfed --- /dev/null +++ b/include/asm-arm/plat-s3c24xx/regs-s3c2412-iis.h @@ -0,0 +1,72 @@ +/* linux/include/asm-arm/plat-s3c24xx/regs-s3c2412-iis.h + * + * Copyright 2007 Simtec Electronics <linux@simtec.co.uk> + * http://armlinux.simtec.co.uk/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * S3C2412 IIS register definition +*/ + +#ifndef __ASM_ARCH_REGS_S3C2412_IIS_H +#define __ASM_ARCH_REGS_S3C2412_IIS_H + +#define S3C2412_IISCON (0x00) +#define S3C2412_IISMOD (0x04) +#define S3C2412_IISFIC (0x08) +#define S3C2412_IISPSR (0x0C) +#define S3C2412_IISTXD (0x10) +#define S3C2412_IISRXD (0x14) + +#define S3C2412_IISCON_LRINDEX (1 << 11) +#define S3C2412_IISCON_TXFIFO_EMPTY (1 << 10) +#define S3C2412_IISCON_RXFIFO_EMPTY (1 << 9) +#define S3C2412_IISCON_TXFIFO_FULL (1 << 8) +#define S3C2412_IISCON_RXFIFO_FULL (1 << 7) +#define S3C2412_IISCON_TXDMA_PAUSE (1 << 6) +#define S3C2412_IISCON_RXDMA_PAUSE (1 << 5) +#define S3C2412_IISCON_TXCH_PAUSE (1 << 4) +#define S3C2412_IISCON_RXCH_PAUSE (1 << 3) +#define S3C2412_IISCON_TXDMA_ACTIVE (1 << 2) +#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1) +#define S3C2412_IISCON_IIS_ACTIVE (1 << 0) + +#define S3C2412_IISMOD_MASTER_INTERNAL (0 << 10) +#define S3C2412_IISMOD_MASTER_EXTERNAL (1 << 10) +#define S3C2412_IISMOD_SLAVE (2 << 10) +#define S3C2412_IISMOD_MASTER_MASK (3 << 10) +#define S3C2412_IISMOD_MODE_TXONLY (0 << 8) +#define S3C2412_IISMOD_MODE_RXONLY (1 << 8) +#define S3C2412_IISMOD_MODE_TXRX (2 << 8) +#define S3C2412_IISMOD_MODE_MASK (3 << 8) +#define S3C2412_IISMOD_LR_LLOW (0 << 7) +#define S3C2412_IISMOD_LR_RLOW (1 << 7) +#define S3C2412_IISMOD_SDF_IIS (0 << 5) +#define S3C2412_IISMOD_SDF_MSB (0 << 5) +#define S3C2412_IISMOD_SDF_LSB (0 << 5) +#define S3C2412_IISMOD_SDF_MASK (3 << 5) +#define S3C2412_IISMOD_RCLK_256FS (0 << 3) +#define S3C2412_IISMOD_RCLK_512FS (1 << 3) +#define S3C2412_IISMOD_RCLK_384FS (2 << 3) +#define S3C2412_IISMOD_RCLK_768FS (3 << 3) +#define S3C2412_IISMOD_RCLK_MASK (3 << 3) +#define S3C2412_IISMOD_BCLK_32FS (0 << 1) +#define S3C2412_IISMOD_BCLK_48FS (1 << 1) +#define S3C2412_IISMOD_BCLK_16FS (2 << 1) +#define S3C2412_IISMOD_BCLK_24FS (3 << 1) +#define S3C2412_IISMOD_BCLK_MASK (3 << 1) +#define S3C2412_IISMOD_8BIT (1 << 0) + +#define S3C2412_IISPSR_PSREN (1 << 15) + +#define S3C2412_IISFIC_TXFLUSH (1 << 15) +#define S3C2412_IISFIC_RXFLUSH (1 << 7) +#define S3C2412_IISFIC_TXCOUNT(x) (((x) >> 8) & 0xf) +#define S3C2412_IISFIC_RXCOUNT(x) (((x) >> 0) & 0xf) + + + +#endif /* __ASM_ARCH_REGS_S3C2412_IIS_H */ + diff --git a/include/asm-arm/plat-s3c24xx/regs-spi.h b/include/asm-arm/plat-s3c24xx/regs-spi.h index 4a499a138256..ea565b007d04 100644 --- a/include/asm-arm/plat-s3c24xx/regs-spi.h +++ b/include/asm-arm/plat-s3c24xx/regs-spi.h @@ -17,6 +17,21 @@ #define S3C2410_SPCON (0x00) +#define S3C2412_SPCON_RXFIFO_RB2 (0<<14) +#define S3C2412_SPCON_RXFIFO_RB4 (1<<14) +#define S3C2412_SPCON_RXFIFO_RB12 (2<<14) +#define S3C2412_SPCON_RXFIFO_RB14 (3<<14) +#define S3C2412_SPCON_TXFIFO_RB2 (0<<12) +#define S3C2412_SPCON_TXFIFO_RB4 (1<<12) +#define S3C2412_SPCON_TXFIFO_RB12 (2<<12) +#define S3C2412_SPCON_TXFIFO_RB14 (3<<12) +#define S3C2412_SPCON_RXFIFO_RESET (1<<11) /* RxFIFO reset */ +#define S3C2412_SPCON_TXFIFO_RESET (1<<10) /* TxFIFO reset */ +#define S3C2412_SPCON_RXFIFO_EN (1<<9) /* RxFIFO Enable */ +#define S3C2412_SPCON_TXFIFO_EN (1<<8) /* TxFIFO Enable */ + +#define S3C2412_SPCON_DIRC_RX (1<<7) + #define S3C2410_SPCON_SMOD_DMA (2<<5) /* DMA mode */ #define S3C2410_SPCON_SMOD_INT (1<<5) /* interrupt mode */ #define S3C2410_SPCON_SMOD_POLL (0<<5) /* polling mode */ @@ -34,10 +49,19 @@ #define S3C2410_SPSTA (0x04) +#define S3C2412_SPSTA_RXFIFO_AE (1<<11) +#define S3C2412_SPSTA_TXFIFO_AE (1<<10) +#define S3C2412_SPSTA_RXFIFO_ERROR (1<<9) +#define S3C2412_SPSTA_TXFIFO_ERROR (1<<8) +#define S3C2412_SPSTA_RXFIFO_FIFO (1<<7) 
+#define S3C2412_SPSTA_RXFIFO_EMPTY (1<<6) +#define S3C2412_SPSTA_TXFIFO_NFULL (1<<5) +#define S3C2412_SPSTA_TXFIFO_EMPTY (1<<4) + #define S3C2410_SPSTA_DCOL (1<<2) /* Data Collision Error */ #define S3C2410_SPSTA_MULD (1<<1) /* Multi Master Error */ #define S3C2410_SPSTA_READY (1<<0) /* Data Tx/Rx ready */ - +#define S3C2412_SPSTA_READY_ORG (1<<3) #define S3C2410_SPPIN (0x08) @@ -46,9 +70,13 @@ #define S3C2400_SPPIN_nCS (1<<1) /* SPI Card Select */ #define S3C2410_SPPIN_KEEP (1<<0) /* Master Out keep */ - #define S3C2410_SPPRE (0x0C) #define S3C2410_SPTDAT (0x10) #define S3C2410_SPRDAT (0x14) +#define S3C2412_TXFIFO (0x18) +#define S3C2412_RXFIFO (0x18) +#define S3C2412_SPFIC (0x24) + + #endif /* __ASM_ARCH_REGS_SPI_H */ diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h index 5599d4e5e708..a4ce457199d3 100644 --- a/include/asm-arm/proc-fns.h +++ b/include/asm-arm/proc-fns.h @@ -185,6 +185,14 @@ # define CPU_NAME cpu_xsc3 # endif # endif +# ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon +# endif +# endif # ifdef CONFIG_CPU_V6 # ifdef CPU_NAME # undef MULTI_CPU diff --git a/include/asm-arm/traps.h b/include/asm-arm/traps.h index d4f34dc83eb0..f1541afcf85c 100644 --- a/include/asm-arm/traps.h +++ b/include/asm-arm/traps.h @@ -15,4 +15,13 @@ struct undef_hook { void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); +static inline int in_exception_text(unsigned long ptr) +{ + extern char __exception_text_start[]; + extern char __exception_text_end[]; + + return ptr >= (unsigned long)&__exception_text_start && + ptr < (unsigned long)&__exception_text_end; +} + #endif diff --git a/include/asm-arm/vfp.h b/include/asm-arm/vfp.h index bd6be9d7f772..5f9a2cb3d452 100644 --- a/include/asm-arm/vfp.h +++ b/include/asm-arm/vfp.h @@ -7,7 +7,11 @@ #define FPSID cr0 #define FPSCR cr1 +#define MVFR1 cr6 +#define MVFR0 cr7 #define FPEXC cr8 +#define FPINST cr9 +#define FPINST2 cr10 /* FPSID bits */ #define FPSID_IMPLEMENTER_BIT (24) @@ -28,6 +32,19 @@ /* FPEXC bits */ #define FPEXC_EX (1 << 31) #define FPEXC_EN (1 << 30) +#define FPEXC_DEX (1 << 29) +#define FPEXC_FP2V (1 << 28) +#define FPEXC_VV (1 << 27) +#define FPEXC_TFV (1 << 26) +#define FPEXC_LENGTH_BIT (8) +#define FPEXC_LENGTH_MASK (7 << FPEXC_LENGTH_BIT) +#define FPEXC_IDF (1 << 7) +#define FPEXC_IXF (1 << 4) +#define FPEXC_UFF (1 << 3) +#define FPEXC_OFF (1 << 2) +#define FPEXC_DZF (1 << 1) +#define FPEXC_IOF (1 << 0) +#define FPEXC_TRAP_MASK (FPEXC_IDF|FPEXC_IXF|FPEXC_UFF|FPEXC_OFF|FPEXC_DZF|FPEXC_IOF) /* FPSCR bits */ #define FPSCR_DEFAULT_NAN (1<<25) @@ -55,20 +72,9 @@ #define FPSCR_IXC (1<<4) #define FPSCR_IDC (1<<7) -/* - * VFP9-S specific. 
- */ -#define FPINST cr9 -#define FPINST2 cr10 - -/* FPEXC bits */ -#define FPEXC_FPV2 (1<<28) -#define FPEXC_LENGTH_BIT (8) -#define FPEXC_LENGTH_MASK (7 << FPEXC_LENGTH_BIT) -#define FPEXC_INV (1 << 7) -#define FPEXC_UFC (1 << 3) -#define FPEXC_OFC (1 << 2) -#define FPEXC_IOC (1 << 0) +/* MVFR0 bits */ +#define MVFR0_A_SIMD_BIT (0) +#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT) /* Bit patterns for decoding the packaged operation descriptors */ #define VFPOPDESC_LENGTH_BIT (9) diff --git a/include/asm-arm/vfpmacros.h b/include/asm-arm/vfpmacros.h index 27fe028b4e72..cccb3892e73c 100644 --- a/include/asm-arm/vfpmacros.h +++ b/include/asm-arm/vfpmacros.h @@ -15,19 +15,33 @@ .endm @ read all the working registers back into the VFP - .macro VFPFLDMIA, base + .macro VFPFLDMIA, base, tmp #if __LINUX_ARM_ARCH__ < 6 LDC p11, cr0, [\base],#33*4 @ FLDMIAX \base!, {d0-d15} #else LDC p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d0-d15} #endif +#ifdef CONFIG_VFPv3 + VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 + and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field + cmp \tmp, #2 @ 32 x 64bit registers? + ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addne \base, \base, #32*4 @ step over unused register space +#endif .endm @ write all the working registers out of the VFP - .macro VFPFSTMIA, base + .macro VFPFSTMIA, base, tmp #if __LINUX_ARM_ARCH__ < 6 STC p11, cr0, [\base],#33*4 @ FSTMIAX \base!, {d0-d15} #else STC p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d0-d15} #endif +#ifdef CONFIG_VFPv3 + VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 + and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field + cmp \tmp, #2 @ 32 x 64bit registers? + stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addne \base, \base, #32*4 @ step over unused register space +#endif .endm diff --git a/include/asm-avr32/arch-at32ap/at32ap7000.h b/include/asm-avr32/arch-at32ap/at32ap700x.h index 3914d7b94ff4..99684d6f3967 100644 --- a/include/asm-avr32/arch-at32ap/at32ap7000.h +++ b/include/asm-avr32/arch-at32ap/at32ap700x.h @@ -7,8 +7,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef __ASM_ARCH_AT32AP7000_H__ -#define __ASM_ARCH_AT32AP7000_H__ +#ifndef __ASM_ARCH_AT32AP700X_H__ +#define __ASM_ARCH_AT32AP700X_H__ #define GPIO_PERIPH_A 0 #define GPIO_PERIPH_B 1 @@ -32,4 +32,4 @@ #define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) #define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) -#endif /* __ASM_ARCH_AT32AP7000_H__ */ +#endif /* __ASM_ARCH_AT32AP700X_H__ */ diff --git a/include/asm-avr32/arch-at32ap/cpu.h b/include/asm-avr32/arch-at32ap/cpu.h index a762f42cbb71..44d0bfa1f409 100644 --- a/include/asm-avr32/arch-at32ap/cpu.h +++ b/include/asm-avr32/arch-at32ap/cpu.h @@ -14,7 +14,7 @@ * Only AT32AP7000 is defined for now. We can identify the specific * chip at runtime, but I'm not sure if it's really worth it. 
*/ -#ifdef CONFIG_CPU_AT32AP7000 +#ifdef CONFIG_CPU_AT32AP700X # define cpu_is_at32ap7000() (1) #else # define cpu_is_at32ap7000() (0) @@ -30,5 +30,6 @@ #define cpu_is_at91sam9261() (0) #define cpu_is_at91sam9263() (0) #define cpu_is_at91sam9rl() (0) +#define cpu_is_at91cap9() (0) #endif /* __ASM_ARCH_CPU_H */ diff --git a/include/asm-avr32/arch-at32ap/io.h b/include/asm-avr32/arch-at32ap/io.h index ee59e401f041..4ec6abc68ea3 100644 --- a/include/asm-avr32/arch-at32ap/io.h +++ b/include/asm-avr32/arch-at32ap/io.h @@ -4,7 +4,7 @@ /* For "bizarre" halfword swapping */ #include <linux/byteorder/swabb.h> -#if defined(CONFIG_AP7000_32_BIT_SMC) +#if defined(CONFIG_AP700X_32_BIT_SMC) # define __swizzle_addr_b(addr) (addr ^ 3UL) # define __swizzle_addr_w(addr) (addr ^ 2UL) # define __swizzle_addr_l(addr) (addr) @@ -14,7 +14,7 @@ # define __mem_ioswabb(a, x) (x) # define __mem_ioswabw(a, x) swab16(x) # define __mem_ioswabl(a, x) swab32(x) -#elif defined(CONFIG_AP7000_16_BIT_SMC) +#elif defined(CONFIG_AP700X_16_BIT_SMC) # define __swizzle_addr_b(addr) (addr ^ 1UL) # define __swizzle_addr_w(addr) (addr) # define __swizzle_addr_l(addr) (addr) diff --git a/include/asm-avr32/irq.h b/include/asm-avr32/irq.h index 83e6549d7783..9315724c0596 100644 --- a/include/asm-avr32/irq.h +++ b/include/asm-avr32/irq.h @@ -11,4 +11,9 @@ #define irq_canonicalize(i) (i) +#ifndef __ASSEMBLER__ +int nmi_enable(void); +void nmi_disable(void); +#endif + #endif /* __ASM_AVR32_IOCTLS_H */ diff --git a/include/asm-avr32/kdebug.h b/include/asm-avr32/kdebug.h index fd7e99046b2f..ca4f9542365a 100644 --- a/include/asm-avr32/kdebug.h +++ b/include/asm-avr32/kdebug.h @@ -5,6 +5,7 @@ enum die_val { DIE_BREAKPOINT, DIE_SSTEP, + DIE_NMI, }; #endif /* __ASM_AVR32_KDEBUG_H */ diff --git a/include/asm-avr32/ocd.h b/include/asm-avr32/ocd.h index 996405e0393f..6bef09490235 100644 --- a/include/asm-avr32/ocd.h +++ b/include/asm-avr32/ocd.h @@ -533,6 +533,11 @@ static inline void __ocd_write(unsigned int reg, unsigned long value) #define ocd_read(reg) __ocd_read(OCD_##reg) #define ocd_write(reg, value) __ocd_write(OCD_##reg, value) +struct task_struct; + +void ocd_enable(struct task_struct *child); +void ocd_disable(struct task_struct *child); + #endif /* !__ASSEMBLER__ */ #endif /* __ASM_AVR32_OCD_H */ diff --git a/include/asm-avr32/processor.h b/include/asm-avr32/processor.h index a52576b25afe..4212551c1cd9 100644 --- a/include/asm-avr32/processor.h +++ b/include/asm-avr32/processor.h @@ -57,11 +57,25 @@ struct avr32_cpuinfo { unsigned short cpu_revision; enum tlb_config tlb_config; unsigned long features; + u32 device_id; struct cache_info icache; struct cache_info dcache; }; +static inline unsigned int avr32_get_manufacturer_id(struct avr32_cpuinfo *cpu) +{ + return (cpu->device_id >> 1) & 0x7f; +} +static inline unsigned int avr32_get_product_number(struct avr32_cpuinfo *cpu) +{ + return (cpu->device_id >> 12) & 0xffff; +} +static inline unsigned int avr32_get_chip_revision(struct avr32_cpuinfo *cpu) +{ + return (cpu->device_id >> 28) & 0x0f; +} + extern struct avr32_cpuinfo boot_cpu_data; #ifdef CONFIG_SMP diff --git a/include/asm-avr32/ptrace.h b/include/asm-avr32/ptrace.h index 8c5dba5e33df..9e2d44f4e0fe 100644 --- a/include/asm-avr32/ptrace.h +++ b/include/asm-avr32/ptrace.h @@ -121,7 +121,15 @@ struct pt_regs { }; #ifdef __KERNEL__ -# define user_mode(regs) (((regs)->sr & MODE_MASK) == MODE_USER) + +#include <asm/ocd.h> + +#define arch_ptrace_attach(child) ocd_enable(child) + +#define user_mode(regs) (((regs)->sr & MODE_MASK) == 
MODE_USER) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) + extern void show_regs (struct pt_regs *); static __inline__ int valid_user_regs(struct pt_regs *regs) @@ -141,9 +149,6 @@ static __inline__ int valid_user_regs(struct pt_regs *regs) return 0; } -#define instruction_pointer(regs) ((regs)->pc) - -#define profile_pc(regs) instruction_pointer(regs) #endif /* __KERNEL__ */ diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h index b0828d43e110..ea3070ff13a5 100644 --- a/include/asm-avr32/setup.h +++ b/include/asm-avr32/setup.h @@ -110,7 +110,7 @@ struct tagtable { int (*parse)(struct tag *); }; -#define __tag __attribute_used__ __attribute__((__section__(".taglist.init"))) +#define __tag __used __attribute__((__section__(".taglist.init"))) #define __tagtable(tag, fn) \ static struct tagtable __tagtable_##fn __tag = { tag, fn } diff --git a/include/asm-avr32/thread_info.h b/include/asm-avr32/thread_info.h index 184b574289b4..07049f6c0d41 100644 --- a/include/asm-avr32/thread_info.h +++ b/include/asm-avr32/thread_info.h @@ -88,6 +88,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_MEMDIE 6 #define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */ #define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */ +#define TIF_DEBUG 30 /* debugging enabled */ #define TIF_USERSPACE 31 /* true if FS sets userspace */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 39bdd86871cf..6ae0619d7696 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -51,7 +51,7 @@ extern unsigned long sclk_to_usecs(unsigned long sclk); extern unsigned long usecs_to_sclk(unsigned long usecs); extern void dump_bfin_process(struct pt_regs *regs); -extern void dump_bfin_mem(void *retaddr); +extern void dump_bfin_mem(struct pt_regs *regs); extern void dump_bfin_trace_buffer(void); extern int init_arch_irq(void); diff --git a/include/asm-blackfin/cplb-mpu.h b/include/asm-blackfin/cplb-mpu.h new file mode 100644 index 000000000000..75c67b99d607 --- /dev/null +++ b/include/asm-blackfin/cplb-mpu.h @@ -0,0 +1,61 @@ +/* + * File: include/asm-blackfin/cplbinit.h + * Based on: + * Author: + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __ASM_BFIN_CPLB_MPU_H +#define __ASM_BFIN_CPLB_MPU_H + +struct cplb_entry { + unsigned long data, addr; +}; + +struct mem_region { + unsigned long start, end; + unsigned long dcplb_data; + unsigned long icplb_data; +}; + +extern struct cplb_entry dcplb_tbl[MAX_CPLBS]; +extern struct cplb_entry icplb_tbl[MAX_CPLBS]; +extern int first_switched_icplb; +extern int first_mask_dcplb; +extern int first_switched_dcplb; + +extern int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot; +extern int nr_cplb_flush; + +extern int page_mask_order; +extern int page_mask_nelts; + +extern unsigned long *current_rwx_mask; + +extern void flush_switched_cplbs(void); +extern void set_mask_dcplbs(unsigned long *); + +extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *); + +#endif /* __ASM_BFIN_CPLB_MPU_H */ diff --git a/include/asm-blackfin/cplb.h b/include/asm-blackfin/cplb.h index 06828d77a58f..654375c2b746 100644 --- a/include/asm-blackfin/cplb.h +++ b/include/asm-blackfin/cplb.h @@ -65,7 +65,11 @@ #define SIZE_1M 0x00100000 /* 1M */ #define SIZE_4M 0x00400000 /* 4M */ +#ifdef CONFIG_MPU +#define MAX_CPLBS 16 +#else #define MAX_CPLBS (16 * 2) +#endif #define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M) diff --git a/include/asm-blackfin/cplbinit.h b/include/asm-blackfin/cplbinit.h index c4d0596e8e9f..0eb1c1b685a7 100644 --- a/include/asm-blackfin/cplbinit.h +++ b/include/asm-blackfin/cplbinit.h @@ -33,6 +33,12 @@ #include <asm/blackfin.h> #include <asm/cplb.h> +#ifdef CONFIG_MPU + +#include <asm/cplb-mpu.h> + +#else + #define INITIAL_T 0x1 #define SWITCH_T 0x2 #define I_CPLB 0x4 @@ -79,6 +85,8 @@ extern u_long ipdt_swapcount_table[]; extern u_long dpdt_swapcount_table[]; #endif +#endif /* CONFIG_MPU */ + extern unsigned long reserved_mem_dcache_on; extern unsigned long reserved_mem_icache_on; diff --git a/include/asm-blackfin/dma.h b/include/asm-blackfin/dma.h index b469505af364..5abaa2cee8db 100644 --- a/include/asm-blackfin/dma.h +++ b/include/asm-blackfin/dma.h @@ -76,6 +76,9 @@ enum dma_chan_status { #define INTR_ON_BUF 2 #define INTR_ON_ROW 3 +#define DMA_NOSYNC_KEEP_DMA_BUF 0 +#define DMA_SYNC_RESTART 1 + struct dmasg { unsigned long next_desc_addr; unsigned long start_addr; @@ -157,7 +160,8 @@ void set_dma_y_count(unsigned int channel, unsigned short y_count); void set_dma_y_modify(unsigned int channel, short y_modify); void set_dma_config(unsigned int channel, unsigned short config); unsigned short set_bfin_dma_config(char direction, char flow_mode, - char intr_mode, char dma_mode, char width); + char intr_mode, char dma_mode, char width, + char syncmode); void set_dma_curr_addr(unsigned int channel, unsigned long addr); /* get curr status for polling */ diff --git a/include/asm-blackfin/gpio.h b/include/asm-blackfin/gpio.h index 33ce98ef7e0f..d0426c108262 100644 --- a/include/asm-blackfin/gpio.h +++ b/include/asm-blackfin/gpio.h @@ -7,7 +7,7 @@ * Description: * * Modified: - * Copyright 2004-2006 Analog Devices Inc. + * Copyright 2004-2008 Analog Devices Inc. 
* * Bugs: Enter bugs at http://blackfin.uclinux.org/ * @@ -304,39 +304,39 @@ **************************************************************/ #ifndef BF548_FAMILY -void set_gpio_dir(unsigned short, unsigned short); -void set_gpio_inen(unsigned short, unsigned short); -void set_gpio_polar(unsigned short, unsigned short); -void set_gpio_edge(unsigned short, unsigned short); -void set_gpio_both(unsigned short, unsigned short); -void set_gpio_data(unsigned short, unsigned short); -void set_gpio_maska(unsigned short, unsigned short); -void set_gpio_maskb(unsigned short, unsigned short); -void set_gpio_toggle(unsigned short); -void set_gpiop_dir(unsigned short, unsigned short); -void set_gpiop_inen(unsigned short, unsigned short); -void set_gpiop_polar(unsigned short, unsigned short); -void set_gpiop_edge(unsigned short, unsigned short); -void set_gpiop_both(unsigned short, unsigned short); -void set_gpiop_data(unsigned short, unsigned short); -void set_gpiop_maska(unsigned short, unsigned short); -void set_gpiop_maskb(unsigned short, unsigned short); -unsigned short get_gpio_dir(unsigned short); -unsigned short get_gpio_inen(unsigned short); -unsigned short get_gpio_polar(unsigned short); -unsigned short get_gpio_edge(unsigned short); -unsigned short get_gpio_both(unsigned short); -unsigned short get_gpio_maska(unsigned short); -unsigned short get_gpio_maskb(unsigned short); -unsigned short get_gpio_data(unsigned short); -unsigned short get_gpiop_dir(unsigned short); -unsigned short get_gpiop_inen(unsigned short); -unsigned short get_gpiop_polar(unsigned short); -unsigned short get_gpiop_edge(unsigned short); -unsigned short get_gpiop_both(unsigned short); -unsigned short get_gpiop_maska(unsigned short); -unsigned short get_gpiop_maskb(unsigned short); -unsigned short get_gpiop_data(unsigned short); +void set_gpio_dir(unsigned, unsigned short); +void set_gpio_inen(unsigned, unsigned short); +void set_gpio_polar(unsigned, unsigned short); +void set_gpio_edge(unsigned, unsigned short); +void set_gpio_both(unsigned, unsigned short); +void set_gpio_data(unsigned, unsigned short); +void set_gpio_maska(unsigned, unsigned short); +void set_gpio_maskb(unsigned, unsigned short); +void set_gpio_toggle(unsigned); +void set_gpiop_dir(unsigned, unsigned short); +void set_gpiop_inen(unsigned, unsigned short); +void set_gpiop_polar(unsigned, unsigned short); +void set_gpiop_edge(unsigned, unsigned short); +void set_gpiop_both(unsigned, unsigned short); +void set_gpiop_data(unsigned, unsigned short); +void set_gpiop_maska(unsigned, unsigned short); +void set_gpiop_maskb(unsigned, unsigned short); +unsigned short get_gpio_dir(unsigned); +unsigned short get_gpio_inen(unsigned); +unsigned short get_gpio_polar(unsigned); +unsigned short get_gpio_edge(unsigned); +unsigned short get_gpio_both(unsigned); +unsigned short get_gpio_maska(unsigned); +unsigned short get_gpio_maskb(unsigned); +unsigned short get_gpio_data(unsigned); +unsigned short get_gpiop_dir(unsigned); +unsigned short get_gpiop_inen(unsigned); +unsigned short get_gpiop_polar(unsigned); +unsigned short get_gpiop_edge(unsigned); +unsigned short get_gpiop_both(unsigned); +unsigned short get_gpiop_maska(unsigned); +unsigned short get_gpiop_maskb(unsigned); +unsigned short get_gpiop_data(unsigned); struct gpio_port_t { unsigned short data; @@ -382,8 +382,8 @@ struct gpio_port_t { #define PM_WAKE_LOW 0x8 #define PM_WAKE_BOTH_EDGES (PM_WAKE_RISING | PM_WAKE_FALLING) -int gpio_pm_wakeup_request(unsigned short gpio, unsigned char type); -void 
gpio_pm_wakeup_free(unsigned short gpio); +int gpio_pm_wakeup_request(unsigned gpio, unsigned char type); +void gpio_pm_wakeup_free(unsigned gpio); unsigned int gpio_pm_setup(void); void gpio_pm_restore(void); @@ -426,19 +426,19 @@ struct gpio_port_s { * MODIFICATION HISTORY : **************************************************************/ -int gpio_request(unsigned short, const char *); -void gpio_free(unsigned short); +int gpio_request(unsigned, const char *); +void gpio_free(unsigned); -void gpio_set_value(unsigned short gpio, unsigned short arg); -unsigned short gpio_get_value(unsigned short gpio); +void gpio_set_value(unsigned gpio, int arg); +int gpio_get_value(unsigned gpio); #ifndef BF548_FAMILY #define gpio_get_value(gpio) get_gpio_data(gpio) #define gpio_set_value(gpio, value) set_gpio_data(gpio, value) #endif -void gpio_direction_input(unsigned short gpio); -void gpio_direction_output(unsigned short gpio); +int gpio_direction_input(unsigned gpio); +int gpio_direction_output(unsigned gpio, int value); #include <asm-generic/gpio.h> /* cansleep wrappers */ #include <asm/irq.h> diff --git a/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h index 0b867e6a76c4..15dbc21eed8b 100644 --- a/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h @@ -146,7 +146,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart) if (uart->rts_pin >= 0) { gpio_request(uart->rts_pin, DRIVER_NAME); - gpio_direction_output(uart->rts_pin); + gpio_direction_output(uart->rts_pin, 0); } #endif } diff --git a/include/asm-blackfin/mach-bf527/portmux.h b/include/asm-blackfin/mach-bf527/portmux.h index dcf001adc63c..ae4d205bfcf5 100644 --- a/include/asm-blackfin/mach-bf527/portmux.h +++ b/include/asm-blackfin/mach-bf527/portmux.h @@ -1,6 +1,8 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ +#define MAX_RESOURCES MAX_BLACKFIN_GPIOS + #define P_PPI0_D0 (P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(0)) #define P_PPI0_D1 (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(0)) #define P_PPI0_D2 (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(0)) diff --git a/include/asm-blackfin/mach-bf533/anomaly.h b/include/asm-blackfin/mach-bf533/anomaly.h index f36ff5af1b91..98209d40abba 100644 --- a/include/asm-blackfin/mach-bf533/anomaly.h +++ b/include/asm-blackfin/mach-bf533/anomaly.h @@ -7,9 +7,7 @@ */ /* This file shoule be up to date with: - * - Revision X, March 23, 2007; ADSP-BF533 Blackfin Processor Anomaly List - * - Revision AB, March 23, 2007; ADSP-BF532 Blackfin Processor Anomaly List - * - Revision W, March 23, 2007; ADSP-BF531 Blackfin Processor Anomaly List + * - Revision B, 12/10/2007; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List */ #ifndef _MACH_ANOMALY_H_ @@ -17,7 +15,7 @@ /* We do not support 0.1 or 0.2 silicon - sorry */ #if __SILICON_REVISION__ < 3 -# error Kernel will not work on BF533 silicon version 0.0, 0.1, or 0.2 +# error will not work on BF533 silicon version 0.0, 0.1, or 0.2 #endif #if defined(__ADSPBF531__) @@ -251,6 +249,12 @@ #define ANOMALY_05000192 (__SILICON_REVISION__ < 3) /* Internal Voltage Regulator may not start up */ #define ANOMALY_05000206 (__SILICON_REVISION__ < 3) +/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ +#define ANOMALY_05000357 (1) +/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */ +#define ANOMALY_05000366 (1) +/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */ +#define ANOMALY_05000371 (1) 
/* Anomalies that don't exist on this proc */ #define ANOMALY_05000266 (0) diff --git a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h index 69b9f8e120e9..7871d4313f49 100644 --- a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h @@ -111,7 +111,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart) } if (uart->rts_pin >= 0) { gpio_request(uart->rts_pin, DRIVER_NAME); - gpio_direction_input(uart->rts_pin); + gpio_direction_input(uart->rts_pin, 0); } #endif } diff --git a/include/asm-blackfin/mach-bf533/portmux.h b/include/asm-blackfin/mach-bf533/portmux.h index 137f4884acfe..685a2651dcda 100644 --- a/include/asm-blackfin/mach-bf533/portmux.h +++ b/include/asm-blackfin/mach-bf533/portmux.h @@ -1,6 +1,8 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ +#define MAX_RESOURCES MAX_BLACKFIN_GPIOS + #define P_PPI0_CLK (P_DONTCARE) #define P_PPI0_FS1 (P_DONTCARE) #define P_PPI0_FS2 (P_DONTCARE) diff --git a/include/asm-blackfin/mach-bf537/anomaly.h b/include/asm-blackfin/mach-bf537/anomaly.h index 2b66ecf489f7..746a794b3119 100644 --- a/include/asm-blackfin/mach-bf537/anomaly.h +++ b/include/asm-blackfin/mach-bf537/anomaly.h @@ -7,9 +7,7 @@ */ /* This file shoule be up to date with: - * - Revision M, March 13, 2007; ADSP-BF537 Blackfin Processor Anomaly List - * - Revision L, March 13, 2007; ADSP-BF536 Blackfin Processor Anomaly List - * - Revision M, March 13, 2007; ADSP-BF534 Blackfin Processor Anomaly List + * - Revision A, 09/04/2007; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List */ #ifndef _MACH_ANOMALY_H_ @@ -17,7 +15,7 @@ /* We do not support 0.1 silicon - sorry */ #if __SILICON_REVISION__ < 2 -# error Kernel will not work on BF537 silicon version 0.0 or 0.1 +# error will not work on BF537 silicon version 0.0 or 0.1 #endif #if defined(__ADSPBF534__) @@ -44,6 +42,8 @@ #define ANOMALY_05000122 (1) /* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */ #define ANOMALY_05000157 (__SILICON_REVISION__ < 2) +/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */ +#define ANOMALY_05000167 (1) /* PPI_DELAY not functional in PPI modes with 0 frame syncs */ #define ANOMALY_05000180 (1) /* Instruction Cache Is Not Functional */ @@ -130,6 +130,12 @@ #define ANOMALY_05000321 (__SILICON_REVISION__ < 3) /* EMAC RMII mode at 10-Base-T speed: RX frames not received properly */ #define ANOMALY_05000322 (1) +/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */ +#define ANOMALY_05000341 (__SILICON_REVISION__ >= 3) +/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ +#define ANOMALY_05000357 (1) +/* DMAs that Go Urgent during Tight Core Writes to External Memory Are Blocked */ +#define ANOMALY_05000359 (1) /* Anomalies that don't exist on this proc */ #define ANOMALY_05000125 (0) diff --git a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h index 6fb328f5186a..86e45c379838 100644 --- a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h @@ -146,7 +146,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart) if (uart->rts_pin >= 0) { gpio_request(uart->rts_pin, DRIVER_NAME); - gpio_direction_output(uart->rts_pin); + gpio_direction_output(uart->rts_pin, 0); } #endif } diff --git a/include/asm-blackfin/mach-bf537/portmux.h 
b/include/asm-blackfin/mach-bf537/portmux.h index 5a3f7d3bf73d..78fee6e0f237 100644 --- a/include/asm-blackfin/mach-bf537/portmux.h +++ b/include/asm-blackfin/mach-bf537/portmux.h @@ -1,6 +1,8 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ +#define MAX_RESOURCES (MAX_BLACKFIN_GPIOS + GPIO_BANKSIZE) /* We additionally handle PORTJ */ + #define P_UART0_TX (P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(0)) #define P_UART0_RX (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(0)) #define P_UART1_TX (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(0)) diff --git a/include/asm-blackfin/mach-bf548/anomaly.h b/include/asm-blackfin/mach-bf548/anomaly.h index c5b63759cdee..850dc12eb7f2 100644 --- a/include/asm-blackfin/mach-bf548/anomaly.h +++ b/include/asm-blackfin/mach-bf548/anomaly.h @@ -7,7 +7,7 @@ */ /* This file shoule be up to date with: - * - Revision C, July 16, 2007; ADSP-BF549 Silicon Anomaly List + * - Revision E, 11/28/2007; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List */ #ifndef _MACH_ANOMALY_H_ @@ -26,47 +26,59 @@ /* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */ #define ANOMALY_05000272 (1) /* False Hardware Error Exception when ISR context is not restored */ -#define ANOMALY_05000281 (1) +#define ANOMALY_05000281 (__SILICON_REVISION__ < 1) /* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */ -#define ANOMALY_05000304 (1) +#define ANOMALY_05000304 (__SILICON_REVISION__ < 1) /* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */ #define ANOMALY_05000310 (1) /* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */ -#define ANOMALY_05000312 (1) +#define ANOMALY_05000312 (__SILICON_REVISION__ < 1) /* TWI Slave Boot Mode Is Not Functional */ -#define ANOMALY_05000324 (1) +#define ANOMALY_05000324 (__SILICON_REVISION__ < 1) /* External FIFO Boot Mode Is Not Functional */ -#define ANOMALY_05000325 (1) +#define ANOMALY_05000325 (__SILICON_REVISION__ < 1) /* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */ -#define ANOMALY_05000327 (1) +#define ANOMALY_05000327 (__SILICON_REVISION__ < 1) /* Incorrect Access of OTP_STATUS During otp_write() Function */ -#define ANOMALY_05000328 (1) +#define ANOMALY_05000328 (__SILICON_REVISION__ < 1) /* Synchronous Burst Flash Boot Mode Is Not Functional */ -#define ANOMALY_05000329 (1) +#define ANOMALY_05000329 (__SILICON_REVISION__ < 1) /* Host DMA Boot Mode Is Not Functional */ -#define ANOMALY_05000330 (1) +#define ANOMALY_05000330 (__SILICON_REVISION__ < 1) /* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */ -#define ANOMALY_05000334 (1) +#define ANOMALY_05000334 (__SILICON_REVISION__ < 1) /* Inadequate Rotary Debounce Logic Duration */ -#define ANOMALY_05000335 (1) +#define ANOMALY_05000335 (__SILICON_REVISION__ < 1) /* Phantom Interrupt Occurs After First Configuration of Host DMA Port */ -#define ANOMALY_05000336 (1) +#define ANOMALY_05000336 (__SILICON_REVISION__ < 1) /* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */ -#define ANOMALY_05000337 (1) +#define ANOMALY_05000337 (__SILICON_REVISION__ < 1) /* Slave-Mode SPI0 MISO Failure With CPHA = 0 */ -#define ANOMALY_05000338 (1) +#define ANOMALY_05000338 (__SILICON_REVISION__ < 1) /* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */ -#define ANOMALY_05000340 (1) +#define ANOMALY_05000340 (__SILICON_REVISION__ < 1) /* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals 
Are Swapped */ -#define ANOMALY_05000344 (1) +#define ANOMALY_05000344 (__SILICON_REVISION__ < 1) /* USB Calibration Value Is Not Intialized */ -#define ANOMALY_05000346 (1) +#define ANOMALY_05000346 (__SILICON_REVISION__ < 1) /* Boot ROM Kernel Incorrectly Alters Reset Value of USB Register */ -#define ANOMALY_05000347 (1) +#define ANOMALY_05000347 (__SILICON_REVISION__ < 1) /* Data Lost when Core Reads SDH Data FIFO */ -#define ANOMALY_05000349 (1) +#define ANOMALY_05000349 (__SILICON_REVISION__ < 1) /* PLL Status Register Is Inaccurate */ -#define ANOMALY_05000351 (1) +#define ANOMALY_05000351 (__SILICON_REVISION__ < 1) +/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ +#define ANOMALY_05000357 (1) +/* External Memory Read Access Hangs Core With PLL Bypass */ +#define ANOMALY_05000360 (1) +/* DMAs that Go Urgent during Tight Core Writes to External Memory Are Blocked */ +#define ANOMALY_05000365 (1) +/* Addressing Conflict between Boot ROM and Asynchronous Memory */ +#define ANOMALY_05000369 (1) +/* Mobile DDR Operation Not Functional */ +#define ANOMALY_05000377 (1) +/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */ +#define ANOMALY_05000378 (1) /* Anomalies that don't exist on this proc */ #define ANOMALY_05000125 (0) diff --git a/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h index f21a1620e6bd..3770aa38ee9f 100644 --- a/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h @@ -186,7 +186,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart) if (uart->rts_pin >= 0) { gpio_request(uart->rts_pin, DRIVER_NAME); - gpio_direction_output(uart->rts_pin); + gpio_direction_output(uart->rts_pin, 0); } #endif } diff --git a/include/asm-blackfin/mach-bf548/cdefBF54x_base.h b/include/asm-blackfin/mach-bf548/cdefBF54x_base.h index aefab3f618c1..19ddcd83c71f 100644 --- a/include/asm-blackfin/mach-bf548/cdefBF54x_base.h +++ b/include/asm-blackfin/mach-bf548/cdefBF54x_base.h @@ -244,39 +244,6 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val) #define bfin_read_TWI0_RCV_DATA16() bfin_read16(TWI0_RCV_DATA16) #define bfin_write_TWI0_RCV_DATA16(val) bfin_write16(TWI0_RCV_DATA16, val) -#define bfin_read_TWI_CLKDIV() bfin_read16(TWI0_CLKDIV) -#define bfin_write_TWI_CLKDIV(val) bfin_write16(TWI0_CLKDIV, val) -#define bfin_read_TWI_CONTROL() bfin_read16(TWI0_CONTROL) -#define bfin_write_TWI_CONTROL(val) bfin_write16(TWI0_CONTROL, val) -#define bfin_read_TWI_SLAVE_CTRL() bfin_read16(TWI0_SLAVE_CTRL) -#define bfin_write_TWI_SLAVE_CTRL(val) bfin_write16(TWI0_SLAVE_CTRL, val) -#define bfin_read_TWI_SLAVE_STAT() bfin_read16(TWI0_SLAVE_STAT) -#define bfin_write_TWI_SLAVE_STAT(val) bfin_write16(TWI0_SLAVE_STAT, val) -#define bfin_read_TWI_SLAVE_ADDR() bfin_read16(TWI0_SLAVE_ADDR) -#define bfin_write_TWI_SLAVE_ADDR(val) bfin_write16(TWI0_SLAVE_ADDR, val) -#define bfin_read_TWI_MASTER_CTL() bfin_read16(TWI0_MASTER_CTRL) -#define bfin_write_TWI_MASTER_CTL(val) bfin_write16(TWI0_MASTER_CTRL, val) -#define bfin_read_TWI_MASTER_STAT() bfin_read16(TWI0_MASTER_STAT) -#define bfin_write_TWI_MASTER_STAT(val) bfin_write16(TWI0_MASTER_STAT, val) -#define bfin_read_TWI_MASTER_ADDR() bfin_read16(TWI0_MASTER_ADDR) -#define bfin_write_TWI_MASTER_ADDR(val) bfin_write16(TWI0_MASTER_ADDR, val) -#define bfin_read_TWI_INT_STAT() bfin_read16(TWI0_INT_STAT) -#define bfin_write_TWI_INT_STAT(val) bfin_write16(TWI0_INT_STAT, val) -#define 
bfin_read_TWI_INT_MASK() bfin_read16(TWI0_INT_MASK) -#define bfin_write_TWI_INT_MASK(val) bfin_write16(TWI0_INT_MASK, val) -#define bfin_read_TWI_FIFO_CTL() bfin_read16(TWI0_FIFO_CTRL) -#define bfin_write_TWI_FIFO_CTL(val) bfin_write16(TWI0_FIFO_CTRL, val) -#define bfin_read_TWI_FIFO_STAT() bfin_read16(TWI0_FIFO_STAT) -#define bfin_write_TWI_FIFO_STAT(val) bfin_write16(TWI0_FIFO_STAT, val) -#define bfin_read_TWI_XMT_DATA8() bfin_read16(TWI0_XMT_DATA8) -#define bfin_write_TWI_XMT_DATA8(val) bfin_write16(TWI0_XMT_DATA8, val) -#define bfin_read_TWI_XMT_DATA16() bfin_read16(TWI0_XMT_DATA16) -#define bfin_write_TWI_XMT_DATA16(val) bfin_write16(TWI0_XMT_DATA16, val) -#define bfin_read_TWI_RCV_DATA8() bfin_read16(TWI0_RCV_DATA8) -#define bfin_write_TWI_RCV_DATA8(val) bfin_write16(TWI0_RCV_DATA8, val) -#define bfin_read_TWI_RCV_DATA16() bfin_read16(TWI0_RCV_DATA16) -#define bfin_write_TWI_RCV_DATA16(val) bfin_write16(TWI0_RCV_DATA16, val) - /* SPORT0 is not defined in the shared file because it is not available on the ADSP-BF542 and ADSP-BF544 bfin_read_()rocessors */ /* SPORT1 Registers */ diff --git a/include/asm-blackfin/mach-bf548/defBF542.h b/include/asm-blackfin/mach-bf548/defBF542.h index 32d07130200c..a7c809f29ede 100644 --- a/include/asm-blackfin/mach-bf548/defBF542.h +++ b/include/asm-blackfin/mach-bf548/defBF542.h @@ -432,8 +432,8 @@ #define CMD_CRC_FAIL 0x1 /* CMD CRC Fail */ #define DAT_CRC_FAIL 0x2 /* Data CRC Fail */ -#define CMD_TIMEOUT 0x4 /* CMD Time Out */ -#define DAT_TIMEOUT 0x8 /* Data Time Out */ +#define CMD_TIME_OUT 0x4 /* CMD Time Out */ +#define DAT_TIME_OUT 0x8 /* Data Time Out */ #define TX_UNDERRUN 0x10 /* Transmit Underrun */ #define RX_OVERRUN 0x20 /* Receive Overrun */ #define CMD_RESP_END 0x40 /* CMD Response End */ diff --git a/include/asm-blackfin/mach-bf548/defBF548.h b/include/asm-blackfin/mach-bf548/defBF548.h index ecbca952985c..e46f56891e6a 100644 --- a/include/asm-blackfin/mach-bf548/defBF548.h +++ b/include/asm-blackfin/mach-bf548/defBF548.h @@ -1095,8 +1095,8 @@ #define CMD_CRC_FAIL 0x1 /* CMD CRC Fail */ #define DAT_CRC_FAIL 0x2 /* Data CRC Fail */ -#define CMD_TIMEOUT 0x4 /* CMD Time Out */ -#define DAT_TIMEOUT 0x8 /* Data Time Out */ +#define CMD_TIME_OUT 0x4 /* CMD Time Out */ +#define DAT_TIME_OUT 0x8 /* Data Time Out */ #define TX_UNDERRUN 0x10 /* Transmit Underrun */ #define RX_OVERRUN 0x20 /* Receive Overrun */ #define CMD_RESP_END 0x40 /* CMD Response End */ diff --git a/include/asm-blackfin/mach-bf548/defBF54x_base.h b/include/asm-blackfin/mach-bf548/defBF54x_base.h index 319a48590c9c..08f90c21fe8a 100644 --- a/include/asm-blackfin/mach-bf548/defBF54x_base.h +++ b/include/asm-blackfin/mach-bf548/defBF54x_base.h @@ -1772,17 +1772,36 @@ #define TRP 0x3c0000 /* Pre charge-to-active command period */ #define TRAS 0x3c00000 /* Min Active-to-pre charge time */ #define TRC 0x3c000000 /* Active-to-active time */ +#define DDR_TRAS(x) ((x<<22)&TRAS) /* DDR tRAS = (1~15) cycles */ +#define DDR_TRP(x) ((x<<18)&TRP) /* DDR tRP = (1~15) cycles */ +#define DDR_TRC(x) ((x<<26)&TRC) /* DDR tRC = (1~15) cycles */ +#define DDR_TRFC(x) ((x<<14)&TRFC) /* DDR tRFC = (1~15) cycles */ +#define DDR_TREFI(x) (x&TREFI) /* DDR tRFC = (1~15) cycles */ /* Bit masks for EBIU_DDRCTL1 */ #define TRCD 0xf /* Active-to-Read/write delay */ -#define MRD 0xf0 /* Mode register set to active */ +#define TMRD 0xf0 /* Mode register set to active */ #define TWR 0x300 /* Write Recovery time */ #define DDRDATWIDTH 0x3000 /* DDR data width */ #define EXTBANKS 0xc000 /* External banks */ 
#define DDRDEVWIDTH 0x30000 /* DDR device width */ #define DDRDEVSIZE 0xc0000 /* DDR device size */ -#define TWWTR 0xf0000000 /* Write-to-read delay */ +#define TWTR 0xf0000000 /* Write-to-read delay */ +#define DDR_TWTR(x) ((x<<28)&TWTR) /* DDR tWTR = (1~15) cycles */ +#define DDR_TMRD(x) ((x<<4)&TMRD) /* DDR tMRD = (1~15) cycles */ +#define DDR_TWR(x) ((x<<8)&TWR) /* DDR tWR = (1~15) cycles */ +#define DDR_TRCD(x) (x&TRCD) /* DDR tRCD = (1~15) cycles */ +#define DDR_DATWIDTH 0x2000 /* DDR data width */ +#define EXTBANK_1 0 /* 1 external bank */ +#define EXTBANK_2 0x4000 /* 2 external banks */ +#define DEVSZ_64 0x40000 /* DDR External Bank Size = 64MB */ +#define DEVSZ_128 0x80000 /* DDR External Bank Size = 128MB */ +#define DEVSZ_256 0xc0000 /* DDR External Bank Size = 256MB */ +#define DEVSZ_512 0 /* DDR External Bank Size = 512MB */ +#define DEVWD_4 0 /* DDR Device Width = 4 Bits */ +#define DEVWD_8 0x10000 /* DDR Device Width = 8 Bits */ +#define DEVWD_16 0x20000 /* DDR Device Width = 16 Bits */ /* Bit masks for EBIU_DDRCTL2 */ @@ -1790,6 +1809,10 @@ #define CASLATENCY 0x70 /* CAS latency */ #define DLLRESET 0x100 /* DLL Reset */ #define REGE 0x1000 /* Register mode enable */ +#define CL_1_5 0x50 /* DDR CAS Latency = 1.5 cycles */ +#define CL_2 0x20 /* DDR CAS Latency = 2 cycles */ +#define CL_2_5 0x60 /* DDR CAS Latency = 2.5 cycles */ +#define CL_3 0x30 /* DDR CAS Latency = 3 cycles */ /* Bit masks for EBIU_DDRCTL3 */ @@ -2257,6 +2280,10 @@ #define CSEL 0x30 /* Core Select */ #define SSEL 0xf /* System Select */ +#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */ +#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */ +#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */ +#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */ /* Bit masks for PLL_CTL */ diff --git a/include/asm-blackfin/mach-bf548/irq.h b/include/asm-blackfin/mach-bf548/irq.h index 9fb7bc5399a8..c34507a3f1df 100644 --- a/include/asm-blackfin/mach-bf548/irq.h +++ b/include/asm-blackfin/mach-bf548/irq.h @@ -88,7 +88,7 @@ Events (highest priority) EMU 0 #define IRQ_PINT1 BFIN_IRQ(20) /* PINT1 Interrupt */ #define IRQ_MDMAS0 BFIN_IRQ(21) /* MDMA Stream 0 Interrupt */ #define IRQ_MDMAS1 BFIN_IRQ(22) /* MDMA Stream 1 Interrupt */ -#define IRQ_WATCHDOG BFIN_IRQ(23) /* Watchdog Interrupt */ +#define IRQ_WATCH BFIN_IRQ(23) /* Watchdog Interrupt */ #define IRQ_DMAC1_ERROR BFIN_IRQ(24) /* DMAC1 Status (Error) Interrupt */ #define IRQ_SPORT2_ERROR BFIN_IRQ(25) /* SPORT2 Error Interrupt */ #define IRQ_SPORT3_ERROR BFIN_IRQ(26) /* SPORT3 Error Interrupt */ @@ -406,7 +406,7 @@ Events (highest priority) EMU 0 #define IRQ_PINT1_POS 16 #define IRQ_MDMAS0_POS 20 #define IRQ_MDMAS1_POS 24 -#define IRQ_WATCHDOG_POS 28 +#define IRQ_WATCH_POS 28 /* IAR3 BIT FIELDS */ #define IRQ_DMAC1_ERR_POS 0 diff --git a/include/asm-blackfin/mach-bf548/mem_init.h b/include/asm-blackfin/mach-bf548/mem_init.h index 0cb279e973d7..befc2903d5a5 100644 --- a/include/asm-blackfin/mach-bf548/mem_init.h +++ b/include/asm-blackfin/mach-bf548/mem_init.h @@ -28,8 +28,68 @@ * If not, write to the Free Software Foundation, * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ +#define MIN_DDR_SCLK(x) (x*(CONFIG_SCLK_HZ/1000/1000)/1000 + 1) + +#if (CONFIG_MEM_MT46V32M16_6T) +#define DDR_SIZE DEVSZ_512 +#define DDR_WIDTH DEVWD_16 + +#define DDR_tRC DDR_TRC(MIN_DDR_SCLK(60)) +#define DDR_tRAS DDR_TRAS(MIN_DDR_SCLK(42)) +#define DDR_tRP DDR_TRP(MIN_DDR_SCLK(15)) +#define DDR_tRFC DDR_TRFC(MIN_DDR_SCLK(72)) +#define DDR_tREFI DDR_TREFI(MIN_DDR_SCLK(7800)) + +#define DDR_tRCD DDR_TRCD(MIN_DDR_SCLK(15)) +#define DDR_tWTR DDR_TWTR(1) +#define DDR_tMRD DDR_TMRD(MIN_DDR_SCLK(12)) +#define DDR_tWR DDR_TWR(MIN_DDR_SCLK(15)) +#endif + +#if (CONFIG_MEM_MT46V32M16_5B) +#define DDR_SIZE DEVSZ_512 +#define DDR_WIDTH DEVWD_16 + +#define DDR_tRC DDR_TRC(MIN_DDR_SCLK(55)) +#define DDR_tRAS DDR_TRAS(MIN_DDR_SCLK(40)) +#define DDR_tRP DDR_TRP(MIN_DDR_SCLK(15)) +#define DDR_tRFC DDR_TRFC(MIN_DDR_SCLK(70)) +#define DDR_tREFI DDR_TREFI(MIN_DDR_SCLK(7800)) + +#define DDR_tRCD DDR_TRCD(MIN_DDR_SCLK(15)) +#define DDR_tWTR DDR_TWTR(2) +#define DDR_tMRD DDR_TMRD(MIN_DDR_SCLK(10)) +#define DDR_tWR DDR_TWR(MIN_DDR_SCLK(15)) +#endif + +#if (CONFIG_MEM_GENERIC_BOARD) +#define DDR_SIZE DEVSZ_512 +#define DDR_WIDTH DEVWD_16 + +#define DDR_tRCD DDR_TRCD(3) +#define DDR_tWTR DDR_TWTR(2) +#define DDR_tWR DDR_TWR(2) +#define DDR_tMRD DDR_TMRD(2) +#define DDR_tRP DDR_TRP(3) +#define DDR_tRAS DDR_TRAS(7) +#define DDR_tRC DDR_TRC(10) +#define DDR_tRFC DDR_TRFC(12) +#define DDR_tREFI DDR_TREFI(1288) +#endif + +#if (CONFIG_SCLK_HZ <= 133333333) +#define DDR_CL CL_2 +#elif (CONFIG_SCLK_HZ <= 166666666) +#define DDR_CL CL_2_5 +#else +#define DDR_CL CL_3 +#endif + +#define mem_DDRCTL0 (DDR_tRP | DDR_tRAS | DDR_tRC | DDR_tRFC | DDR_tREFI) +#define mem_DDRCTL1 (DDR_DATWIDTH | EXTBANK_1 | DDR_SIZE | DDR_WIDTH | DDR_tWTR \ + | DDR_tMRD | DDR_tWR | DDR_tRCD) +#define mem_DDRCTL2 DDR_CL -#if (CONFIG_MEM_MT46V32M16) #if defined CONFIG_CLKIN_HALF #define CLKIN_HALF 1 diff --git a/include/asm-blackfin/mach-bf548/portmux.h b/include/asm-blackfin/mach-bf548/portmux.h index 6b485120015f..8177a567dcdb 100644 --- a/include/asm-blackfin/mach-bf548/portmux.h +++ b/include/asm-blackfin/mach-bf548/portmux.h @@ -1,6 +1,8 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ +#define MAX_RESOURCES MAX_BLACKFIN_GPIOS + #define P_SPORT2_TFS (P_DEFINED | P_IDENT(GPIO_PA0) | P_FUNCT(0)) #define P_SPORT2_DTSEC (P_DEFINED | P_IDENT(GPIO_PA1) | P_FUNCT(0)) #define P_SPORT2_DTPRI (P_DEFINED | P_IDENT(GPIO_PA2) | P_FUNCT(0)) diff --git a/include/asm-blackfin/mach-bf561/anomaly.h b/include/asm-blackfin/mach-bf561/anomaly.h index bed956456884..0c1d46193939 100644 --- a/include/asm-blackfin/mach-bf561/anomaly.h +++ b/include/asm-blackfin/mach-bf561/anomaly.h @@ -7,7 +7,7 @@ */ /* This file shoule be up to date with: - * - Revision N, March 28, 2007; ADSP-BF561 Silicon Anomaly List + * - Revision O, 11/15/2007; ADSP-BF561 Blackfin Processor Anomaly List */ #ifndef _MACH_ANOMALY_H_ @@ -15,7 +15,7 @@ /* We do not support 0.1, 0.2, or 0.4 silicon - sorry */ #if __SILICON_REVISION__ < 3 || __SILICON_REVISION__ == 4 -# error Kernel will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4 +# error will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4 #endif /* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot 2 Not Supported */ @@ -208,6 +208,8 @@ #define ANOMALY_05000275 (__SILICON_REVISION__ > 2) /* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */ #define ANOMALY_05000276 (__SILICON_REVISION__ < 5) +/* Writes to an I/O data register one SCLK cycle after an edge is 
detected may clear interrupt */ +#define ANOMALY_05000277 (__SILICON_REVISION__ < 3) /* Disabling Peripherals with DMA Running May Cause DMA System Instability */ #define ANOMALY_05000278 (__SILICON_REVISION__ < 5) /* False Hardware Error Exception When ISR Context Is Not Restored */ @@ -246,6 +248,18 @@ #define ANOMALY_05000332 (__SILICON_REVISION__ < 5) /* Flag Data Register Writes One SCLK Cycle After Edge Is Detected May Clear Interrupt Status */ #define ANOMALY_05000333 (__SILICON_REVISION__ < 5) +/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available on Older Silicon) */ +#define ANOMALY_05000339 (__SILICON_REVISION__ < 5) +/* Memory DMA FIFO Causes Throughput Degradation on Writes to External Memory */ +#define ANOMALY_05000343 (__SILICON_REVISION__ < 5) +/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ +#define ANOMALY_05000357 (1) +/* Conflicting Column Address Widths Causes SDRAM Errors */ +#define ANOMALY_05000362 (1) +/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */ +#define ANOMALY_05000366 (1) +/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */ +#define ANOMALY_05000371 (1) /* Anomalies that don't exist on this proc */ #define ANOMALY_05000158 (0) diff --git a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h index 69b9f8e120e9..7871d4313f49 100644 --- a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h @@ -111,7 +111,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart) } if (uart->rts_pin >= 0) { gpio_request(uart->rts_pin, DRIVER_NAME); - gpio_direction_input(uart->rts_pin); + gpio_direction_input(uart->rts_pin, 0); } #endif } diff --git a/include/asm-blackfin/mach-bf561/portmux.h b/include/asm-blackfin/mach-bf561/portmux.h index 132ad31665e3..a6ee8206efb6 100644 --- a/include/asm-blackfin/mach-bf561/portmux.h +++ b/include/asm-blackfin/mach-bf561/portmux.h @@ -1,6 +1,8 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ +#define MAX_RESOURCES MAX_BLACKFIN_GPIOS + #define P_PPI0_CLK (P_DONTCARE) #define P_PPI0_FS1 (P_DONTCARE) #define P_PPI0_FS2 (P_DONTCARE) diff --git a/include/asm-blackfin/mmu.h b/include/asm-blackfin/mmu.h index 11d52f1167d0..757e43906ed4 100644 --- a/include/asm-blackfin/mmu.h +++ b/include/asm-blackfin/mmu.h @@ -24,7 +24,9 @@ typedef struct { unsigned long exec_fdpic_loadmap; unsigned long interp_fdpic_loadmap; #endif - +#ifdef CONFIG_MPU + unsigned long *page_rwx_mask; +#endif } mm_context_t; #endif diff --git a/include/asm-blackfin/mmu_context.h b/include/asm-blackfin/mmu_context.h index c5c71a6aaf19..b5eb67596ad5 100644 --- a/include/asm-blackfin/mmu_context.h +++ b/include/asm-blackfin/mmu_context.h @@ -30,9 +30,12 @@ #ifndef __BLACKFIN_MMU_CONTEXT_H__ #define __BLACKFIN_MMU_CONTEXT_H__ +#include <linux/gfp.h> +#include <linux/sched.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/pgalloc.h> +#include <asm/cplbinit.h> extern void *current_l1_stack_save; extern int nr_l1stack_tasks; @@ -50,6 +53,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { +#ifdef CONFIG_MPU + unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order); + mm->context.page_rwx_mask = (unsigned long *)p; + memset(mm->context.page_rwx_mask, 0, + page_mask_nelts * 3 * sizeof(long)); +#endif return 0; } @@ -73,6 +82,11 @@ static 
inline void destroy_context(struct mm_struct *mm) sram_free(tmp->addr); kfree(tmp); } +#ifdef CONFIG_MPU + if (current_rwx_mask == mm->context.page_rwx_mask) + current_rwx_mask = NULL; + free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); +#endif } static inline unsigned long @@ -106,9 +120,21 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base) #define deactivate_mm(tsk,mm) do { } while (0) -static inline void activate_mm(struct mm_struct *prev_mm, - struct mm_struct *next_mm) +#define activate_mm(prev, next) switch_mm(prev, next, NULL) + +static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *tsk) { + if (prev_mm == next_mm) + return; +#ifdef CONFIG_MPU + if (prev_mm->context.page_rwx_mask == current_rwx_mask) { + flush_switched_cplbs(); + set_mask_dcplbs(next_mm->context.page_rwx_mask); + } +#endif + + /* L1 stack switching. */ if (!next_mm->context.l1_stack_save) return; if (next_mm->context.l1_stack_save == current_l1_stack_save) @@ -120,10 +146,36 @@ static inline void activate_mm(struct mm_struct *prev_mm, memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len); } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) +#ifdef CONFIG_MPU +static inline void protect_page(struct mm_struct *mm, unsigned long addr, + unsigned long flags) +{ + unsigned long *mask = mm->context.page_rwx_mask; + unsigned long page = addr >> 12; + unsigned long idx = page >> 5; + unsigned long bit = 1 << (page & 31); + + if (flags & VM_MAYREAD) + mask[idx] |= bit; + else + mask[idx] &= ~bit; + mask += page_mask_nelts; + if (flags & VM_MAYWRITE) + mask[idx] |= bit; + else + mask[idx] &= ~bit; + mask += page_mask_nelts; + if (flags & VM_MAYEXEC) + mask[idx] |= bit; + else + mask[idx] &= ~bit; +} + +static inline void update_protections(struct mm_struct *mm) { - activate_mm(prev, next); + flush_switched_cplbs(); + set_mask_dcplbs(mm->context.page_rwx_mask); } +#endif #endif diff --git a/include/asm-blackfin/traps.h b/include/asm-blackfin/traps.h index ee1cbf73a9ab..f0e5f940d9ca 100644 --- a/include/asm-blackfin/traps.h +++ b/include/asm-blackfin/traps.h @@ -45,6 +45,10 @@ #define VEC_CPLB_I_M (44) #define VEC_CPLB_I_MHIT (45) #define VEC_ILL_RES (46) /* including unvalid supervisor mode insn */ +/* The hardware reserves (63) for future use - we use it to tell our + * normal exception handling code we have a hardware error + */ +#define VEC_HWERR (63) #ifndef __ASSEMBLY__ diff --git a/include/asm-blackfin/uaccess.h b/include/asm-blackfin/uaccess.h index 2233f8f9314d..22a410b8003b 100644 --- a/include/asm-blackfin/uaccess.h +++ b/include/asm-blackfin/uaccess.h @@ -31,7 +31,7 @@ static inline void set_fs(mm_segment_t fs) #define VERIFY_READ 0 #define VERIFY_WRITE 1 -#define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) +#define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size)) static inline int is_in_rom(unsigned long addr) { diff --git a/include/asm-blackfin/unistd.h b/include/asm-blackfin/unistd.h index 07ffe8b718c5..e98167358d26 100644 --- a/include/asm-blackfin/unistd.h +++ b/include/asm-blackfin/unistd.h @@ -369,8 +369,9 @@ #define __NR_set_robust_list 354 #define __NR_get_robust_list 355 #define __NR_fallocate 356 +#define __NR_semtimedop 357 -#define __NR_syscall 357 +#define __NR_syscall 358 #define NR_syscalls __NR_syscall /* Old optional stuff no one actually uses */ diff --git a/include/asm-cris/arch-v10/ide.h 
b/include/asm-cris/arch-v10/ide.h index 78b301ed7b12..ea34e0d0a388 100644 --- a/include/asm-cris/arch-v10/ide.h +++ b/include/asm-cris/arch-v10/ide.h @@ -89,11 +89,6 @@ static inline void ide_init_default_hwifs(void) } } -/* some configuration options we don't need */ - -#undef SUPPORT_VLB_SYNC -#define SUPPORT_VLB_SYNC 0 - #endif /* __KERNEL__ */ #endif /* __ASMCRIS_IDE_H */ diff --git a/include/asm-cris/arch-v32/ide.h b/include/asm-cris/arch-v32/ide.h index 11296170d057..fb9c3627a5b4 100644 --- a/include/asm-cris/arch-v32/ide.h +++ b/include/asm-cris/arch-v32/ide.h @@ -48,11 +48,6 @@ static inline unsigned long ide_default_io_base(int index) return REG_TYPE_CONV(unsigned long, reg_ata_rw_ctrl2, ctrl2); } -/* some configuration options we don't need */ - -#undef SUPPORT_VLB_SYNC -#define SUPPORT_VLB_SYNC 0 - #define IDE_ARCH_ACK_INTR #define ide_ack_intr(hwif) ((hwif)->ack_intr(hwif)) diff --git a/include/asm-frv/ide.h b/include/asm-frv/ide.h index f0bd2cb250c1..8c9a540d4344 100644 --- a/include/asm-frv/ide.h +++ b/include/asm-frv/ide.h @@ -18,12 +18,6 @@ #include <asm/io.h> #include <asm/irq.h> -#undef SUPPORT_SLOW_DATA_PORTS -#define SUPPORT_SLOW_DATA_PORTS 0 - -#undef SUPPORT_VLB_SYNC -#define SUPPORT_VLB_SYNC 0 - #ifndef MAX_HWIFS #define MAX_HWIFS 8 #endif diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h index 1697404afa05..63cf822431a2 100644 --- a/include/asm-generic/bitops/ext2-non-atomic.h +++ b/include/asm-generic/bitops/ext2-non-atomic.h @@ -14,5 +14,7 @@ generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) #define ext2_find_next_zero_bit(addr, size, off) \ generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h index b9c7e5d2d2ad..80e3bf13b2b9 100644 --- a/include/asm-generic/bitops/le.h +++ b/include/asm-generic/bitops/le.h @@ -20,6 +20,8 @@ #define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) #define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) +#define generic_find_next_le_bit(addr, size, offset) \ + find_next_bit(addr, size, offset) #elif defined(__BIG_ENDIAN) @@ -42,6 +44,8 @@ extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +extern unsigned long generic_find_next_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); #else #error "Please fix <asm/byteorder.h>" diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index d56fedbb457a..2632328d8646 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -31,14 +31,19 @@ struct bug_entry { #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) #endif -#ifndef HAVE_ARCH_WARN_ON +#ifndef __WARN +#ifndef __ASSEMBLY__ +extern void warn_on_slowpath(const char *file, const int line); +#define WANT_WARN_ON_SLOWPATH +#endif +#define __WARN() warn_on_slowpath(__FILE__, __LINE__) +#endif + +#ifndef WARN_ON #define WARN_ON(condition) ({ \ int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) { \ - printk("WARNING: at %s:%d %s()\n", __FILE__, \ - __LINE__, __FUNCTION__); \ - dump_stack(); \ - } \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ unlikely(__ret_warn_on); \ }) #endif diff --git 
a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index d85172e9ed45..4b8d31cda1a0 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -3,54 +3,79 @@ #include <linux/compiler.h> #include <linux/threads.h> -#define __GENERIC_PER_CPU +/* + * Determine the real variable name from the name visible in the + * kernel sources. + */ +#define per_cpu_var(var) per_cpu__##var + #ifdef CONFIG_SMP +/* + * per_cpu_offset() is the offset that has to be added to a + * percpu variable to get to the instance for a certain processor. + * + * Most arches use the __per_cpu_offset array for those offsets but + * some arches have their own ways of determining the offset (x86_64, s390). + */ +#ifndef __per_cpu_offset extern unsigned long __per_cpu_offset[NR_CPUS]; #define per_cpu_offset(x) (__per_cpu_offset[x]) +#endif -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - -/* var is in discarded region: offset to particular copy we want */ -#define per_cpu(var, cpu) (*({ \ - extern int simple_identifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); })) -#define __get_cpu_var(var) per_cpu(var, smp_processor_id()) -#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id()) - -/* A macro to avoid #include hell... */ -#define percpu_modcopy(pcpudst, src, size) \ -do { \ - unsigned int __i; \ - for_each_possible_cpu(__i) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ - (src), (size)); \ -} while (0) -#else /* ! SMP */ +/* + * Determine the offset for the currently active processor. + * An arch may define __my_cpu_offset to provide a more effective + * means of obtaining the offset to the per cpu variables of the + * current processor. + */ +#ifndef __my_cpu_offset +#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) +#define my_cpu_offset per_cpu_offset(smp_processor_id()) +#else +#define my_cpu_offset __my_cpu_offset +#endif + +/* + * Add a offset to a pointer but keep the pointer as is. + * + * Only S390 provides its own means of moving the pointer. + */ +#ifndef SHIFT_PERCPU_PTR +#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset)) +#endif -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name +/* + * A percpu variable may point to a discarded regions. The following are + * established ways to produce a usable pointer from the percpu variable + * offset. + */ +#define per_cpu(var, cpu) \ + (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu))) +#define __get_cpu_var(var) \ + (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset)) +#define __raw_get_cpu_var(var) \ + (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) -#define __get_cpu_var(var) per_cpu__##var -#define __raw_get_cpu_var(var) per_cpu__##var +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +extern void setup_per_cpu_areas(void); +#endif + +#else /* ! 
SMP */ + +#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) +#define __get_cpu_var(var) per_cpu_var(var) +#define __raw_get_cpu_var(var) per_cpu_var(var) #endif /* SMP */ -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) +#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \ + __typeof__(type) per_cpu_var(name) #endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index a4a22cc35898..587566f95f6c 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -44,8 +44,8 @@ #define RLIMIT_NICE 13 /* max nice prio allowed to raise to 0-39 for nice level 19 .. -20 */ #define RLIMIT_RTPRIO 14 /* maximum realtime priority */ - -#define RLIM_NLIMITS 15 +#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */ +#define RLIM_NLIMITS 16 /* * SuS says limits have to be unsigned. @@ -86,6 +86,7 @@ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ [RLIMIT_NICE] = { 0, 0 }, \ [RLIMIT_RTPRIO] = { 0, 0 }, \ + [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ } #endif /* __KERNEL__ */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 75f2bfab614f..6ce9f3ab928d 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -15,7 +15,6 @@ #include <linux/swap.h> #include <linux/quicklist.h> -#include <asm/pgalloc.h> #include <asm/tlbflush.h> /* diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 9f584cc5c5fb..f784d2f34149 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -9,10 +9,46 @@ /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) +/* The actual configuration determine if the init/exit sections + * are handled as text/data or they can be discarded (which + * often happens at runtime) + */ +#ifdef CONFIG_HOTPLUG +#define DEV_KEEP(sec) *(.dev##sec) +#define DEV_DISCARD(sec) +#else +#define DEV_KEEP(sec) +#define DEV_DISCARD(sec) *(.dev##sec) +#endif + +#ifdef CONFIG_HOTPLUG_CPU +#define CPU_KEEP(sec) *(.cpu##sec) +#define CPU_DISCARD(sec) +#else +#define CPU_KEEP(sec) +#define CPU_DISCARD(sec) *(.cpu##sec) +#endif + +#if defined(CONFIG_MEMORY_HOTPLUG) +#define MEM_KEEP(sec) *(.mem##sec) +#define MEM_DISCARD(sec) +#else +#define MEM_KEEP(sec) +#define MEM_DISCARD(sec) *(.mem##sec) +#endif + + /* .data section */ #define DATA_DATA \ *(.data) \ *(.data.init.refok) \ + *(.ref.data) \ + DEV_KEEP(init.data) \ + DEV_KEEP(exit.data) \ + CPU_KEEP(init.data) \ + CPU_KEEP(exit.data) \ + MEM_KEEP(init.data) \ + MEM_KEEP(exit.data) \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___markers) = .; \ *(__markers) \ @@ -132,14 +168,25 @@ *(__ksymtab_strings) \ } \ \ + /* __*init sections */ \ + __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ + *(.ref.rodata) \ + DEV_KEEP(init.rodata) \ + DEV_KEEP(exit.rodata) \ + CPU_KEEP(init.rodata) \ + CPU_KEEP(exit.rodata) \ + MEM_KEEP(init.rodata) \ + MEM_KEEP(exit.rodata) \ + } \ + \ /* Built-in module parameters. */ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___param) = .; \ *(__param) \ VMLINUX_SYMBOL(__stop___param) = .; \ + . = ALIGN((align)); \ VMLINUX_SYMBOL(__end_rodata) = .; \ } \ - \ . 
= ALIGN((align)); /* RODATA provided for backward compatibility. @@ -158,8 +205,16 @@ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text) \ + *(.ref.text) \ *(.text.init.refok) \ - *(.exit.text.refok) + *(.exit.text.refok) \ + DEV_KEEP(init.text) \ + DEV_KEEP(exit.text) \ + CPU_KEEP(init.text) \ + CPU_KEEP(exit.text) \ + MEM_KEEP(init.text) \ + MEM_KEEP(exit.text) + /* sched.text is aling to function alignment to secure we have same * address even at second ld pass when generating System.map */ @@ -183,6 +238,37 @@ *(.kprobes.text) \ VMLINUX_SYMBOL(__kprobes_text_end) = .; +/* init and exit section handling */ +#define INIT_DATA \ + *(.init.data) \ + DEV_DISCARD(init.data) \ + DEV_DISCARD(init.rodata) \ + CPU_DISCARD(init.data) \ + CPU_DISCARD(init.rodata) \ + MEM_DISCARD(init.data) \ + MEM_DISCARD(init.rodata) + +#define INIT_TEXT \ + *(.init.text) \ + DEV_DISCARD(init.text) \ + CPU_DISCARD(init.text) \ + MEM_DISCARD(init.text) + +#define EXIT_DATA \ + *(.exit.data) \ + DEV_DISCARD(exit.data) \ + DEV_DISCARD(exit.rodata) \ + CPU_DISCARD(exit.data) \ + CPU_DISCARD(exit.rodata) \ + MEM_DISCARD(exit.data) \ + MEM_DISCARD(exit.rodata) + +#define EXIT_TEXT \ + *(.exit.text) \ + DEV_DISCARD(exit.text) \ + CPU_DISCARD(exit.text) \ + MEM_DISCARD(exit.text) + /* DWARF debug sections. Symbols in the DWARF debugging sections are relative to the beginning of the section so we begin them at 0. */ diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h index 81bcd5e51789..cd1cc39b5599 100644 --- a/include/asm-ia64/acpi.h +++ b/include/asm-ia64/acpi.h @@ -127,6 +127,8 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS]; extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; #endif +#define acpi_unlazy_tlb(x) + #endif /*__KERNEL__*/ #endif /*_ASM_ACPI_H*/ diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h index 4e517f0e6afa..c11fdd8ab4d7 100644 --- a/include/asm-ia64/agp.h +++ b/include/asm-ia64/agp.h @@ -15,7 +15,6 @@ */ #define map_page_into_agp(page) /* nothing */ #define unmap_page_from_agp(page) /* nothing */ -#define flush_agp_mappings() /* nothing */ #define flush_agp_cache() mb() /* Convert a physical address to an address suitable for the GART. */ diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h index e58d3298fa10..5b6665c754c9 100644 --- a/include/asm-ia64/gcc_intrin.h +++ b/include/asm-ia64/gcc_intrin.h @@ -24,7 +24,7 @@ extern void ia64_bad_param_for_setreg (void); extern void ia64_bad_param_for_getreg (void); -register unsigned long ia64_r13 asm ("r13") __attribute_used__; +register unsigned long ia64_r13 asm ("r13") __used; #define ia64_setreg(regnum, val) \ ({ \ diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index c4f1e328a5ba..0095bcf79848 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h @@ -16,28 +16,11 @@ #include <linux/threads.h> #ifdef HAVE_MODEL_SMALL_ATTRIBUTE -# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__))) -#else -# define __SMALL_ADDR_AREA +# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__))) #endif #define DECLARE_PER_CPU(type, name) \ - extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name - -/* Separate out the type, so (int[3], foo) works. 
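A minimal sketch of how the reworked generic per-cpu accessors above are consumed; the variable and function names are invented for illustration, the variable's DEFINE_PER_CPU() definition is assumed to live in some .c file, and the fast-path accessor is assumed to run with preemption disabled.

/* Illustrative sketch only, not part of the patch. */
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU counter, defined elsewhere with DEFINE_PER_CPU(). */
DECLARE_PER_CPU(unsigned long, hypothetical_event_count);

static void count_event(void)
{
	/* __get_cpu_var() resolves via SHIFT_PERCPU_PTR(&per_cpu_var(...), my_cpu_offset). */
	__get_cpu_var(hypothetical_event_count)++;
}

static unsigned long total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu() adds per_cpu_offset(cpu) to reach each CPU's instance. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(hypothetical_event_count, cpu);
	return sum;
}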
*/ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) \ - __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name - -#ifdef CONFIG_SMP -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp -#else -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) -#endif + extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name /* * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an @@ -68,9 +51,6 @@ extern void *per_cpu_init(void); #endif /* SMP */ -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - /* * Be extremely careful when taking the address of this variable! Due to virtual * remapping, it is different from the canonical address returned by __get_cpu_var(var)! diff --git a/include/asm-m32r/signal.h b/include/asm-m32r/signal.h index 937258686ba5..1a607066bc64 100644 --- a/include/asm-m32r/signal.h +++ b/include/asm-m32r/signal.h @@ -157,7 +157,7 @@ typedef struct sigaltstack { #undef __HAVE_ARCH_SIG_BITOPS struct pt_regs; -extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset)); +extern int do_signal(struct pt_regs *regs, sigset_t *oldset); #define ptrace_signal_deliver(regs, cookie) do { } while (0) diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h index 2976b5d68e96..83d1f286230b 100644 --- a/include/asm-m68k/bitops.h +++ b/include/asm-m68k/bitops.h @@ -410,6 +410,8 @@ static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size, res = ext2_find_first_zero_bit (p, size - 32 * (p - addr)); return (p - addr) * 32 + res; } +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #endif /* __KERNEL__ */ diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h index f8dfb7ba2e25..f43afe1fc3b3 100644 --- a/include/asm-m68knommu/bitops.h +++ b/include/asm-m68knommu/bitops.h @@ -294,6 +294,8 @@ found_middle: return result + ffz(__swab32(tmp)); } +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h index 0bb7a93b7a5e..569f80aacbd2 100644 --- a/include/asm-mips/addrspace.h +++ b/include/asm-mips/addrspace.h @@ -127,7 +127,7 @@ #define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p)) #define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK) #define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \ - ((cm)<<59) | (a)) + (_CONST64_(cm) << 59) | (a)) /* * The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting diff --git a/include/asm-mips/asm.h b/include/asm-mips/asm.h index 12e17581b823..608cfcfbb3ea 100644 --- a/include/asm-mips/asm.h +++ b/include/asm-mips/asm.h @@ -398,4 +398,12 @@ symbol = value #define SSNOP sll zero, zero, 1 +#ifdef CONFIG_SGI_IP28 +/* Inhibit speculative stores to volatile (e.g.DMA) or invalid addresses. 
*/ +#include <asm/cacheops.h> +#define R10KCBARRIER(addr) cache Cache_Barrier, addr; +#else +#define R10KCBARRIER(addr) +#endif + #endif /* __ASM_ASM_H */ diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h index b2dd9b33de8f..e031bdff9920 100644 --- a/include/asm-mips/bootinfo.h +++ b/include/asm-mips/bootinfo.h @@ -48,22 +48,11 @@ #define MACH_DS5900 10 /* DECsystem 5900 */ /* - * Valid machtype for group ARC - */ -#define MACH_DESKSTATION_RPC44 0 /* Deskstation rPC44 */ -#define MACH_DESKSTATION_TYNE 1 /* Deskstation Tyne */ - -/* * Valid machtype for group SNI_RM */ #define MACH_SNI_RM200_PCI 0 /* RM200/RM300/RM400 PCI series */ /* - * Valid machtype for group ACN - */ -#define MACH_ACN_MIPS_BOARD 0 /* ACN MIPS single board */ - -/* * Valid machtype for group SGI */ #define MACH_SGI_IP22 0 /* Indy, Indigo2, Challenge S */ @@ -73,44 +62,6 @@ #define MACH_SGI_IP30 4 /* Octane, Octane2 */ /* - * Valid machtype for group COBALT - */ -#define MACH_COBALT_27 0 /* Proto "27" hardware */ - -/* - * Valid machtype for group BAGET - */ -#define MACH_BAGET201 0 /* BT23-201 */ -#define MACH_BAGET202 1 /* BT23-202 */ - -/* - * Cosine boards. - */ -#define MACH_COSINE_ORION 0 - -/* - * Valid machtype for group MOMENCO - */ -#define MACH_MOMENCO_OCELOT 0 -#define MACH_MOMENCO_OCELOT_G 1 /* no more supported (may 2007) */ -#define MACH_MOMENCO_OCELOT_C 2 /* no more supported (jun 2007) */ -#define MACH_MOMENCO_JAGUAR_ATX 3 /* no more supported (may 2007) */ -#define MACH_MOMENCO_OCELOT_3 4 - -/* - * Valid machtype for group PHILIPS - */ -#define MACH_PHILIPS_NINO 0 /* Nino */ -#define MACH_PHILIPS_VELO 1 /* Velo */ -#define MACH_PHILIPS_JBS 2 /* JBS */ -#define MACH_PHILIPS_STB810 3 /* STB810 */ - -/* - * Valid machtype for group SIBYTE - */ -#define MACH_SWARM 0 - -/* * Valid machtypes for group Toshiba */ #define MACH_PALLAS 0 @@ -122,64 +73,17 @@ #define MACH_TOSHIBA_RBTX4938 6 /* - * Valid machtype for group Alchemy - */ -#define MACH_PB1000 0 /* Au1000-based eval board */ -#define MACH_PB1100 1 /* Au1100-based eval board */ -#define MACH_PB1500 2 /* Au1500-based eval board */ -#define MACH_DB1000 3 /* Au1000-based eval board */ -#define MACH_DB1100 4 /* Au1100-based eval board */ -#define MACH_DB1500 5 /* Au1500-based eval board */ -#define MACH_XXS1500 6 /* Au1500-based eval board */ -#define MACH_MTX1 7 /* 4G MTX-1 Au1500-based board */ -#define MACH_PB1550 8 /* Au1550-based eval board */ -#define MACH_DB1550 9 /* Au1550-based eval board */ -#define MACH_PB1200 10 /* Au1200-based eval board */ -#define MACH_DB1200 11 /* Au1200-based eval board */ - -/* - * Valid machtype for group NEC_VR41XX - * - * Various NEC-based devices. - * - * FIXME: MACH_GROUPs should be by _MANUFACTURER_ of * the device, not by - * technical properties, so no new additions to this group. 
- */ -#define MACH_NEC_OSPREY 0 /* Osprey eval board */ -#define MACH_NEC_EAGLE 1 /* NEC Eagle/Hawk board */ -#define MACH_ZAO_CAPCELLA 2 /* ZAO Networks Capcella */ -#define MACH_VICTOR_MPC30X 3 /* Victor MP-C303/304 */ -#define MACH_IBM_WORKPAD 4 /* IBM WorkPad z50 */ -#define MACH_CASIO_E55 5 /* CASIO CASSIOPEIA E-10/15/55/65 */ -#define MACH_TANBAC_TB0226 6 /* TANBAC TB0226 (Mbase) */ -#define MACH_TANBAC_TB0229 7 /* TANBAC TB0229 (VR4131DIMM) */ -#define MACH_NEC_CMBVR4133 8 /* CMB VR4133 Board */ - -#define MACH_HP_LASERJET 1 - -/* * Valid machtype for group LASAT */ #define MACH_LASAT_100 0 /* Masquerade II/SP100/SP50/SP25 */ #define MACH_LASAT_200 1 /* Masquerade PRO/SP200 */ /* - * Valid machtype for group TITAN - */ -#define MACH_TITAN_YOSEMITE 1 /* PMC-Sierra Yosemite */ -#define MACH_TITAN_EXCITE 2 /* Basler eXcite */ - -/* * Valid machtype for group NEC EMMA2RH */ #define MACH_NEC_MARKEINS 0 /* NEC EMMA2RH Mark-eins */ /* - * Valid machtype for group LEMOTE - */ -#define MACH_LEMOTE_FULONG 0 - -/* * Valid machtype for group PMC-MSP */ #define MACH_MSP4200_EVAL 0 /* PMC-Sierra MSP4200 Evaluation */ @@ -190,16 +94,9 @@ #define MACH_MSP7120_FPGA 5 /* PMC-Sierra MSP7120 Emulation */ #define MACH_MSP_OTHER 255 /* PMC-Sierra unknown board type */ -#define MACH_WRPPMC 1 - -/* - * Valid machtype for group Broadcom - */ -#define MACH_GROUP_BRCM 23 /* Broadcom */ -#define MACH_BCM47XX 1 /* Broadcom BCM47XX */ - #define CL_SIZE COMMAND_LINE_SIZE +extern char *system_type; const char *get_system_type(void); extern unsigned long mips_machtype; diff --git a/include/asm-mips/bugs.h b/include/asm-mips/bugs.h index 0d7f9c1f5546..9dc10df32078 100644 --- a/include/asm-mips/bugs.h +++ b/include/asm-mips/bugs.h @@ -1,19 +1,34 @@ /* * This is included by init/main.c to check for architecture-dependent bugs. * + * Copyright (C) 2007 Maciej W. Rozycki + * * Needs: * void check_bugs(void); */ #ifndef _ASM_BUGS_H #define _ASM_BUGS_H +#include <linux/bug.h> #include <linux/delay.h> + #include <asm/cpu.h> #include <asm/cpu-info.h> +extern int daddiu_bug; + +extern void check_bugs64_early(void); + extern void check_bugs32(void); extern void check_bugs64(void); +static inline void check_bugs_early(void) +{ +#ifdef CONFIG_64BIT + check_bugs64_early(); +#endif +} + static inline void check_bugs(void) { unsigned int cpu = smp_processor_id(); @@ -25,4 +40,14 @@ static inline void check_bugs(void) #endif } +static inline int r4k_daddiu_bug(void) +{ +#ifdef CONFIG_64BIT + WARN_ON(daddiu_bug < 0); + return daddiu_bug != 0; +#else + return 0; +#endif +} + #endif /* _ASM_BUGS_H */ diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h index ed5c02c6afbb..0c5a358863f3 100644 --- a/include/asm-mips/cpu-info.h +++ b/include/asm-mips/cpu-info.h @@ -55,6 +55,7 @@ struct cpuinfo_mips { struct cache_desc scache; /* Secondary cache */ struct cache_desc tcache; /* Tertiary/split secondary cache */ int srsets; /* Shadow register sets */ + int core; /* physical core number */ #if defined(CONFIG_MIPS_MT_SMTC) /* * In the MIPS MT "SMTC" model, each TC is considered @@ -63,8 +64,10 @@ struct cpuinfo_mips { * to all TCs within the same VPE. 
*/ int vpe_id; /* Virtual Processor number */ - int tc_id; /* Thread Context number */ #endif /* CONFIG_MIPS_MT */ +#ifdef CONFIG_MIPS_MT_SMTC + int tc_id; /* Thread Context number */ +#endif void *data; /* Additional data */ } __attribute__((aligned(SMP_CACHE_BYTES))); diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h index 54fc18a4e5a8..bf5bbc78a9f7 100644 --- a/include/asm-mips/cpu.h +++ b/include/asm-mips/cpu.h @@ -195,8 +195,8 @@ enum cpu_type_enum { * MIPS32 class processors */ CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_74K, CPU_AU1000, - CPU_AU1100, CPU_AU1200, CPU_AU1500, CPU_AU1550, CPU_PR4450, - CPU_BCM3302, CPU_BCM4710, + CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, CPU_AU1550, + CPU_PR4450, CPU_BCM3302, CPU_BCM4710, /* * MIPS64 class processors diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h index fab32131e9b4..b0bccd2c4ed5 100644 --- a/include/asm-mips/delay.h +++ b/include/asm-mips/delay.h @@ -6,13 +6,16 @@ * Copyright (C) 1994 by Waldorf Electronics * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki */ #ifndef _ASM_DELAY_H #define _ASM_DELAY_H #include <linux/param.h> #include <linux/smp.h> + #include <asm/compiler.h> +#include <asm/war.h> static inline void __delay(unsigned long loops) { @@ -25,7 +28,7 @@ static inline void __delay(unsigned long loops) " .set reorder \n" : "=r" (loops) : "0" (loops)); - else if (sizeof(long) == 8) + else if (sizeof(long) == 8 && !DADDI_WAR) __asm__ __volatile__ ( " .set noreorder \n" " .align 3 \n" @@ -34,6 +37,15 @@ static inline void __delay(unsigned long loops) " .set reorder \n" : "=r" (loops) : "0" (loops)); + else if (sizeof(long) == 8 && DADDI_WAR) + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " dsubu %0, %2 \n" + " .set reorder \n" + : "=r" (loops) + : "0" (loops), "r" (1)); } @@ -50,7 +62,7 @@ static inline void __delay(unsigned long loops) static inline void __udelay(unsigned long usecs, unsigned long lpj) { - unsigned long lo; + unsigned long hi, lo; /* * The rates of 128 is rounded wrongly by the catchall case @@ -70,11 +82,16 @@ static inline void __udelay(unsigned long usecs, unsigned long lpj) : "=h" (usecs), "=l" (lo) : "r" (usecs), "r" (lpj) : GCC_REG_ACCUM); - else if (sizeof(long) == 8) + else if (sizeof(long) == 8 && !R4000_WAR) __asm__("dmultu\t%2, %3" : "=h" (usecs), "=l" (lo) : "r" (usecs), "r" (lpj) : GCC_REG_ACCUM); + else if (sizeof(long) == 8 && R4000_WAR) + __asm__("dmultu\t%3, %4\n\tmfhi\t%0" + : "=r" (usecs), "=h" (hi), "=l" (lo) + : "r" (usecs), "r" (lpj) + : GCC_REG_ACCUM); __delay(usecs); } diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h index d6a6c21f16db..1353c81065d1 100644 --- a/include/asm-mips/dma.h +++ b/include/asm-mips/dma.h @@ -84,10 +84,9 @@ * Deskstations or Acer PICA but not the much more versatile DMA logic used * for the local devices on Acer PICA or Magnums. 
*/ -#ifdef CONFIG_SGI_IP22 -/* Horrible hack to have a correct DMA window on IP22 */ -#include <asm/sgi/mc.h> -#define MAX_DMA_ADDRESS (PAGE_OFFSET + SGIMC_SEG0_BADDR + 0x01000000) +#if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28) +/* don't care; ISA bus master won't work, ISA slave DMA supports 32bit addr */ +#define MAX_DMA_ADDRESS PAGE_OFFSET #else #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000) #endif diff --git a/include/asm-mips/fixmap.h b/include/asm-mips/fixmap.h index f27b96cfac2e..9cc8522a394f 100644 --- a/include/asm-mips/fixmap.h +++ b/include/asm-mips/fixmap.h @@ -60,16 +60,6 @@ enum fixed_addresses { __end_of_fixed_addresses }; -extern void __set_fixmap(enum fixed_addresses idx, - unsigned long phys, pgprot_t flags); - -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) -/* - * Some hardware wants to get fixmapped without caching. - */ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) /* * used by vmalloc.c. * diff --git a/include/asm-mips/fw/cfe/cfe_api.h b/include/asm-mips/fw/cfe/cfe_api.h index 1003e7156bfc..0995575db320 100644 --- a/include/asm-mips/fw/cfe/cfe_api.h +++ b/include/asm-mips/fw/cfe/cfe_api.h @@ -15,49 +15,27 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ - -/* ********************************************************************* - * - * Broadcom Common Firmware Environment (CFE) - * - * Device function prototypes File: cfe_api.h - * - * This file contains declarations for doing callbacks to - * cfe from an application. It should be the only header - * needed by the application to use this library - * - * Authors: Mitch Lichtenberg, Chris Demetriou - * - ********************************************************************* */ - +/* + * Broadcom Common Firmware Environment (CFE) + * + * This file contains declarations for doing callbacks to + * cfe from an application. It should be the only header + * needed by the application to use this library + * + * Authors: Mitch Lichtenberg, Chris Demetriou + */ #ifndef CFE_API_H #define CFE_API_H -/* - * Apply customizations here for different OSes. These need to: - * * typedef uint64_t, int64_t, intptr_t, uintptr_t. - * * define cfe_strlen() if use of an existing function is desired. - * * define CFE_API_IMPL_NAMESPACE if API functions are to use - * names in the implementation namespace. - * Also, optionally, if the build environment does not do so automatically, - * CFE_API_* can be defined here as desired. - */ -/* Begin customization. */ #include <linux/types.h> #include <linux/string.h> typedef long intptr_t; -#define cfe_strlen strlen -#define CFE_API_ALL -#define CFE_API_STRLEN_CUSTOM -/* End customization. */ - - -/* ********************************************************************* - * Constants - ********************************************************************* */ +/* + * Constants + */ /* Seal indicating CFE's presence, passed to user program. */ #define CFE_EPTSEAL 0x43464531 @@ -109,54 +87,13 @@ typedef struct { /* - * cfe_strlen is handled specially: If already defined, it has been - * overridden in this environment with a standard strlen-like function. - */ -#ifdef cfe_strlen -# define CFE_API_STRLEN_CUSTOM -#else -# ifdef CFE_API_IMPL_NAMESPACE -# define cfe_strlen(a) __cfe_strlen(a) -# endif -int cfe_strlen(char *name); -#endif - -/* * Defines and prototypes for functions which take no arguments. 
*/ -#ifdef CFE_API_IMPL_NAMESPACE -int64_t __cfe_getticks(void); -#define cfe_getticks() __cfe_getticks() -#else int64_t cfe_getticks(void); -#endif /* * Defines and prototypes for the rest of the functions. */ -#ifdef CFE_API_IMPL_NAMESPACE -#define cfe_close(a) __cfe_close(a) -#define cfe_cpu_start(a, b, c, d, e) __cfe_cpu_start(a, b, c, d, e) -#define cfe_cpu_stop(a) __cfe_cpu_stop(a) -#define cfe_enumenv(a, b, d, e, f) __cfe_enumenv(a, b, d, e, f) -#define cfe_enummem(a, b, c, d, e) __cfe_enummem(a, b, c, d, e) -#define cfe_exit(a, b) __cfe_exit(a, b) -#define cfe_flushcache(a) __cfe_cacheflush(a) -#define cfe_getdevinfo(a) __cfe_getdevinfo(a) -#define cfe_getenv(a, b, c) __cfe_getenv(a, b, c) -#define cfe_getfwinfo(a) __cfe_getfwinfo(a) -#define cfe_getstdhandle(a) __cfe_getstdhandle(a) -#define cfe_init(a, b) __cfe_init(a, b) -#define cfe_inpstat(a) __cfe_inpstat(a) -#define cfe_ioctl(a, b, c, d, e, f) __cfe_ioctl(a, b, c, d, e, f) -#define cfe_open(a) __cfe_open(a) -#define cfe_read(a, b, c) __cfe_read(a, b, c) -#define cfe_readblk(a, b, c, d) __cfe_readblk(a, b, c, d) -#define cfe_setenv(a, b) __cfe_setenv(a, b) -#define cfe_write(a, b, c) __cfe_write(a, b, c) -#define cfe_writeblk(a, b, c, d) __cfe_writeblk(a, b, c, d) -#endif /* CFE_API_IMPL_NAMESPACE */ - int cfe_close(int handle); int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1); int cfe_cpu_stop(int cpu); diff --git a/include/asm-mips/fw/cfe/cfe_error.h b/include/asm-mips/fw/cfe/cfe_error.h index 975f00002cbe..b80374636279 100644 --- a/include/asm-mips/fw/cfe/cfe_error.h +++ b/include/asm-mips/fw/cfe/cfe_error.h @@ -16,18 +16,13 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -/* ********************************************************************* - * - * Broadcom Common Firmware Environment (CFE) - * - * Error codes File: cfe_error.h - * - * CFE's global error code list is here. - * - * Author: Mitch Lichtenberg - * - ********************************************************************* */ - +/* + * Broadcom Common Firmware Environment (CFE) + * + * CFE's global error code list is here. + * + * Author: Mitch Lichtenberg + */ #define CFE_OK 0 #define CFE_ERR -1 /* generic error */ diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h index a79e7caf3a86..5b9fce73f11d 100644 --- a/include/asm-mips/mach-cobalt/cobalt.h +++ b/include/asm-mips/mach-cobalt/cobalt.h @@ -1,5 +1,5 @@ /* - * Lowlevel hardware stuff for the MIPS based Cobalt microservers. + * The Cobalt board ID information. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -12,9 +12,6 @@ #ifndef __ASM_COBALT_H #define __ASM_COBALT_H -/* - * The Cobalt board ID information. 
- */ extern int cobalt_board_id; #define COBALT_BRD_ID_QUBE1 0x3 @@ -22,14 +19,4 @@ extern int cobalt_board_id; #define COBALT_BRD_ID_QUBE2 0x5 #define COBALT_BRD_ID_RAQ2 0x6 -#define COBALT_KEY_PORT ((~*(volatile unsigned int *) CKSEG1ADDR(0x1d000000) >> 24) & COBALT_KEY_MASK) -# define COBALT_KEY_CLEAR (1 << 1) -# define COBALT_KEY_LEFT (1 << 2) -# define COBALT_KEY_UP (1 << 3) -# define COBALT_KEY_DOWN (1 << 4) -# define COBALT_KEY_RIGHT (1 << 5) -# define COBALT_KEY_ENTER (1 << 6) -# define COBALT_KEY_SELECT (1 << 7) -# define COBALT_KEY_MASK 0xfe - #endif /* __ASM_COBALT_H */ diff --git a/include/asm-mips/mach-ip28/cpu-feature-overrides.h b/include/asm-mips/mach-ip28/cpu-feature-overrides.h new file mode 100644 index 000000000000..9a53b326f848 --- /dev/null +++ b/include/asm-mips/mach-ip28/cpu-feature-overrides.h @@ -0,0 +1,50 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003 Ralf Baechle + * 6/2004 pf + */ +#ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H + +/* + * IP28 only comes with R10000 family processors all using the same config + */ +#define cpu_has_watch 1 +#define cpu_has_mips16 0 +#define cpu_has_divec 0 +#define cpu_has_vce 0 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_prefetch 1 +#define cpu_has_mcheck 0 +#define cpu_has_ejtag 0 + +#define cpu_has_llsc 1 +#define cpu_has_vtag_icache 0 +#define cpu_has_dc_aliases 0 /* see probe_pcache() */ +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_dsp 0 +#define cpu_icache_snoops_remote_store 1 +#define cpu_has_mipsmt 0 +#define cpu_has_userlocal 0 + +#define cpu_has_nofpuex 0 +#define cpu_has_64bits 1 + +#define cpu_has_4kex 1 +#define cpu_has_4k_cache 1 + +#define cpu_has_inclusive_pcaches 1 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 64 + +#define cpu_has_mips32r1 0 +#define cpu_has_mips32r2 0 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#endif /* __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H */ diff --git a/include/asm-mips/mach-ip28/ds1286.h b/include/asm-mips/mach-ip28/ds1286.h new file mode 100644 index 000000000000..471bb9a33e0f --- /dev/null +++ b/include/asm-mips/mach-ip28/ds1286.h @@ -0,0 +1,4 @@ +#ifndef __ASM_MACH_IP28_DS1286_H +#define __ASM_MACH_IP28_DS1286_H +#include <asm/mach-ip22/ds1286.h> +#endif /* __ASM_MACH_IP28_DS1286_H */ diff --git a/include/asm-mips/mach-ip28/spaces.h b/include/asm-mips/mach-ip28/spaces.h new file mode 100644 index 000000000000..05aabb27e5e7 --- /dev/null +++ b/include/asm-mips/mach-ip28/spaces.h @@ -0,0 +1,22 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. 
+ * 2004 pf + */ +#ifndef _ASM_MACH_IP28_SPACES_H +#define _ASM_MACH_IP28_SPACES_H + +#define CAC_BASE 0xa800000000000000 + +#define HIGHMEM_START (~0UL) + +#define PHYS_OFFSET _AC(0x20000000, UL) + +#include <asm/mach-generic/spaces.h> + +#endif /* _ASM_MACH_IP28_SPACES_H */ diff --git a/include/asm-mips/mach-qemu/war.h b/include/asm-mips/mach-ip28/war.h index 0eaf0c548a47..a1baafab486a 100644 --- a/include/asm-mips/mach-qemu/war.h +++ b/include/asm-mips/mach-ip28/war.h @@ -5,8 +5,8 @@ * * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> */ -#ifndef __ASM_MIPS_MACH_QEMU_WAR_H -#define __ASM_MIPS_MACH_QEMU_WAR_H +#ifndef __ASM_MIPS_MACH_IP28_WAR_H +#define __ASM_MIPS_MACH_IP28_WAR_H #define R4600_V1_INDEX_ICACHEOP_WAR 0 #define R4600_V1_HIT_CACHEOP_WAR 0 @@ -19,7 +19,7 @@ #define TX49XX_ICACHE_INDEX_INV_WAR 0 #define RM9000_CDEX_SMP_WAR 0 #define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 +#define R10000_LLSC_WAR 1 #define MIPS34K_MISSED_ITLB_WAR 0 -#endif /* __ASM_MIPS_MACH_QEMU_WAR_H */ +#endif /* __ASM_MIPS_MACH_IP28_WAR_H */ diff --git a/include/asm-mips/mach-qemu/cpu-feature-overrides.h b/include/asm-mips/mach-qemu/cpu-feature-overrides.h deleted file mode 100644 index d2daaed235d5..000000000000 --- a/include/asm-mips/mach-qemu/cpu-feature-overrides.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2003, 07 Ralf Baechle - */ -#ifndef __ASM_MACH_QEMU_CPU_FEATURE_OVERRIDES_H -#define __ASM_MACH_QEMU_CPU_FEATURE_OVERRIDES_H - -/* - * QEMU only comes with a hazard-free MIPS32 processor, so things are easy. - */ -#define cpu_has_mips16 0 -#define cpu_has_divec 0 -#define cpu_has_cache_cdex_p 0 -#define cpu_has_prefetch 0 -#define cpu_has_mcheck 0 -#define cpu_has_ejtag 0 - -#define cpu_has_llsc 1 -#define cpu_has_vtag_icache 0 -#define cpu_has_dc_aliases 0 -#define cpu_has_ic_fills_f_dc 0 - -#define cpu_has_dsp 0 -#define cpu_has_mipsmt 0 - -#define cpu_has_nofpuex 0 -#define cpu_has_64bits 0 - -#endif /* __ASM_MACH_QEMU_CPU_FEATURE_OVERRIDES_H */ diff --git a/include/asm-mips/mips-boards/generic.h b/include/asm-mips/mips-boards/generic.h index d58977483534..1c39d339521e 100644 --- a/include/asm-mips/mips-boards/generic.h +++ b/include/asm-mips/mips-boards/generic.h @@ -97,10 +97,16 @@ extern int mips_revision_corid; extern int mips_revision_sconid; +extern void mips_reboot_setup(void); + #ifdef CONFIG_PCI extern void mips_pcibios_init(void); #else #define mips_pcibios_init() do { } while (0) #endif +#ifdef CONFIG_KGDB +extern void kgdb_config(void); +#endif + #endif /* __ASM_MIPS_BOARDS_GENERIC_H */ diff --git a/include/asm-mips/mipsprom.h b/include/asm-mips/mipsprom.h index ce7cff7f1e8e..146d41b67adc 100644 --- a/include/asm-mips/mipsprom.h +++ b/include/asm-mips/mipsprom.h @@ -71,4 +71,6 @@ #define PROM_NV_GET 53 /* XXX */ #define PROM_NV_SET 54 /* XXX */ +extern char *prom_getenv(char *); + #endif /* __ASM_MIPS_PROM_H */ diff --git a/include/asm-mips/pmc-sierra/msp71xx/msp_regs.h b/include/asm-mips/pmc-sierra/msp71xx/msp_regs.h index 0b56f55206c6..603eb737b4a8 100644 --- a/include/asm-mips/pmc-sierra/msp71xx/msp_regs.h +++ b/include/asm-mips/pmc-sierra/msp71xx/msp_regs.h @@ -585,11 +585,7 @@ * UART defines * *************************************************************************** */ -#ifndef CONFIG_MSP_FPGA #define MSP_BASE_BAUD 25000000 -#else -#define MSP_BASE_BAUD 
6000000 -#endif #define MSP_UART_REG_LEN 0x20 /* diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h index 2b8466ffd3ca..4c140db36786 100644 --- a/include/asm-mips/r4kcache.h +++ b/include/asm-mips/r4kcache.h @@ -403,6 +403,13 @@ __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) + /* build blast_xxx_range, protected_blast_xxx_range */ #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ diff --git a/include/asm-mips/sgi/ioc.h b/include/asm-mips/sgi/ioc.h index f3e3dc9bb732..343ed15f8dc4 100644 --- a/include/asm-mips/sgi/ioc.h +++ b/include/asm-mips/sgi/ioc.h @@ -138,8 +138,8 @@ struct sgioc_regs { u8 _sysid[3]; volatile u8 sysid; #define SGIOC_SYSID_FULLHOUSE 0x01 -#define SGIOC_SYSID_BOARDREV(x) ((x & 0xe0) > 5) -#define SGIOC_SYSID_CHIPREV(x) ((x & 0x1e) > 1) +#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1) +#define SGIOC_SYSID_CHIPREV(x) (((x) & 0xe0) >> 5) u32 _unused2; u8 _read[3]; volatile u8 read; diff --git a/include/asm-mips/sibyte/board.h b/include/asm-mips/sibyte/board.h index da198a1c8c81..25372ae0e814 100644 --- a/include/asm-mips/sibyte/board.h +++ b/include/asm-mips/sibyte/board.h @@ -19,10 +19,8 @@ #ifndef _SIBYTE_BOARD_H #define _SIBYTE_BOARD_H -#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_PTSWARM) || \ - defined(CONFIG_SIBYTE_PT1120) || defined(CONFIG_SIBYTE_PT1125) || \ - defined(CONFIG_SIBYTE_CRHONE) || defined(CONFIG_SIBYTE_CRHINE) || \ - defined(CONFIG_SIBYTE_LITTLESUR) +#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_CRHONE) || \ + defined(CONFIG_SIBYTE_CRHINE) || defined(CONFIG_SIBYTE_LITTLESUR) #include <asm/sibyte/swarm.h> #endif diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h index 0dad844a3b5b..80c1a052662a 100644 --- a/include/asm-mips/sibyte/sb1250.h +++ b/include/asm-mips/sibyte/sb1250.h @@ -48,12 +48,10 @@ extern unsigned int zbbus_mhz; extern void sb1250_time_init(void); extern void sb1250_mask_irq(int cpu, int irq); extern void sb1250_unmask_irq(int cpu, int irq); -extern void sb1250_smp_finish(void); extern void bcm1480_time_init(void); extern void bcm1480_mask_irq(int cpu, int irq); extern void bcm1480_unmask_irq(int cpu, int irq); -extern void bcm1480_smp_finish(void); #define AT_spin \ __asm__ __volatile__ ( \ diff --git a/include/asm-mips/sibyte/swarm.h b/include/asm-mips/sibyte/swarm.h index 540865fa7ec3..114d9d29ca9d 100644 --- a/include/asm-mips/sibyte/swarm.h +++ b/include/asm-mips/sibyte/swarm.h @@ -26,24 +26,6 @@ #define SIBYTE_HAVE_PCMCIA 1 #define SIBYTE_HAVE_IDE 1 #endif -#ifdef CONFIG_SIBYTE_PTSWARM -#define SIBYTE_BOARD_NAME "PTSWARM" -#define SIBYTE_HAVE_PCMCIA 1 -#define SIBYTE_HAVE_IDE 1 -#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200" -#endif -#ifdef CONFIG_SIBYTE_PT1120 -#define SIBYTE_BOARD_NAME "PT1120" 
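The corrected SGIOC_SYSID_BOARDREV()/SGIOC_SYSID_CHIPREV() macros above now shift the masked field down instead of comparing it, and swap which mask belongs to which field. A small worked example follows; the sysid value 0x4b is arbitrary, not real hardware data, and the macro bodies are copied from the hunk so the check is self-contained.

/* Stand-alone, user-space check of the corrected bit-field extraction. */
#include <assert.h>

#define SGIOC_SYSID_BOARDREV(x)	(((x) & 0x1e) >> 1)	/* copied from the hunk above */
#define SGIOC_SYSID_CHIPREV(x)	(((x) & 0xe0) >> 5)

int main(void)
{
	unsigned char sysid = 0x4b;	/* 0100 1011; bit 0 would be SGIOC_SYSID_FULLHOUSE */

	assert(SGIOC_SYSID_BOARDREV(sysid) == 0x5);	/* bits 4..1 */
	assert(SGIOC_SYSID_CHIPREV(sysid) == 0x2);	/* bits 7..5 */
	return 0;
}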
-#define SIBYTE_HAVE_PCMCIA 1 -#define SIBYTE_HAVE_IDE 1 -#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200" -#endif -#ifdef CONFIG_SIBYTE_PT1125 -#define SIBYTE_BOARD_NAME "PT1125" -#define SIBYTE_HAVE_PCMCIA 1 -#define SIBYTE_HAVE_IDE 1 -#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200" -#endif #ifdef CONFIG_SIBYTE_LITTLESUR #define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)" #define SIBYTE_HAVE_PCMCIA 0 diff --git a/include/asm-mips/smp-ops.h b/include/asm-mips/smp-ops.h new file mode 100644 index 000000000000..b17fdfb5d818 --- /dev/null +++ b/include/asm-mips/smp-ops.h @@ -0,0 +1,56 @@ +/* + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com) + * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc. + * Copyright (C) 2000, 2001, 2002 Ralf Baechle + * Copyright (C) 2000, 2001 Broadcom Corporation + */ +#ifndef __ASM_SMP_OPS_H +#define __ASM_SMP_OPS_H + +#ifdef CONFIG_SMP + +#include <linux/cpumask.h> + +struct plat_smp_ops { + void (*send_ipi_single)(int cpu, unsigned int action); + void (*send_ipi_mask)(cpumask_t mask, unsigned int action); + void (*init_secondary)(void); + void (*smp_finish)(void); + void (*cpus_done)(void); + void (*boot_secondary)(int cpu, struct task_struct *idle); + void (*smp_setup)(void); + void (*prepare_cpus)(unsigned int max_cpus); +}; + +extern void register_smp_ops(struct plat_smp_ops *ops); + +static inline void plat_smp_setup(void) +{ + extern struct plat_smp_ops *mp_ops; /* private */ + + mp_ops->smp_setup(); +} + +#else /* !CONFIG_SMP */ + +struct plat_smp_ops; + +static inline void plat_smp_setup(void) +{ + /* UP, nothing to do ... */ +} + +static inline void register_smp_ops(struct plat_smp_ops *ops) +{ +} + +#endif /* !CONFIG_SMP */ + +extern struct plat_smp_ops up_smp_ops; +extern struct plat_smp_ops vsmp_smp_ops; + +#endif /* __ASM_SMP_OPS_H */ diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index dc770025a9b0..84fef1aeec0c 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h @@ -11,14 +11,16 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H - -#ifdef CONFIG_SMP - #include <linux/bitops.h> #include <linux/linkage.h> #include <linux/threads.h> #include <linux/cpumask.h> + #include <asm/atomic.h> +#include <asm/smp-ops.h> + +extern int smp_num_siblings; +extern cpumask_t cpu_sibling_map[]; #define raw_smp_processor_id() (current_thread_info()->cpu) @@ -49,56 +51,6 @@ extern struct call_data_struct *call_data; extern cpumask_t phys_cpu_present_map; #define cpu_possible_map phys_cpu_present_map -/* - * These are defined by the board-specific code. - */ - -/* - * Cause the function described by call_data to be executed on the passed - * cpu. When the function has finished, increment the finished field of - * call_data. - */ -extern void core_send_ipi(int cpu, unsigned int action); - -static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action) -{ - unsigned int i; - - for_each_cpu_mask(i, mask) - core_send_ipi(i, action); -} - - -/* - * Firmware CPU startup hook - */ -extern void prom_boot_secondary(int cpu, struct task_struct *idle); - -/* - * After we've done initial boot, this function is called to allow the - * board code to clean up state, if needed - */ -extern void prom_init_secondary(void); - -/* - * Populate cpu_possible_map before smp_init, called from setup_arch. - */ -extern void plat_smp_setup(void); - -/* - * Called in smp_prepare_cpus. 
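A minimal sketch of what a platform port provides under the new smp-ops scheme introduced above, which replaces the removed core_send_ipi()/prom_* hooks; every my_* name is an invented placeholder, the bodies are empty stubs rather than working platform code, and the early-setup call site is an assumption.

/* Illustrative sketch only, not part of the patch. */
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <asm/smp-ops.h>

static void my_send_ipi_single(int cpu, unsigned int action) { /* raise an IPI on one CPU */ }
static void my_send_ipi_mask(cpumask_t mask, unsigned int action) { /* raise IPIs on a set of CPUs */ }
static void my_init_secondary(void) { /* per-CPU interrupt setup */ }
static void my_smp_finish(void) { /* last step before the CPU goes online */ }
static void my_cpus_done(void) { /* all CPUs are online */ }
static void my_boot_secondary(int cpu, struct task_struct *idle) { /* kick the secondary CPU */ }
static void my_smp_setup(void) { /* populate cpu_possible_map */ }
static void my_prepare_cpus(unsigned int max_cpus) { }

static struct plat_smp_ops my_smp_ops = {
	.send_ipi_single	= my_send_ipi_single,
	.send_ipi_mask		= my_send_ipi_mask,
	.init_secondary		= my_init_secondary,
	.smp_finish		= my_smp_finish,
	.cpus_done		= my_cpus_done,
	.boot_secondary		= my_boot_secondary,
	.smp_setup		= my_smp_setup,
	.prepare_cpus		= my_prepare_cpus,
};

void my_board_early_setup(void)	/* hypothetical hook, e.g. called from the board's prom_init() */
{
	register_smp_ops(&my_smp_ops);
}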
- */ -extern void plat_prepare_cpus(unsigned int max_cpus); - -/* - * Last chance for the board code to finish SMP initialization before - * the CPU is "online". - */ -extern void prom_smp_finish(void); - -/* Hook for after all CPUs are online */ -extern void prom_cpus_done(void); - extern void asmlinkage smp_bootstrap(void); /* @@ -108,11 +60,11 @@ extern void asmlinkage smp_bootstrap(void); */ static inline void smp_send_reschedule(int cpu) { - core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF); + extern struct plat_smp_ops *mp_ops; /* private */ + + mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); } extern asmlinkage void smp_call_function_interrupt(void); -#endif /* CONFIG_SMP */ - #endif /* __ASM_SMP_H */ diff --git a/include/asm-mips/sni.h b/include/asm-mips/sni.h index af081457f847..e716447e5e03 100644 --- a/include/asm-mips/sni.h +++ b/include/asm-mips/sni.h @@ -35,23 +35,23 @@ extern unsigned int sni_brd_type; #define SNI_CPU_M8050 0x0b #define SNI_CPU_M8053 0x0d -#define SNI_PORT_BASE 0xb4000000 +#define SNI_PORT_BASE CKSEG1ADDR(0xb4000000) #ifndef __MIPSEL__ /* * ASIC PCI registers for big endian configuration. */ -#define PCIMT_UCONF 0xbfff0004 -#define PCIMT_IOADTIMEOUT2 0xbfff000c -#define PCIMT_IOMEMCONF 0xbfff0014 -#define PCIMT_IOMMU 0xbfff001c -#define PCIMT_IOADTIMEOUT1 0xbfff0024 -#define PCIMT_DMAACCESS 0xbfff002c -#define PCIMT_DMAHIT 0xbfff0034 -#define PCIMT_ERRSTATUS 0xbfff003c -#define PCIMT_ERRADDR 0xbfff0044 -#define PCIMT_SYNDROME 0xbfff004c -#define PCIMT_ITPEND 0xbfff0054 +#define PCIMT_UCONF CKSEG1ADDR(0xbfff0004) +#define PCIMT_IOADTIMEOUT2 CKSEG1ADDR(0xbfff000c) +#define PCIMT_IOMEMCONF CKSEG1ADDR(0xbfff0014) +#define PCIMT_IOMMU CKSEG1ADDR(0xbfff001c) +#define PCIMT_IOADTIMEOUT1 CKSEG1ADDR(0xbfff0024) +#define PCIMT_DMAACCESS CKSEG1ADDR(0xbfff002c) +#define PCIMT_DMAHIT CKSEG1ADDR(0xbfff0034) +#define PCIMT_ERRSTATUS CKSEG1ADDR(0xbfff003c) +#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0044) +#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff004c) +#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0054) #define IT_INT2 0x01 #define IT_INTD 0x02 #define IT_INTC 0x04 @@ -60,32 +60,32 @@ extern unsigned int sni_brd_type; #define IT_EISA 0x20 #define IT_SCSI 0x40 #define IT_ETH 0x80 -#define PCIMT_IRQSEL 0xbfff005c -#define PCIMT_TESTMEM 0xbfff0064 -#define PCIMT_ECCREG 0xbfff006c -#define PCIMT_CONFIG_ADDRESS 0xbfff0074 -#define PCIMT_ASIC_ID 0xbfff007c /* read */ -#define PCIMT_SOFT_RESET 0xbfff007c /* write */ -#define PCIMT_PIA_OE 0xbfff0084 -#define PCIMT_PIA_DATAOUT 0xbfff008c -#define PCIMT_PIA_DATAIN 0xbfff0094 -#define PCIMT_CACHECONF 0xbfff009c -#define PCIMT_INVSPACE 0xbfff00a4 +#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff005c) +#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0064) +#define PCIMT_ECCREG CKSEG1ADDR(0xbfff006c) +#define PCIMT_CONFIG_ADDRESS CKSEG1ADDR(0xbfff0074) +#define PCIMT_ASIC_ID CKSEG1ADDR(0xbfff007c) /* read */ +#define PCIMT_SOFT_RESET CKSEG1ADDR(0xbfff007c) /* write */ +#define PCIMT_PIA_OE CKSEG1ADDR(0xbfff0084) +#define PCIMT_PIA_DATAOUT CKSEG1ADDR(0xbfff008c) +#define PCIMT_PIA_DATAIN CKSEG1ADDR(0xbfff0094) +#define PCIMT_CACHECONF CKSEG1ADDR(0xbfff009c) +#define PCIMT_INVSPACE CKSEG1ADDR(0xbfff00a4) #else /* * ASIC PCI registers for little endian configuration. 
*/ -#define PCIMT_UCONF 0xbfff0000 -#define PCIMT_IOADTIMEOUT2 0xbfff0008 -#define PCIMT_IOMEMCONF 0xbfff0010 -#define PCIMT_IOMMU 0xbfff0018 -#define PCIMT_IOADTIMEOUT1 0xbfff0020 -#define PCIMT_DMAACCESS 0xbfff0028 -#define PCIMT_DMAHIT 0xbfff0030 -#define PCIMT_ERRSTATUS 0xbfff0038 -#define PCIMT_ERRADDR 0xbfff0040 -#define PCIMT_SYNDROME 0xbfff0048 -#define PCIMT_ITPEND 0xbfff0050 +#define PCIMT_UCONF CKSEG1ADDR(0xbfff0000) +#define PCIMT_IOADTIMEOUT2 CKSEG1ADDR(0xbfff0008) +#define PCIMT_IOMEMCONF CKSEG1ADDR(0xbfff0010) +#define PCIMT_IOMMU CKSEG1ADDR(0xbfff0018) +#define PCIMT_IOADTIMEOUT1 CKSEG1ADDR(0xbfff0020) +#define PCIMT_DMAACCESS CKSEG1ADDR(0xbfff0028) +#define PCIMT_DMAHIT CKSEG1ADDR(0xbfff0030) +#define PCIMT_ERRSTATUS CKSEG1ADDR(0xbfff0038) +#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0040) +#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff0048) +#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0050) #define IT_INT2 0x01 #define IT_INTD 0x02 #define IT_INTC 0x04 @@ -94,20 +94,20 @@ extern unsigned int sni_brd_type; #define IT_EISA 0x20 #define IT_SCSI 0x40 #define IT_ETH 0x80 -#define PCIMT_IRQSEL 0xbfff0058 -#define PCIMT_TESTMEM 0xbfff0060 -#define PCIMT_ECCREG 0xbfff0068 -#define PCIMT_CONFIG_ADDRESS 0xbfff0070 -#define PCIMT_ASIC_ID 0xbfff0078 /* read */ -#define PCIMT_SOFT_RESET 0xbfff0078 /* write */ -#define PCIMT_PIA_OE 0xbfff0080 -#define PCIMT_PIA_DATAOUT 0xbfff0088 -#define PCIMT_PIA_DATAIN 0xbfff0090 -#define PCIMT_CACHECONF 0xbfff0098 -#define PCIMT_INVSPACE 0xbfff00a0 +#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff0058) +#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0060) +#define PCIMT_ECCREG CKSEG1ADDR(0xbfff0068) +#define PCIMT_CONFIG_ADDRESS CKSEG1ADDR(0xbfff0070) +#define PCIMT_ASIC_ID CKSEG1ADDR(0xbfff0078) /* read */ +#define PCIMT_SOFT_RESET CKSEG1ADDR(0xbfff0078) /* write */ +#define PCIMT_PIA_OE CKSEG1ADDR(0xbfff0080) +#define PCIMT_PIA_DATAOUT CKSEG1ADDR(0xbfff0088) +#define PCIMT_PIA_DATAIN CKSEG1ADDR(0xbfff0090) +#define PCIMT_CACHECONF CKSEG1ADDR(0xbfff0098) +#define PCIMT_INVSPACE CKSEG1ADDR(0xbfff00a0) #endif -#define PCIMT_PCI_CONF 0xbfff0100 +#define PCIMT_PCI_CONF CKSEG1ADDR(0xbfff0100) /* * Data port for the PCI bus in IO space @@ -117,34 +117,34 @@ extern unsigned int sni_brd_type; /* * Board specific registers */ -#define PCIMT_CSMSR 0xbfd00000 -#define PCIMT_CSSWITCH 0xbfd10000 -#define PCIMT_CSITPEND 0xbfd20000 -#define PCIMT_AUTO_PO_EN 0xbfd30000 -#define PCIMT_CLR_TEMP 0xbfd40000 -#define PCIMT_AUTO_PO_DIS 0xbfd50000 -#define PCIMT_EXMSR 0xbfd60000 -#define PCIMT_UNUSED1 0xbfd70000 -#define PCIMT_CSWCSM 0xbfd80000 -#define PCIMT_UNUSED2 0xbfd90000 -#define PCIMT_CSLED 0xbfda0000 -#define PCIMT_CSMAPISA 0xbfdb0000 -#define PCIMT_CSRSTBP 0xbfdc0000 -#define PCIMT_CLRPOFF 0xbfdd0000 -#define PCIMT_CSTIMER 0xbfde0000 -#define PCIMT_PWDN 0xbfdf0000 +#define PCIMT_CSMSR CKSEG1ADDR(0xbfd00000) +#define PCIMT_CSSWITCH CKSEG1ADDR(0xbfd10000) +#define PCIMT_CSITPEND CKSEG1ADDR(0xbfd20000) +#define PCIMT_AUTO_PO_EN CKSEG1ADDR(0xbfd30000) +#define PCIMT_CLR_TEMP CKSEG1ADDR(0xbfd40000) +#define PCIMT_AUTO_PO_DIS CKSEG1ADDR(0xbfd50000) +#define PCIMT_EXMSR CKSEG1ADDR(0xbfd60000) +#define PCIMT_UNUSED1 CKSEG1ADDR(0xbfd70000) +#define PCIMT_CSWCSM CKSEG1ADDR(0xbfd80000) +#define PCIMT_UNUSED2 CKSEG1ADDR(0xbfd90000) +#define PCIMT_CSLED CKSEG1ADDR(0xbfda0000) +#define PCIMT_CSMAPISA CKSEG1ADDR(0xbfdb0000) +#define PCIMT_CSRSTBP CKSEG1ADDR(0xbfdc0000) +#define PCIMT_CLRPOFF CKSEG1ADDR(0xbfdd0000) +#define PCIMT_CSTIMER CKSEG1ADDR(0xbfde0000) +#define PCIMT_PWDN CKSEG1ADDR(0xbfdf0000) /* * 
A20R based boards */ -#define A20R_PT_CLOCK_BASE 0xbc040000 -#define A20R_PT_TIM0_ACK 0xbc050000 -#define A20R_PT_TIM1_ACK 0xbc060000 +#define A20R_PT_CLOCK_BASE CKSEG1ADDR(0xbc040000) +#define A20R_PT_TIM0_ACK CKSEG1ADDR(0xbc050000) +#define A20R_PT_TIM1_ACK CKSEG1ADDR(0xbc060000) #define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE #define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5) -#define SNI_PCIT_INT_REG 0xbfff000c +#define SNI_PCIT_INT_REG CKSEG1ADDR(0xbfff000c) #define SNI_PCIT_INT_START 24 #define SNI_PCIT_INT_END 30 @@ -186,10 +186,30 @@ extern unsigned int sni_brd_type; /* * Base address for the mapped 16mb EISA bus segment. */ -#define PCIMT_EISA_BASE 0xb0000000 +#define PCIMT_EISA_BASE CKSEG1ADDR(0xb0000000) /* PCI EISA Interrupt acknowledge */ -#define PCIMT_INT_ACKNOWLEDGE 0xba000000 +#define PCIMT_INT_ACKNOWLEDGE CKSEG1ADDR(0xba000000) + +/* + * SNI ID PROM + * + * SNI_IDPROM_MEMSIZE Memsize in 16MB quantities + * SNI_IDPROM_BRDTYPE Board Type + * SNI_IDPROM_CPUTYPE CPU Type on RM400 + */ +#ifdef CONFIG_CPU_BIG_ENDIAN +#define __SNI_END 0 +#endif +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define __SNI_END 3 +#endif +#define SNI_IDPROM_BASE CKSEG1ADDR(0x1ff00000) +#define SNI_IDPROM_MEMSIZE (SNI_IDPROM_BASE + (0x28 ^ __SNI_END)) +#define SNI_IDPROM_BRDTYPE (SNI_IDPROM_BASE + (0x29 ^ __SNI_END)) +#define SNI_IDPROM_CPUTYPE (SNI_IDPROM_BASE + (0x30 ^ __SNI_END)) + +#define SNI_IDPROM_SIZE 0x1000 /* board specific init functions */ extern void sni_a20r_init(void); @@ -207,6 +227,9 @@ extern void sni_pcimt_irq_init(void); /* timer inits */ extern void sni_cpu_time_init(void); +/* eisa init for RM200/400 */ +extern int sni_eisa_root_init(void); + /* common irq stuff */ extern void (*sni_hwint)(void); extern struct irqaction sni_isa_irq; diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index fb41a8d76392..051e1af0bb95 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h @@ -6,6 +6,7 @@ * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle * Copyright (C) 1994, 1995, 1996 Paul M. Antoine. * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki */ #ifndef _ASM_STACKFRAME_H #define _ASM_STACKFRAME_H @@ -145,8 +146,16 @@ .set reorder /* Called from user mode, new stack. */ get_saved_sp +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 8: move k0, sp PTR_SUBU sp, k1, PT_SIZE +#else + .set at=k0 +8: PTR_SUBU k1, PT_SIZE + .set noat + move k0, sp + move sp, k1 +#endif LONG_S k0, PT_R29(sp) LONG_S $3, PT_R3(sp) /* diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h index 7717934f94c3..a8fd16e1981f 100644 --- a/include/asm-mips/time.h +++ b/include/asm-mips/time.h @@ -31,20 +31,13 @@ extern int rtc_mips_set_time(unsigned long); extern int rtc_mips_set_mmss(unsigned long); /* - * Timer interrupt functions. - * mips_timer_state is needed for high precision timer calibration. - */ -extern int (*mips_timer_state)(void); - -/* * board specific routines required by time_init(). */ extern void plat_time_init(void); /* * mips_hpt_frequency - must be set if you intend to use an R4k-compatible - * counter as a timer interrupt source; otherwise it can be set up - * automagically with an aid of mips_timer_state. + * counter as a timer interrupt source. 
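The SNI_IDPROM_* offsets above XOR each byte offset with __SNI_END (0 for a big-endian kernel, 3 for a little-endian one), so a byte-wide PROM field is fetched from the matching byte lane within its 32-bit word either way. The sketch below only prints where the three documented fields end up under each setting; the offsets are the ones from the header, the rest is illustrative:

#include <stdio.h>

#define DEMO_SNI_END_BE	0	/* __SNI_END on CONFIG_CPU_BIG_ENDIAN */
#define DEMO_SNI_END_LE	3	/* __SNI_END on CONFIG_CPU_LITTLE_ENDIAN */

int main(void)
{
	/* Byte offsets used by SNI_IDPROM_MEMSIZE, _BRDTYPE and _CPUTYPE. */
	unsigned int fields[] = { 0x28, 0x29, 0x30 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("field %#04x -> BE byte %#04x, LE byte %#04x\n",
		       fields[i], fields[i] ^ DEMO_SNI_END_BE, fields[i] ^ DEMO_SNI_END_LE);
	return 0;
}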
*/ extern unsigned int mips_hpt_frequency; diff --git a/include/asm-mips/topology.h b/include/asm-mips/topology.h index 0440fb9f2180..259145e07e97 100644 --- a/include/asm-mips/topology.h +++ b/include/asm-mips/topology.h @@ -1 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 by Ralf Baechle + */ +#ifndef __ASM_TOPOLOGY_H +#define __ASM_TOPOLOGY_H + #include <topology.h> + +#ifdef CONFIG_SMP +#define smt_capable() (smp_num_siblings > 1) +#endif + +#endif /* __ASM_TOPOLOGY_H */ diff --git a/include/asm-mips/tx4927/tx4927_pci.h b/include/asm-mips/tx4927/tx4927_pci.h index 3f1e470192e3..0be77df70f2b 100644 --- a/include/asm-mips/tx4927/tx4927_pci.h +++ b/include/asm-mips/tx4927/tx4927_pci.h @@ -9,6 +9,7 @@ #define __ASM_TX4927_TX4927_PCI_H #define TX4927_CCFG_TOE 0x00004000 +#define TX4927_CCFG_WR 0x00008000 #define TX4927_CCFG_TINTDIS 0x01000000 #define TX4927_PCIMEM 0x08000000 diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h index c30c718994c9..66523d610950 100644 --- a/include/asm-mips/uaccess.h +++ b/include/asm-mips/uaccess.h @@ -5,6 +5,7 @@ * * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki */ #ifndef _ASM_UACCESS_H #define _ASM_UACCESS_H @@ -387,6 +388,12 @@ extern void __put_user_unknown(void); "jal\t" #destination "\n\t" #endif +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS +#define DADDI_SCRATCH "$0" +#else +#define DADDI_SCRATCH "$3" +#endif + extern size_t __copy_user(void *__to, const void *__from, size_t __n); #define __invoke_copy_to_user(to, from, n) \ @@ -403,7 +410,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ - "memory"); \ + DADDI_SCRATCH, "memory"); \ __cu_len_r; \ }) @@ -512,7 +519,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ - "memory"); \ + DADDI_SCRATCH, "memory"); \ __cu_len_r; \ }) @@ -535,7 +542,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ - "memory"); \ + DADDI_SCRATCH, "memory"); \ __cu_len_r; \ }) diff --git a/include/asm-mips/war.h b/include/asm-mips/war.h index d2808edfd4e9..22361d5e3bf0 100644 --- a/include/asm-mips/war.h +++ b/include/asm-mips/war.h @@ -4,6 +4,7 @@ * for more details. * * Copyright (C) 2002, 2004, 2007 by Ralf Baechle + * Copyright (C) 2007 Maciej W. 
Rozycki */ #ifndef _ASM_WAR_H #define _ASM_WAR_H @@ -11,6 +12,67 @@ #include <war.h> /* + * Work around certain R4000 CPU errata (as implemented by GCC): + * + * - A double-word or a variable shift may give an incorrect result + * if executed immediately after starting an integer division: + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #28 + * "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0", erratum + * #19 + * + * - A double-word or a variable shift may give an incorrect result + * if executed while an integer multiplication is in progress: + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * errata #16 & #28 + * + * - An integer division may give an incorrect result if started in + * a delay slot of a taken branch or a jump: + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #52 + */ +#ifdef CONFIG_CPU_R4000_WORKAROUNDS +#define R4000_WAR 1 +#else +#define R4000_WAR 0 +#endif + +/* + * Work around certain R4400 CPU errata (as implemented by GCC): + * + * - A double-word or a variable shift may give an incorrect result + * if executed immediately after starting an integer division: + * "MIPS R4400MC Errata, Processor Revision 1.0", erratum #10 + * "MIPS R4400MC Errata, Processor Revision 2.0 & 3.0", erratum #4 + */ +#ifdef CONFIG_CPU_R4400_WORKAROUNDS +#define R4400_WAR 1 +#else +#define R4400_WAR 0 +#endif + +/* + * Work around the "daddi" and "daddiu" CPU errata: + * + * - The `daddi' instruction fails to trap on overflow. + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #23 + * + * - The `daddiu' instruction can produce an incorrect result. + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #41 + * "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0", erratum + * #15 + * "MIPS R4400PC/SC Errata, Processor Revision 1.0", erratum #7 + * "MIPS R4400MC Errata, Processor Revision 1.0", erratum #5 + */ +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS +#define DADDI_WAR 1 +#else +#define DADDI_WAR 0 +#endif + +/* * Another R4600 erratum. Due to the lack of errata information the exact * technical details aren't known. I've experimentally found that disabling * interrupts during indexed I-cache flushes seems to be sufficient to deal diff --git a/include/asm-parisc/agp.h b/include/asm-parisc/agp.h index 9f61d4eb6c01..9651660da639 100644 --- a/include/asm-parisc/agp.h +++ b/include/asm-parisc/agp.h @@ -9,7 +9,6 @@ #define map_page_into_agp(page) /* nothing */ #define unmap_page_from_agp(page) /* nothing */ -#define flush_agp_mappings() /* nothing */ #define flush_agp_cache() mb() /* Convert a physical address to an address suitable for the GART. */ diff --git a/include/asm-powerpc/agp.h b/include/asm-powerpc/agp.h index e5ccaca2f5a4..86455c4c31ee 100644 --- a/include/asm-powerpc/agp.h +++ b/include/asm-powerpc/agp.h @@ -6,7 +6,6 @@ #define map_page_into_agp(page) #define unmap_page_from_agp(page) -#define flush_agp_mappings() #define flush_agp_cache() mb() /* Convert a physical address to an address suitable for the GART. 
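The R4000_WAR/R4400_WAR/DADDI_WAR constants defined in asm-mips/war.h above are plain 0/1 values, which lets workaround code stay compiled (so it cannot bit-rot) while the compiler folds the dead branch away; the uaccess.h hunk uses the same switch to clobber "$3" only when the DADDI workaround needs a scratch register. A hedged sketch of that 0/1 idiom, with invented helper names:

/* Sketch of the always-compiled workaround switch; do_safe_op() and
 * do_fast_op() are invented stand-ins, not kernel functions. */
#define DEMO_DADDI_WAR 0	/* would be driven by CONFIG_CPU_DADDI_WORKAROUNDS */

static void do_safe_op(void) { /* longer sequence that avoids daddi/daddiu */ }
static void do_fast_op(void) { /* straight-line sequence */ }

static void do_op(void)
{
	if (DEMO_DADDI_WAR)	/* always parsed, folded to one branch at build time */
		do_safe_op();
	else
		do_fast_op();
}

int main(void)
{
	do_op();
	return 0;
}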
*/ diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index 733b4af7f4f1..220d9a781ab9 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h @@ -359,6 +359,8 @@ static __inline__ int test_le_bit(unsigned long nr, unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +unsigned long generic_find_next_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); /* Bitmap functions for the ext2 filesystem */ #define ext2_set_bit(nr,addr) \ @@ -378,6 +380,8 @@ unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, #define ext2_find_next_zero_bit(addr, size, off) \ generic_find_next_zero_le_bit((unsigned long*)addr, size, off) +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)addr, size, off) /* Bitmap functions for the minix filesystem. */ #define minix_test_and_set_bit(nr,addr) \ diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h index fd7f5a430f0a..6d50310ecaea 100644 --- a/include/asm-powerpc/ide.h +++ b/include/asm-powerpc/ide.h @@ -42,9 +42,6 @@ struct ide_machdep_calls { extern struct ide_machdep_calls ppc_ide_md; -#undef SUPPORT_SLOW_DATA_PORTS -#define SUPPORT_SLOW_DATA_PORTS 0 - #define IDE_ARCH_OBSOLETE_DEFAULTS static __inline__ int ide_default_irq(unsigned long base) diff --git a/include/asm-powerpc/pasemi_dma.h b/include/asm-powerpc/pasemi_dma.h new file mode 100644 index 000000000000..b4526ff3a50d --- /dev/null +++ b/include/asm-powerpc/pasemi_dma.h @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2006 PA Semi, Inc + * + * Hardware register layout and descriptor formats for the on-board + * DMA engine on PA Semi PWRficient. Used by ethernet, function and security + * drivers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef ASM_PASEMI_DMA_H +#define ASM_PASEMI_DMA_H + +/* status register layout in IOB region, at 0xfb800000 */ +struct pasdma_status { + u64 rx_sta[64]; /* RX channel status */ + u64 tx_sta[20]; /* TX channel status */ +}; + + +/* All these registers live in the PCI configuration space for the DMA PCI + * device. Use the normal PCI config access functions for them. 
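The register layout below uses the usual *_M mask / *_S shift convention: the _M macro selects a bit field, _S is its shift, and the function-like form builds a value with (((x) << S) & M). A short sketch that pulls the TX-channel count back out of a capability word; the register value here is made up, not read from hardware:

#include <stdio.h>

#define PAS_DMA_CAP_TXCH_TCHN_M	0x00ff0000	/* # of TX channels (from the header below) */
#define PAS_DMA_CAP_TXCH_TCHN_S	16

int main(void)
{
	unsigned int cap = 0x00140000;	/* illustrative capability word */
	unsigned int nchan = (cap & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	printf("TX channels: %u\n", nchan);	/* 0x14 = 20, matching tx_sta[20] above */
	return 0;
}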
+ */ +enum { + PAS_DMA_CAP_TXCH = 0x44, /* Transmit Channel Info */ + PAS_DMA_CAP_RXCH = 0x48, /* Transmit Channel Info */ + PAS_DMA_CAP_IFI = 0x4c, /* Interface Info */ + PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */ + PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */ + PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */ + PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */ +}; + + +#define PAS_DMA_CAP_TXCH_TCHN_M 0x00ff0000 /* # of TX channels */ +#define PAS_DMA_CAP_TXCH_TCHN_S 16 + +#define PAS_DMA_CAP_RXCH_RCHN_M 0x00ff0000 /* # of RX channels */ +#define PAS_DMA_CAP_RXCH_RCHN_S 16 + +#define PAS_DMA_CAP_IFI_IOFF_M 0xff000000 /* Cfg reg for intf pointers */ +#define PAS_DMA_CAP_IFI_IOFF_S 24 +#define PAS_DMA_CAP_IFI_NIN_M 0x00ff0000 /* # of interfaces */ +#define PAS_DMA_CAP_IFI_NIN_S 16 + +#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */ +#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */ +#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */ +#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */ + + +/* Per-interface and per-channel registers */ +#define _PAS_DMA_RXINT_STRIDE 0x20 +#define PAS_DMA_RXINT_RCMDSTA(i) (0x200+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_RCMDSTA_EN 0x00000001 +#define PAS_DMA_RXINT_RCMDSTA_ST 0x00000002 +#define PAS_DMA_RXINT_RCMDSTA_MBT 0x00000008 +#define PAS_DMA_RXINT_RCMDSTA_MDR 0x00000010 +#define PAS_DMA_RXINT_RCMDSTA_MOO 0x00000020 +#define PAS_DMA_RXINT_RCMDSTA_MBP 0x00000040 +#define PAS_DMA_RXINT_RCMDSTA_BT 0x00000800 +#define PAS_DMA_RXINT_RCMDSTA_DR 0x00001000 +#define PAS_DMA_RXINT_RCMDSTA_OO 0x00002000 +#define PAS_DMA_RXINT_RCMDSTA_BP 0x00004000 +#define PAS_DMA_RXINT_RCMDSTA_TB 0x00008000 +#define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000 +#define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000 +#define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17 +#define PAS_DMA_RXINT_CFG(i) (0x204+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_CFG_RBP 0x80000000 +#define PAS_DMA_RXINT_CFG_ITRR 0x40000000 +#define PAS_DMA_RXINT_CFG_DHL_M 0x07000000 +#define PAS_DMA_RXINT_CFG_DHL_S 24 +#define PAS_DMA_RXINT_CFG_DHL(x) (((x) << PAS_DMA_RXINT_CFG_DHL_S) & \ + PAS_DMA_RXINT_CFG_DHL_M) +#define PAS_DMA_RXINT_CFG_ITR 0x00400000 +#define PAS_DMA_RXINT_CFG_LW 0x00200000 +#define PAS_DMA_RXINT_CFG_L2 0x00100000 +#define PAS_DMA_RXINT_CFG_HEN 0x00080000 +#define PAS_DMA_RXINT_CFG_WIF 0x00000002 +#define PAS_DMA_RXINT_CFG_WIL 0x00000001 + +#define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff +#define PAS_DMA_RXINT_INCR_INCR_S 0 +#define PAS_DMA_RXINT_INCR_INCR(x) ((x) & 0x0000ffff) +#define PAS_DMA_RXINT_BASEL(i) (0x218+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_BASEL_BRBL(x) ((x) & ~0x3f) +#define PAS_DMA_RXINT_BASEU(i) (0x21c+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_BASEU_BRBH(x) ((x) & 0xfff) +#define PAS_DMA_RXINT_BASEU_SIZ_M 0x3fff0000 /* # of cache lines worth of buffer ring */ +#define PAS_DMA_RXINT_BASEU_SIZ_S 16 /* 0 = 16K */ +#define PAS_DMA_RXINT_BASEU_SIZ(x) (((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \ + PAS_DMA_RXINT_BASEU_SIZ_M) + + +#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */ +#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */ +#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */ +#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */ +#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */ +#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */ +#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base 
(low) */ +#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */ +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */ +#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */ +#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */ +#define PAS_DMA_TXCHAN_TCMDSTA_SZ 0x00000800 +#define PAS_DMA_TXCHAN_TCMDSTA_DB 0x00000400 +#define PAS_DMA_TXCHAN_TCMDSTA_DE 0x00000200 +#define PAS_DMA_TXCHAN_TCMDSTA_DA 0x00000100 +#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */ +#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c +#define PAS_DMA_TXCHAN_CFG_TATTR_S 2 +#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \ + PAS_DMA_TXCHAN_CFG_TATTR_M) +#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0 +#define PAS_DMA_TXCHAN_CFG_WT_S 6 +#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \ + PAS_DMA_TXCHAN_CFG_WT_M) +#define PAS_DMA_TXCHAN_CFG_TRD 0x00010000 /* translate data */ +#define PAS_DMA_TXCHAN_CFG_TRR 0x00008000 /* translate rings */ +#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */ +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */ +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */ +#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0 +#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0 +#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \ + PAS_DMA_TXCHAN_BASEL_BRBL_M) +#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE) +#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff +#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0 +#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \ + PAS_DMA_TXCHAN_BASEU_BRBH_M) +/* # of cache lines worth of buffer ring */ +#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000 +#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */ +#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \ + PAS_DMA_TXCHAN_BASEU_SIZ_M) + +#define _PAS_DMA_RXCHAN_STRIDE 0x20 /* Size per channel */ +#define _PAS_DMA_RXCHAN_CCMDSTA 0x800 /* Command / Status */ +#define _PAS_DMA_RXCHAN_CFG 0x804 /* Configuration */ +#define _PAS_DMA_RXCHAN_INCR 0x810 /* Descriptor increment */ +#define _PAS_DMA_RXCHAN_CNT 0x814 /* Descriptor count/offset */ +#define _PAS_DMA_RXCHAN_BASEL 0x818 /* Descriptor ring base (low) */ +#define _PAS_DMA_RXCHAN_BASEU 0x81c /* (high) */ +#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_CCMDSTA_EN 0x00000001 /* Enabled */ +#define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */ +#define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */ +#define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000 +#define PAS_DMA_RXCHAN_CCMDSTA_OD 0x00002000 +#define PAS_DMA_RXCHAN_CCMDSTA_FD 0x00001000 +#define PAS_DMA_RXCHAN_CCMDSTA_DT 0x00000800 +#define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_CFG_CTR 0x00000400 +#define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380 +#define PAS_DMA_RXCHAN_CFG_HBU_S 7 +#define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \ + PAS_DMA_RXCHAN_CFG_HBU_M) +#define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_BASEL_BRBL_M 0xffffffc0 +#define 
PAS_DMA_RXCHAN_BASEL_BRBL_S 0 +#define PAS_DMA_RXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \ + PAS_DMA_RXCHAN_BASEL_BRBL_M) +#define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_BASEU_BRBH_M 0x00000fff +#define PAS_DMA_RXCHAN_BASEU_BRBH_S 0 +#define PAS_DMA_RXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \ + PAS_DMA_RXCHAN_BASEU_BRBH_M) +/* # of cache lines worth of buffer ring */ +#define PAS_DMA_RXCHAN_BASEU_SIZ_M 0x3fff0000 +#define PAS_DMA_RXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */ +#define PAS_DMA_RXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \ + PAS_DMA_RXCHAN_BASEU_SIZ_M) + +#define PAS_STATUS_PCNT_M 0x000000000000ffffull +#define PAS_STATUS_PCNT_S 0 +#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull +#define PAS_STATUS_DCNT_S 16 +#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull +#define PAS_STATUS_BPCNT_S 32 +#define PAS_STATUS_CAUSE_M 0xf000000000000000ull +#define PAS_STATUS_TIMER 0x1000000000000000ull +#define PAS_STATUS_ERROR 0x2000000000000000ull +#define PAS_STATUS_SOFT 0x4000000000000000ull +#define PAS_STATUS_INT 0x8000000000000000ull + +#define PAS_IOB_COM_PKTHDRCNT 0x120 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_M 0x0fff0000 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_S 16 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_M 0x00000fff +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_S 0 + +#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4) +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \ + PAS_IOB_DMA_RXCH_CFG_CNTTH_M) +#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4) +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \ + PAS_IOB_DMA_TXCH_CFG_CNTTH_M) +#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4) +#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\ + PAS_IOB_DMA_RXCH_STAT_CNTDEL_M) +#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4) +#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\ + PAS_IOB_DMA_TXCH_STAT_CNTDEL_M) +#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4) +#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16 +#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \ + PAS_IOB_DMA_RXCH_RESET_PCNT_M) +#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020 +#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010 +#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008 +#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004 +#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002 +#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001 +#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4) +#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16 +#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \ + PAS_IOB_DMA_TXCH_RESET_PCNT_M) +#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020 +#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010 +#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008 +#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004 +#define 
PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002 +#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001 + +#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \ + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M) + +/* Transmit descriptor fields */ +#define XCT_MACTX_T 0x8000000000000000ull +#define XCT_MACTX_ST 0x4000000000000000ull +#define XCT_MACTX_NORES 0x0000000000000000ull +#define XCT_MACTX_8BRES 0x1000000000000000ull +#define XCT_MACTX_24BRES 0x2000000000000000ull +#define XCT_MACTX_40BRES 0x3000000000000000ull +#define XCT_MACTX_I 0x0800000000000000ull +#define XCT_MACTX_O 0x0400000000000000ull +#define XCT_MACTX_E 0x0200000000000000ull +#define XCT_MACTX_VLAN_M 0x0180000000000000ull +#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull +#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull +#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull +#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull +#define XCT_MACTX_CRC_M 0x0060000000000000ull +#define XCT_MACTX_CRC_NOP 0x0000000000000000ull +#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull +#define XCT_MACTX_CRC_PAD 0x0040000000000000ull +#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull +#define XCT_MACTX_SS 0x0010000000000000ull +#define XCT_MACTX_LLEN_M 0x00007fff00000000ull +#define XCT_MACTX_LLEN_S 32ull +#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \ + XCT_MACTX_LLEN_M) +#define XCT_MACTX_IPH_M 0x00000000f8000000ull +#define XCT_MACTX_IPH_S 27ull +#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \ + XCT_MACTX_IPH_M) +#define XCT_MACTX_IPO_M 0x0000000007c00000ull +#define XCT_MACTX_IPO_S 22ull +#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \ + XCT_MACTX_IPO_M) +#define XCT_MACTX_CSUM_M 0x0000000000000060ull +#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull +#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull +#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull +#define XCT_MACTX_V6 0x0000000000000010ull +#define XCT_MACTX_C 0x0000000000000004ull +#define XCT_MACTX_AL2 0x0000000000000002ull + +/* Receive descriptor fields */ +#define XCT_MACRX_T 0x8000000000000000ull +#define XCT_MACRX_ST 0x4000000000000000ull +#define XCT_MACRX_RR_M 0x3000000000000000ull +#define XCT_MACRX_RR_NORES 0x0000000000000000ull +#define XCT_MACRX_RR_8BRES 0x1000000000000000ull +#define XCT_MACRX_O 0x0400000000000000ull +#define XCT_MACRX_E 0x0200000000000000ull +#define XCT_MACRX_FF 0x0100000000000000ull +#define XCT_MACRX_PF 0x0080000000000000ull +#define XCT_MACRX_OB 0x0040000000000000ull +#define XCT_MACRX_OD 0x0020000000000000ull +#define XCT_MACRX_FS 0x0010000000000000ull +#define XCT_MACRX_NB_M 0x000fc00000000000ull +#define XCT_MACRX_NB_S 46ULL +#define XCT_MACRX_NB(x) ((((long)(x)) << XCT_MACRX_NB_S) & \ + XCT_MACRX_NB_M) +#define XCT_MACRX_LLEN_M 0x00003fff00000000ull +#define XCT_MACRX_LLEN_S 32ULL +#define XCT_MACRX_LLEN(x) ((((long)(x)) << XCT_MACRX_LLEN_S) & \ + XCT_MACRX_LLEN_M) +#define XCT_MACRX_CRC 0x0000000080000000ull +#define XCT_MACRX_LEN_M 0x0000000060000000ull +#define XCT_MACRX_LEN_TOOSHORT 0x0000000020000000ull +#define XCT_MACRX_LEN_BELOWMIN 0x0000000040000000ull +#define XCT_MACRX_LEN_TRUNC 0x0000000060000000ull +#define XCT_MACRX_CAST_M 0x0000000018000000ull +#define XCT_MACRX_CAST_UNI 0x0000000000000000ull +#define XCT_MACRX_CAST_MULTI 0x0000000008000000ull +#define XCT_MACRX_CAST_BROAD 0x0000000010000000ull +#define XCT_MACRX_CAST_PAUSE 
0x0000000018000000ull +#define XCT_MACRX_VLC_M 0x0000000006000000ull +#define XCT_MACRX_FM 0x0000000001000000ull +#define XCT_MACRX_HTY_M 0x0000000000c00000ull +#define XCT_MACRX_HTY_IPV4_OK 0x0000000000000000ull +#define XCT_MACRX_HTY_IPV6 0x0000000000400000ull +#define XCT_MACRX_HTY_IPV4_BAD 0x0000000000800000ull +#define XCT_MACRX_HTY_NONIP 0x0000000000c00000ull +#define XCT_MACRX_IPP_M 0x00000000003f0000ull +#define XCT_MACRX_IPP_S 16 +#define XCT_MACRX_CSUM_M 0x000000000000ffffull +#define XCT_MACRX_CSUM_S 0 + +#define XCT_PTR_T 0x8000000000000000ull +#define XCT_PTR_LEN_M 0x7ffff00000000000ull +#define XCT_PTR_LEN_S 44 +#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \ + XCT_PTR_LEN_M) +#define XCT_PTR_ADDR_M 0x00000fffffffffffull +#define XCT_PTR_ADDR_S 0 +#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \ + XCT_PTR_ADDR_M) + +/* Receive interface 8byte result fields */ +#define XCT_RXRES_8B_L4O_M 0xff00000000000000ull +#define XCT_RXRES_8B_L4O_S 56 +#define XCT_RXRES_8B_RULE_M 0x00ffff0000000000ull +#define XCT_RXRES_8B_RULE_S 40 +#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull +#define XCT_RXRES_8B_EVAL_S 24 +#define XCT_RXRES_8B_HTYPE_M 0x0000000000f00000ull +#define XCT_RXRES_8B_HASH_M 0x00000000000fffffull +#define XCT_RXRES_8B_HASH_S 0 + +/* Receive interface buffer fields */ +#define XCT_RXB_LEN_M 0x0ffff00000000000ull +#define XCT_RXB_LEN_S 44 +#define XCT_RXB_LEN(x) ((((long)(x)) << XCT_RXB_LEN_S) & \ + XCT_RXB_LEN_M) +#define XCT_RXB_ADDR_M 0x00000fffffffffffull +#define XCT_RXB_ADDR_S 0 +#define XCT_RXB_ADDR(x) ((((long)(x)) << XCT_RXB_ADDR_S) & \ + XCT_RXB_ADDR_M) + +/* Copy descriptor fields */ +#define XCT_COPY_T 0x8000000000000000ull +#define XCT_COPY_ST 0x4000000000000000ull +#define XCT_COPY_RR_M 0x3000000000000000ull +#define XCT_COPY_RR_NORES 0x0000000000000000ull +#define XCT_COPY_RR_8BRES 0x1000000000000000ull +#define XCT_COPY_RR_24BRES 0x2000000000000000ull +#define XCT_COPY_RR_40BRES 0x3000000000000000ull +#define XCT_COPY_I 0x0800000000000000ull +#define XCT_COPY_O 0x0400000000000000ull +#define XCT_COPY_E 0x0200000000000000ull +#define XCT_COPY_STY_ZERO 0x01c0000000000000ull +#define XCT_COPY_DTY_PREF 0x0038000000000000ull +#define XCT_COPY_LLEN_M 0x0007ffff00000000ull +#define XCT_COPY_LLEN_S 32 +#define XCT_COPY_LLEN(x) ((((long)(x)) << XCT_COPY_LLEN_S) & \ + XCT_COPY_LLEN_M) +#define XCT_COPY_SE 0x0000000000000001ull + +/* Control descriptor fields */ +#define CTRL_CMD_T 0x8000000000000000ull +#define CTRL_CMD_META_EVT 0x2000000000000000ull +#define CTRL_CMD_O 0x0400000000000000ull +#define CTRL_CMD_REG_M 0x000000000000000full +#define CTRL_CMD_REG_S 0 +#define CTRL_CMD_REG(x) ((((long)(x)) << CTRL_CMD_REG_S) & \ + CTRL_CMD_REG_M) + + + +/* Prototypes for the shared DMA functions in the platform code. */ + +/* DMA TX Channel type. Right now only limitations used are event types 0/1, + * for event-triggered DMA transactions. 
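A driver builds the first 64-bit word of a transmit descriptor by OR-ing the XCT_MACTX_* flags above with the length field macro. A hedged sketch with the macros copied from the hunk; the flag selection and the frame length are purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define XCT_MACTX_ST		0x4000000000000000ull
#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
#define XCT_MACTX_LLEN_S	32ull
/* As in the header, this assumes a 64-bit long (the macro targets 64-bit powerpc). */
#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & XCT_MACTX_LLEN_M)

int main(void)
{
	unsigned int len = 1514;	/* illustrative frame length */
	uint64_t mactx = XCT_MACTX_ST | XCT_MACTX_CRC_PAD | XCT_MACTX_LLEN(len);

	printf("descriptor word: %#018llx\n", (unsigned long long)mactx);
	return 0;
}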
+ */ + +enum pasemi_dmachan_type { + RXCHAN = 0, /* Any RX chan */ + TXCHAN = 1, /* Any TX chan */ + TXCHAN_EVT0 = 0x1001, /* TX chan in event class 0 (chan 0-9) */ + TXCHAN_EVT1 = 0x2001, /* TX chan in event class 1 (chan 10-19) */ +}; + +struct pasemi_dmachan { + int chno; /* Channel number */ + enum pasemi_dmachan_type chan_type; /* TX / RX */ + u64 *status; /* Ptr to cacheable status */ + int irq; /* IRQ used by channel */ + unsigned int ring_size; /* size of allocated ring */ + dma_addr_t ring_dma; /* DMA address for ring */ + u64 *ring_virt; /* Virt address for ring */ + void *priv; /* Ptr to start of client struct */ +}; + +/* Read/write the different registers in the I/O Bridge, Ethernet + * and DMA Controller + */ +extern unsigned int pasemi_read_iob_reg(unsigned int reg); +extern void pasemi_write_iob_reg(unsigned int reg, unsigned int val); + +extern unsigned int pasemi_read_mac_reg(int intf, unsigned int reg); +extern void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val); + +extern unsigned int pasemi_read_dma_reg(unsigned int reg); +extern void pasemi_write_dma_reg(unsigned int reg, unsigned int val); + +/* Channel management routines */ + +extern void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type, + int total_size, int offset); +extern void pasemi_dma_free_chan(struct pasemi_dmachan *chan); + +extern void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, + const u32 cmdsta); +extern int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan); + +/* Common routines to allocate rings and buffers */ + +extern int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size); +extern void pasemi_dma_free_ring(struct pasemi_dmachan *chan); + +extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, + dma_addr_t *handle); +extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, + dma_addr_t *handle); + +/* Initialize the library, must be called before any other functions */ +extern int pasemi_dma_init(void); + +#endif /* ASM_PASEMI_DMA_H */ diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index 6b229626d3ff..cc1cbf656b02 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h @@ -16,15 +16,6 @@ #define __my_cpu_offset() get_paca()->data_offset #define per_cpu_offset(x) (__per_cpu_offset(x)) -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) @@ -43,11 +34,6 @@ extern void setup_per_cpu_areas(void); #else /* ! 
SMP */ -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define __raw_get_cpu_var(var) per_cpu__##var @@ -56,9 +42,6 @@ extern void setup_per_cpu_areas(void); #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #else #include <asm-generic/percpu.h> #endif diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h index c662287efd8f..ffc150f602b8 100644 --- a/include/asm-powerpc/ptrace.h +++ b/include/asm-powerpc/ptrace.h @@ -120,6 +120,13 @@ do { \ } while (0) #endif /* __powerpc64__ */ +/* + * These are defined as per linux/ptrace.h, which see. + */ +#define arch_has_single_step() (1) +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-s390/airq.h b/include/asm-s390/airq.h new file mode 100644 index 000000000000..41d028cb52a4 --- /dev/null +++ b/include/asm-s390/airq.h @@ -0,0 +1,19 @@ +/* + * include/asm-s390/airq.h + * + * Copyright IBM Corp. 2002,2007 + * Author(s): Ingo Adlung <adlung@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> + * Arnd Bergmann <arndb@de.ibm.com> + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#ifndef _ASM_S390_AIRQ_H +#define _ASM_S390_AIRQ_H + +typedef void (*adapter_int_handler_t)(void *, void *); + +void *s390_register_adapter_interrupt(adapter_int_handler_t, void *); +void s390_unregister_adapter_interrupt(void *); + +#endif /* _ASM_S390_AIRQ_H */ diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 34d9a6357c38..dba6fecad0be 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -772,6 +772,8 @@ static inline int sched_find_first_bit(unsigned long *b) test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) #define ext2_test_bit(nr, addr) \ test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) +#define ext2_find_next_bit(addr, size, off) \ + generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) #ifndef __s390x__ diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index 2f08c16e44ad..123b557c3ff4 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h @@ -24,8 +24,8 @@ * @fmt: format * @pfch: prefetch * @isic: initial-status interruption control - * @alcc: adress-limit checking control - * @ssi: supress-suspended interruption + * @alcc: address-limit checking control + * @ssi: suppress-suspended interruption * @zcc: zero condition code * @ectl: extended control * @pno: path not operational diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h index 604f68fa6f56..3f002e13d024 100644 --- a/include/asm-s390/dasd.h +++ b/include/asm-s390/dasd.h @@ -105,7 +105,7 @@ typedef struct dasd_information_t { } dasd_information_t; /* - * Read Subsystem Data - Perfomance Statistics + * Read Subsystem Data - Performance Statistics */ typedef struct dasd_rssd_perf_stats_t { unsigned char invalid:1; diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 2c40fd3a137f..c1b2e50392bb 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h @@ -83,6 +83,8 @@ extern u32 dump_prefix_page; extern unsigned int 
zfcpdump_prefix_array[]; extern void do_reipl(void); +extern void do_halt(void); +extern void do_poff(void); extern void ipl_save_parameters(void); enum { @@ -118,7 +120,7 @@ struct ipl_info }; extern struct ipl_info ipl_info; -extern void setup_ipl_info(void); +extern void setup_ipl(void); /* * DIAG 308 support @@ -141,6 +143,10 @@ enum diag308_opt { DIAG308_IPL_OPT_DUMP = 0x20, }; +enum diag308_flags { + DIAG308_FLAGS_LP_VALID = 0x80, +}; + enum diag308_rc { DIAG308_RC_OK = 1, }; diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h index 05b842126b99..a77d4ba3c8eb 100644 --- a/include/asm-s390/mmu_context.h +++ b/include/asm-s390/mmu_context.h @@ -12,10 +12,15 @@ #include <asm/pgalloc.h> #include <asm-generic/mm_hooks.h> -/* - * get a new mmu context.. S390 don't know about contexts. - */ -#define init_new_context(tsk,mm) 0 +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; +#ifdef CONFIG_64BIT + mm->context |= _ASCE_TYPE_REGION3; +#endif + return 0; +} #define destroy_context(mm) do { } while (0) @@ -27,19 +32,11 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) { - pgd_t *pgd = mm->pgd; - unsigned long asce_bits; - - /* Calculate asce bits from the first pgd table entry. */ - asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; -#ifdef CONFIG_64BIT - asce_bits |= _ASCE_TYPE_REGION3; -#endif - S390_lowcore.user_asce = asce_bits | __pa(pgd); + S390_lowcore.user_asce = mm->context | __pa(mm->pgd); if (switch_amode) { /* Load primary space page table origin. */ - pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd; - S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd); + pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd; + S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd); asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_exec_asce) ); } else diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index 545857e64443..2d676a873858 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h @@ -4,8 +4,6 @@ #include <linux/compiler.h> #include <asm/lowcore.h> -#define __GENERIC_PER_CPU - /* * s390 uses its own implementation for per cpu data, the offset of * the cpu local data area is cached in the cpu's lowcore memory. @@ -36,16 +34,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) \ - __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) @@ -62,11 +50,6 @@ do { \ #else /* ! 
SMP */ -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - #define __get_cpu_var(var) __reloc_hide(var,0) #define __raw_get_cpu_var(var) __reloc_hide(var,0) #define per_cpu(var,cpu) __reloc_hide(var,0) @@ -75,7 +58,4 @@ do { \ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #endif /* __ARCH_S390_PERCPU__ */ diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 1f530f8a6280..79b9eab1a0c7 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -104,41 +104,27 @@ extern char empty_zero_page[PAGE_SIZE]; #ifndef __ASSEMBLY__ /* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - * vmalloc area starts at 4GB to prevent syscall table entry exchanging - * from modules. - */ -extern unsigned long vmalloc_end; - -#ifdef CONFIG_64BIT -#define VMALLOC_ADDR (max(0x100000000UL, (unsigned long) high_memory)) -#else -#define VMALLOC_ADDR ((unsigned long) high_memory) -#endif -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START ((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_END vmalloc_end - -/* - * We need some free virtual space to be able to do vmalloc. - * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc - * area. On a machine with 2GB memory we make sure that we - * have at least 128MB free space for vmalloc. On a machine - * with 4TB we make sure we have at least 128GB. + * The vmalloc area will always be on the topmost area of the kernel + * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc, + * which should be enough for any sane case. + * By putting vmalloc at the top, we maximise the gap between physical + * memory and vmalloc to catch misplaced memory accesses. As a side + * effect, this also makes sure that 64 bit module code cannot be used + * as system call address. */ #ifndef __s390x__ -#define VMALLOC_MIN_SIZE 0x8000000UL -#define VMALLOC_END_INIT 0x80000000UL +#define VMALLOC_START 0x78000000UL +#define VMALLOC_END 0x7e000000UL +#define VMEM_MAP_MAX 0x80000000UL #else /* __s390x__ */ -#define VMALLOC_MIN_SIZE 0x2000000000UL -#define VMALLOC_END_INIT 0x40000000000UL +#define VMALLOC_START 0x3e000000000UL +#define VMALLOC_END 0x3e040000000UL +#define VMEM_MAP_MAX 0x40000000000UL #endif /* __s390x__ */ +#define VMEM_MAP ((struct page *) VMALLOC_END) +#define VMEM_MAP_SIZE ((VMALLOC_START / PAGE_SIZE) * sizeof(struct page)) + /* * A 31 bit pagetable entry of S390 has following format: * | PFRA | | OS | diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 21d40a19355e..c86b982aef5a 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -59,9 +59,6 @@ extern void s390_adjust_jiffies(void); extern void print_cpu_info(struct cpuinfo_S390 *); extern int get_cpu_capability(unsigned int *); -/* Lazy FPU handling on uni-processor */ -extern struct task_struct *last_task_used_math; - /* * User space process size: 2GB for 31 bit, 4TB for 64 bit. 
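The fixed VMALLOC_START/VMALLOC_END windows above work out to exactly the 96 MB (31-bit) and 1 GB (64-bit) reservation the new comment mentions; a quick arithmetic check, nothing more:

#include <stdio.h>

int main(void)
{
	unsigned long long v31 = 0x7e000000ULL    - 0x78000000ULL;	/* 31-bit window */
	unsigned long long v64 = 0x3e040000000ULL - 0x3e000000000ULL;	/* 64-bit window */

	printf("31-bit vmalloc area: %llu MB\n", v31 >> 20);	/* 96 */
	printf("64-bit vmalloc area: %llu GB\n", v64 >> 30);	/* 1 */
	return 0;
}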
*/ @@ -95,7 +92,6 @@ struct thread_struct { unsigned long ksp; /* kernel stack pointer */ mm_segment_t mm_segment; unsigned long prot_addr; /* address of protection-excep. */ - unsigned int error_code; /* error-code of last prog-excep. */ unsigned int trap_no; per_struct per_info; /* Used to give failing instruction back to user for ieee exceptions */ diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index 332ee73688fc..61f6952f2e35 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h @@ -465,6 +465,14 @@ struct user_regs_struct #ifdef __KERNEL__ #define __ARCH_SYS_PTRACE 1 +/* + * These are defined as per linux/ptrace.h, which see. + */ +#define arch_has_single_step() (1) +struct task_struct; +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); + #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) #define regs_return_value(regs)((regs)->gprs[2]) diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h index 74db1dc10a7d..4b8ff55f680e 100644 --- a/include/asm-s390/qdio.h +++ b/include/asm-s390/qdio.h @@ -184,7 +184,7 @@ struct qdr { #endif /* QDIO_32_BIT */ unsigned long qiba; /* queue-information-block address */ unsigned int res8; /* reserved */ - unsigned int qkey : 4; /* queue-informatio-block key */ + unsigned int qkey : 4; /* queue-information-block key */ unsigned int res9 : 28; /* reserved */ /* union _qd {*/ /* why this? */ struct qdesfmt0 qdf0[126]; diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h index 90f4eccaa290..9d2a17971805 100644 --- a/include/asm-s390/rwsem.h +++ b/include/asm-s390/rwsem.h @@ -91,8 +91,8 @@ struct rw_semaphore { #endif #define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h index cb9faf1ea5cf..b5f2843013a3 100644 --- a/include/asm-s390/sclp.h +++ b/include/asm-s390/sclp.h @@ -27,7 +27,25 @@ struct sclp_ipl_info { char loadparm[LOADPARM_LEN]; }; -void sclp_readinfo_early(void); +struct sclp_cpu_entry { + u8 address; + u8 reserved0[13]; + u8 type; + u8 reserved1; +} __attribute__((packed)); + +struct sclp_cpu_info { + unsigned int configured; + unsigned int standby; + unsigned int combined; + int has_cpu_type; + struct sclp_cpu_entry cpu[255]; +}; + +int sclp_get_cpu_info(struct sclp_cpu_info *info); +int sclp_cpu_configure(u8 cpu); +int sclp_cpu_deconfigure(u8 cpu); +void sclp_read_info_early(void); void sclp_facilities_detect(void); unsigned long long sclp_memory_detect(void); int sclp_sdias_blk_count(void); diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 07708c07701e..c7b74326a527 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -35,8 +35,6 @@ extern void machine_restart_smp(char *); extern void machine_halt_smp(void); extern void machine_power_off_smp(void); -extern void smp_setup_cpu_possible_map(void); - #define NO_PROC_ID 0xFF /* No processor magic marker */ /* @@ -92,6 +90,8 @@ extern void __cpu_die (unsigned int cpu); extern void cpu_die (void) __attribute__ ((noreturn)); extern int __cpu_up (unsigned int cpu); +extern int 
smp_call_function_mask(cpumask_t mask, void (*func)(void *), + void *info, int wait); #endif #ifndef CONFIG_SMP @@ -103,7 +103,6 @@ static inline void smp_send_stop(void) #define hard_smp_processor_id() 0 #define smp_cpu_not_running(cpu) 1 -#define smp_setup_cpu_possible_map() do { } while (0) #endif extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 3fd43826fd0b..df84ae96915f 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h @@ -53,44 +53,48 @@ _raw_compare_and_swap(volatile unsigned int *lock, */ #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) \ _raw_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc); -extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc); +extern void _raw_spin_lock_wait(raw_spinlock_t *); +extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); +extern int _raw_spin_trylock_retry(raw_spinlock_t *); extern void _raw_spin_relax(raw_spinlock_t *lock); static inline void __raw_spin_lock(raw_spinlock_t *lp) { - unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); - if (likely(old == 0)) { - lp->owner_pc = pc; + if (likely(old == 0)) return; - } - _raw_spin_lock_wait(lp, pc); + _raw_spin_lock_wait(lp); +} + +static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, + unsigned long flags) +{ + int old; + + old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); + if (likely(old == 0)) + return; + _raw_spin_lock_wait_flags(lp, flags); } static inline int __raw_spin_trylock(raw_spinlock_t *lp) { - unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); - if (likely(old == 0)) { - lp->owner_pc = pc; + if (likely(old == 0)) return 1; - } - return _raw_spin_trylock_retry(lp, pc); + return _raw_spin_trylock_retry(lp); } static inline void __raw_spin_unlock(raw_spinlock_t *lp) { - lp->owner_pc = 0; _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h index b7ac13f7aa37..654abc40de04 100644 --- a/include/asm-s390/spinlock_types.h +++ b/include/asm-s390/spinlock_types.h @@ -7,7 +7,6 @@ typedef struct { volatile unsigned int owner_cpu; - volatile unsigned int owner_pc; } __attribute__ ((aligned (4))) raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h index a69bd2490d52..70fa5ae58180 100644 --- a/include/asm-s390/tlbflush.h +++ b/include/asm-s390/tlbflush.h @@ -42,11 +42,11 @@ static inline void __tlb_flush_global(void) /* * Flush all tlb entries of a page table on all cpus. */ -static inline void __tlb_flush_idte(pgd_t *pgd) +static inline void __tlb_flush_idte(unsigned long asce) { asm volatile( " .insn rrf,0xb98e0000,0,%0,%1,0" - : : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" ); + : : "a" (2048), "a" (asce) : "cc" ); } static inline void __tlb_flush_mm(struct mm_struct * mm) @@ -61,11 +61,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) * only ran on the local cpu. 
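The reworked s390 __raw_spin_lock() above drops the owner_pc bookkeeping, leaving a fast path that is a single compare-and-swap of owner_cpu from 0 to ~smp_processor_id(). A portable sketch of that shape, with GCC's __sync builtin standing in for _raw_compare_and_swap and all names local to the sketch:

/* Minimal CAS spinlock mirroring the shape of the s390 fast path. */
typedef struct { volatile unsigned int owner_cpu; } demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *lp, unsigned int cpu)
{
	unsigned int tag = ~cpu;	/* non-zero value identifying the holder */

	/* Lock free (0) -> tagged; otherwise spin (the real code calls
	 * _raw_spin_lock_wait() instead of busy-looping here). */
	while (__sync_val_compare_and_swap(&lp->owner_cpu, 0, tag) != 0)
		;
}

static void demo_spin_unlock(demo_spinlock_t *lp)
{
	__sync_val_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

int main(void)
{
	demo_spinlock_t lock = { 0 };

	demo_spin_lock(&lock, 1);
	demo_spin_unlock(&lock);
	return 0;
}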
*/ if (MACHINE_HAS_IDTE) { - pgd_t *shadow_pgd = get_shadow_table(mm->pgd); + pgd_t *shadow = get_shadow_table(mm->pgd); - if (shadow_pgd) - __tlb_flush_idte(shadow_pgd); - __tlb_flush_idte(mm->pgd); + if (shadow) + __tlb_flush_idte((unsigned long) shadow | mm->context); + __tlb_flush_idte((unsigned long) mm->pgd | mm->context); return; } preempt_disable(); @@ -106,9 +106,23 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm) */ #define flush_tlb() do { } while (0) #define flush_tlb_all() do { } while (0) -#define flush_tlb_mm(mm) __tlb_flush_mm_cond(mm) #define flush_tlb_page(vma, addr) do { } while (0) -#define flush_tlb_range(vma, start, end) __tlb_flush_mm_cond(mm) -#define flush_tlb_kernel_range(start, end) __tlb_flush_mm(&init_mm) + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + __tlb_flush_mm_cond(mm); +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + __tlb_flush_mm_cond(vma->vm_mm); +} + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + __tlb_flush_mm(&init_mm); +} #endif /* _S390_TLBFLUSH_H */ diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h index a5dada617751..f228f1b86877 100644 --- a/include/asm-s390/zcrypt.h +++ b/include/asm-s390/zcrypt.h @@ -117,7 +117,7 @@ struct CPRBX { unsigned char padx004[16 - sizeof (char *)]; unsigned char * req_extb; /* request extension block 'addr'*/ unsigned char padx005[16 - sizeof (char *)]; - unsigned char * rpl_extb; /* reply extension block 'addres'*/ + unsigned char * rpl_extb; /* reply extension block 'address'*/ unsigned short ccp_rtcode; /* server return code */ unsigned short ccp_rscode; /* server reason code */ unsigned int mac_data_len; /* Mac Data Length */ diff --git a/include/asm-sh/Kbuild b/include/asm-sh/Kbuild index 76a8ccf254a5..43910cdf78a5 100644 --- a/include/asm-sh/Kbuild +++ b/include/asm-sh/Kbuild @@ -1,3 +1,8 @@ include include/asm-generic/Kbuild.asm header-y += cpu-features.h + +unifdef-y += unistd_32.h +unifdef-y += unistd_64.h +unifdef-y += posix_types_32.h +unifdef-y += posix_types_64.h diff --git a/include/asm-sh/addrspace.h b/include/asm-sh/addrspace.h index b860218e402e..fa544fc38c23 100644 --- a/include/asm-sh/addrspace.h +++ b/include/asm-sh/addrspace.h @@ -9,24 +9,21 @@ */ #ifndef __ASM_SH_ADDRSPACE_H #define __ASM_SH_ADDRSPACE_H + #ifdef __KERNEL__ #include <asm/cpu/addrspace.h> -/* Memory segments (32bit Privileged mode addresses) */ -#ifndef CONFIG_CPU_SH2A -#define P0SEG 0x00000000 -#define P1SEG 0x80000000 -#define P2SEG 0xa0000000 -#define P3SEG 0xc0000000 -#define P4SEG 0xe0000000 -#else -#define P0SEG 0x00000000 -#define P1SEG 0x00000000 -#define P2SEG 0x20000000 -#define P3SEG 0x00000000 -#define P4SEG 0x80000000 -#endif +/* If this CPU supports segmentation, hook up the helpers */ +#ifdef P1SEG + +/* + [ P0/U0 (virtual) ] 0x00000000 <------ User space + [ P1 (fixed) cached ] 0x80000000 <------ Kernel space + [ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access + [ P3 (virtual) cached] 0xC0000000 <------ vmalloced area + [ P4 control ] 0xE0000000 + */ /* Returns the privileged segment base of a given address */ #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) @@ -34,13 +31,23 @@ /* Returns the physical address of a PnSEG (n=1,2) address */ #define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) +#ifdef CONFIG_29BIT /* * Map an address to a certain privileged segment */ -#define P1SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | 
P1SEG)) -#define P2SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG)) -#define P3SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG)) -#define P4SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG)) +#define P1SEGADDR(a) \ + ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG)) +#define P2SEGADDR(a) \ + ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG)) +#define P3SEGADDR(a) \ + ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG)) +#define P4SEGADDR(a) \ + ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG)) +#endif /* 29BIT */ +#endif /* P1SEG */ + +/* Check if an address can be reached in 29 bits */ +#define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000) #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/include/asm-sh/atomic-grb.h b/include/asm-sh/atomic-grb.h new file mode 100644 index 000000000000..4c5b7dbfcedb --- /dev/null +++ b/include/asm-sh/atomic-grb.h @@ -0,0 +1,169 @@ +#ifndef __ASM_SH_ATOMIC_GRB_H +#define __ASM_SH_ATOMIC_GRB_H + +static inline void atomic_add(int i, atomic_t *v) +{ + int tmp; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " add %2, %0 \n\t" /* add */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (v) + : "r" (i) + : "memory" , "r0", "r1"); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + int tmp; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " sub %2, %0 \n\t" /* sub */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (v) + : "r" (i) + : "memory" , "r0", "r1"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + int tmp; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " add %2, %0 \n\t" /* add */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (v) + : "r" (i) + : "memory" , "r0", "r1"); + + return tmp; +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + int tmp; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " sub %2, %0 \n\t" /* sub */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (v) + : "r" (i) + : "memory", "r0", "r1"); + + return tmp; +} + +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + int tmp; + unsigned int _mask = ~mask; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " and %2, %0 \n\t" /* add */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (v) + : "r" (_mask) + : "memory" , "r0", "r1"); +} + +static 
inline void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + int tmp; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " or %2, %0 \n\t" /* or */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (v) + : "r" (mask) + : "memory" , "r0", "r1"); +} + +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" + " nop \n\t" + " mov r15, r1 \n\t" + " mov #-8, r15 \n\t" + " mov.l @%1, %0 \n\t" + " cmp/eq %2, %0 \n\t" + " bf 1f \n\t" + " mov.l %3, @%1 \n\t" + "1: mov r1, r15 \n\t" + : "=&r" (ret) + : "r" (v), "r" (old), "r" (new) + : "memory" , "r0", "r1" , "t"); + + return ret; +} + +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long tmp; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" + " nop \n\t" + " mov r15, r1 \n\t" + " mov #-12, r15 \n\t" + " mov.l @%2, %1 \n\t" + " mov %1, %0 \n\t" + " cmp/eq %4, %0 \n\t" + " bt/s 1f \n\t" + " add %3, %1 \n\t" + " mov.l %1, @%2 \n\t" + "1: mov r1, r15 \n\t" + : "=&r" (ret), "=&r" (tmp) + : "r" (v), "r" (a), "r" (u) + : "memory" , "r0", "r1" , "t"); + + return ret != u; +} +#endif /* __ASM_SH_ATOMIC_GRB_H */ diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index e12570b9339d..c043ef003028 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h @@ -17,7 +17,9 @@ typedef struct { volatile int counter; } atomic_t; #include <linux/compiler.h> #include <asm/system.h> -#ifdef CONFIG_CPU_SH4A +#if defined(CONFIG_GUSA_RB) +#include <asm/atomic-grb.h> +#elif defined(CONFIG_CPU_SH4A) #include <asm/atomic-llsc.h> #else #include <asm/atomic-irq.h> @@ -44,6 +46,7 @@ typedef struct { volatile int counter; } atomic_t; #define atomic_inc(v) atomic_add(1,(v)) #define atomic_dec(v) atomic_sub(1,(v)) +#ifndef CONFIG_GUSA_RB static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -58,8 +61,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - static inline int atomic_add_unless(atomic_t *v, int a, int u) { int ret; @@ -73,6 +74,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) return ret != u; } +#endif + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) /* Atomic operations are already serializing on SH */ diff --git a/include/asm-sh/auxvec.h b/include/asm-sh/auxvec.h index 1b6916e63e90..a6b9d4f4859e 100644 --- a/include/asm-sh/auxvec.h +++ b/include/asm-sh/auxvec.h @@ -6,6 +6,12 @@ * for more of them. */ +/* + * This entry gives some information about the FPU initialization + * performed by the kernel. + */ +#define AT_FPUCW 18 /* Used FPU control word. */ + #ifdef CONFIG_VSYSCALL /* * Only define this in the vsyscall case, the entry point to @@ -15,4 +21,16 @@ #define AT_SYSINFO_EHDR 33 #endif +/* + * More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the + * value is -1, then the cache doesn't exist. Otherwise: + * + * bit 0-3: Cache set-associativity; 0 means fully associative. + * bit 4-7: Log2 of cacheline size. + * bit 8-31: Size of the entire cache >> 8. 
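The AT_L*_CACHESHAPE auxiliary-vector entries added above pack three cache parameters into one word, as the comment spells out. A small decoder for that encoding; the sample value is invented, and on a real SH system it would come from the ELF auxiliary vector (glibc's getauxval(), for instance):

#include <stdio.h>

static void decode_cacheshape(unsigned long v)
{
	if ((long)v == -1) {
		printf("cache not present\n");
		return;
	}
	printf("associativity: %lu (0 = fully associative)\n", v & 0xf);
	printf("line size:     %lu bytes\n", 1UL << ((v >> 4) & 0xf));
	printf("total size:    %lu bytes\n", (v >> 8) << 8);	/* field stores size >> 8 */
}

int main(void)
{
	/* Invented example: 4-way, 32-byte lines, 32 KB. */
	unsigned long shape = ((32768UL >> 8) << 8) | (5UL << 4) | 4UL;

	decode_cacheshape(shape);
	return 0;
}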
+ */ +#define AT_L1I_CACHESHAPE 34 +#define AT_L1D_CACHESHAPE 35 +#define AT_L2_CACHESHAPE 36 + #endif /* __ASM_SH_AUXVEC_H */ diff --git a/include/asm-sh/bitops-grb.h b/include/asm-sh/bitops-grb.h new file mode 100644 index 000000000000..a5907b94395b --- /dev/null +++ b/include/asm-sh/bitops-grb.h @@ -0,0 +1,169 @@ +#ifndef __ASM_SH_BITOPS_GRB_H +#define __ASM_SH_BITOPS_GRB_H + +static inline void set_bit(int nr, volatile void * addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " or %2, %0 \n\t" /* or */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (a) + : "r" (mask) + : "memory" , "r0", "r1"); +} + +static inline void clear_bit(int nr, volatile void * addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = ~(1 << (nr & 0x1f)); + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " and %2, %0 \n\t" /* and */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (a) + : "r" (mask) + : "memory" , "r0", "r1"); +} + +static inline void change_bit(int nr, volatile void * addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%1, %0 \n\t" /* load old value */ + " xor %2, %0 \n\t" /* xor */ + " mov.l %0, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "+r" (a) + : "r" (mask) + : "memory" , "r0", "r1"); +} + +static inline int test_and_set_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-14, r15 \n\t" /* LOGIN: r15 = size */ + " mov.l @%2, %0 \n\t" /* load old value */ + " mov %0, %1 \n\t" + " tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */ + " mov #-1, %1 \n\t" /* retvat = -1 */ + " negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */ + " or %3, %0 \n\t" + " mov.l %0, @%2 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "=&r" (retval), + "+r" (a) + : "r" (mask) + : "memory" , "r0", "r1" ,"t"); + + return retval; +} + +static inline int test_and_clear_bit(int nr, volatile void * addr) +{ + int mask, retval,not_mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + not_mask = ~mask; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-14, r15 \n\t" /* LOGIN */ + " mov.l @%2, %0 \n\t" /* load old value */ + " mov %0, %1 \n\t" /* %1 = *a */ + " tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */ + " mov #-1, %1 \n\t" /* retvat = -1 */ + " negc %1, %1 \n\t" /* retval = (mask & *a) != 
0 */ + " and %4, %0 \n\t" + " mov.l %0, @%2 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "=&r" (retval), + "+r" (a) + : "r" (mask), + "r" (not_mask) + : "memory" , "r0", "r1", "t"); + + return retval; +} + +static inline int test_and_change_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-14, r15 \n\t" /* LOGIN */ + " mov.l @%2, %0 \n\t" /* load old value */ + " mov %0, %1 \n\t" /* %1 = *a */ + " tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */ + " mov #-1, %1 \n\t" /* retvat = -1 */ + " negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */ + " xor %3, %0 \n\t" + " mov.l %0, @%2 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (tmp), + "=&r" (retval), + "+r" (a) + : "r" (mask) + : "memory" , "r0", "r1", "t"); + + return retval; +} +#endif /* __ASM_SH_BITOPS_GRB_H */ diff --git a/include/asm-sh/bitops-irq.h b/include/asm-sh/bitops-irq.h new file mode 100644 index 000000000000..653a12750584 --- /dev/null +++ b/include/asm-sh/bitops-irq.h @@ -0,0 +1,91 @@ +#ifndef __ASM_SH_BITOPS_IRQ_H +#define __ASM_SH_BITOPS_IRQ_H + +static inline void set_bit(int nr, volatile void *addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + *a |= mask; + local_irq_restore(flags); +} + +static inline void clear_bit(int nr, volatile void *addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + *a &= ~mask; + local_irq_restore(flags); +} + +static inline void change_bit(int nr, volatile void *addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + *a ^= mask; + local_irq_restore(flags); +} + +static inline int test_and_set_bit(int nr, volatile void *addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + retval = (mask & *a) != 0; + *a |= mask; + local_irq_restore(flags); + + return retval; +} + +static inline int test_and_clear_bit(int nr, volatile void *addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + retval = (mask & *a) != 0; + *a &= ~mask; + local_irq_restore(flags); + + return retval; +} + +static inline int test_and_change_bit(int nr, volatile void *addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + local_irq_save(flags); + retval = (mask & *a) != 0; + *a ^= mask; + local_irq_restore(flags); + + return retval; +} + +#endif /* __ASM_SH_BITOPS_IRQ_H */ diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h index df805f20b267..b6ba5a60dec2 100644 --- a/include/asm-sh/bitops.h +++ b/include/asm-sh/bitops.h @@ -11,100 +11,22 @@ /* For __swab32 */ #include <asm/byteorder.h> -static inline void set_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a |= mask; - local_irq_restore(flags); -} +#ifdef 
CONFIG_GUSA_RB +#include <asm/bitops-grb.h> +#else +#include <asm/bitops-irq.h> +#endif + /* * clear_bit() doesn't provide any barrier for the compiler. */ #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -static inline void clear_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a &= ~mask; - local_irq_restore(flags); -} - -static inline void change_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a ^= mask; - local_irq_restore(flags); -} - -static inline int test_and_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a |= mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a &= ~mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_change_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a ^= mask; - local_irq_restore(flags); - - return retval; -} #include <asm-generic/bitops/non-atomic.h> +#ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) { unsigned long result; @@ -138,6 +60,31 @@ static inline unsigned long __ffs(unsigned long word) : "t"); return result; } +#else +static inline unsigned long ffz(unsigned long word) +{ + unsigned long result, __d2, __d3; + + __asm__("gettr tr0, %2\n\t" + "pta $+32, tr0\n\t" + "andi %1, 1, %3\n\t" + "beq %3, r63, tr0\n\t" + "pta $+4, tr0\n" + "0:\n\t" + "shlri.l %1, 1, %1\n\t" + "addi %0, 1, %0\n\t" + "andi %1, 1, %3\n\t" + "beqi %3, 1, tr0\n" + "1:\n\t" + "ptabs %2, tr0\n\t" + : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3) + : "0" (0L), "1" (word)); + + return result; +} + +#include <asm-generic/bitops/__ffs.h> +#endif #include <asm-generic/bitops/find.h> #include <asm-generic/bitops/ffs.h> diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h index a78d482e8b2f..c01718040166 100644 --- a/include/asm-sh/bug.h +++ b/include/asm-sh/bug.h @@ -3,7 +3,7 @@ #define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */ -#ifdef CONFIG_BUG +#ifdef CONFIG_GENERIC_BUG #define HAVE_ARCH_BUG #define HAVE_ARCH_WARN_ON @@ -72,12 +72,7 @@ do { \ unlikely(__ret_warn_on); \ }) -struct pt_regs; - -/* arch/sh/kernel/traps.c */ -void handle_BUG(struct pt_regs *); - -#endif /* CONFIG_BUG */ +#endif /* CONFIG_GENERIC_BUG */ #include <asm-generic/bug.h> diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h index b66139ff73fc..def8128b8b78 100644 --- a/include/asm-sh/bugs.h +++ b/include/asm-sh/bugs.h @@ -25,7 +25,7 @@ static void __init check_bugs(void) case CPU_SH7619: *p++ = '2'; break; - case CPU_SH7206: + case CPU_SH7203 ... CPU_SH7263: *p++ = '2'; *p++ = 'a'; break; @@ -35,7 +35,7 @@ static void __init check_bugs(void) case CPU_SH7750 ... CPU_SH4_501: *p++ = '4'; break; - case CPU_SH7770 ... CPU_SHX3: + case CPU_SH7763 ... 
CPU_SHX3: *p++ = '4'; *p++ = 'a'; break; @@ -48,9 +48,16 @@ static void __init check_bugs(void) *p++ = 's'; *p++ = 'p'; break; - default: - *p++ = '?'; - *p++ = '!'; + case CPU_SH5_101 ... CPU_SH5_103: + *p++ = '6'; + *p++ = '4'; + break; + case CPU_SH_NONE: + /* + * Specifically use CPU_SH_NONE rather than default:, + * so we're able to have the compiler whine about + * unhandled enumerations. + */ break; } diff --git a/include/asm-sh/byteorder.h b/include/asm-sh/byteorder.h index bff2b1382e01..0eb9904b6545 100644 --- a/include/asm-sh/byteorder.h +++ b/include/asm-sh/byteorder.h @@ -3,40 +3,55 @@ /* * Copyright (C) 1999 Niibe Yutaka + * Copyright (C) 2000, 2001 Paolo Alberelli */ - -#include <asm/types.h> #include <linux/compiler.h> +#include <linux/types.h> -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) { - __asm__("swap.b %0, %0\n\t" - "swap.w %0, %0\n\t" - "swap.b %0, %0" + __asm__( +#ifdef CONFIG_SUPERH32 + "swap.b %0, %0\n\t" + "swap.w %0, %0\n\t" + "swap.b %0, %0" +#else + "byterev %0, %0\n\t" + "shari %0, 32, %0" +#endif : "=r" (x) : "0" (x)); + return x; } -static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) +static inline __attribute_const__ __u16 ___arch__swab16(__u16 x) { - __asm__("swap.b %0, %0" + __asm__( +#ifdef CONFIG_SUPERH32 + "swap.b %0, %0" +#else + "byterev %0, %0\n\t" + "shari %0, 32, %0" + +#endif : "=r" (x) : "0" (x)); + return x; } -static inline __u64 ___arch__swab64(__u64 val) -{ - union { +static inline __u64 ___arch__swab64(__u64 val) +{ + union { struct { __u32 a,b; } s; __u64 u; } v, w; v.u = val; - w.s.b = ___arch__swab32(v.s.a); - w.s.a = ___arch__swab32(v.s.b); - return w.u; -} + w.s.b = ___arch__swab32(v.s.a); + w.s.a = ___arch__swab32(v.s.b); + return w.u; +} #define __arch__swab64(x) ___arch__swab64(x) #define __arch__swab32(x) ___arch__swab32(x) diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h index 01e5cf51ba9b..083419f47c65 100644 --- a/include/asm-sh/cache.h +++ b/include/asm-sh/cache.h @@ -12,11 +12,6 @@ #include <linux/init.h> #include <asm/cpu/cache.h> -#define SH_CACHE_VALID 1 -#define SH_CACHE_UPDATED 2 -#define SH_CACHE_COMBINED 4 -#define SH_CACHE_ASSOC 8 - #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data.read_mostly"))) diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h index 4bc8357e8892..67496ab0ef04 100644 --- a/include/asm-sh/checksum.h +++ b/include/asm-sh/checksum.h @@ -1,215 +1,5 @@ -#ifndef __ASM_SH_CHECKSUM_H -#define __ASM_SH_CHECKSUM_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka - */ - -#include <linux/in6.h> - -/* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) - * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic - * - * this function must be called with even lengths, except - * for the last fragment, which may be odd - * - * it's best to have buff aligned on a 32-bit boundary - */ -asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); - -/* - * the same as csum_partial, but copies from src while it - * checksums, and handles user-space pointer exceptions correctly, when needed. 
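
/*
 * [Editor's note -- illustrative sketch, not part of the patch]
 * The "suitable for feeding into itself" property described above means a
 * block can be checksummed in fragments: pass the running 32-bit sum back
 * into the next call and fold once at the very end.  The fragment names
 * and lengths below are illustrative; only the rule that non-final
 * fragments must have even length comes from the comment above.
 */
__wsum sum = csum_partial(frag1, frag1_len, 0);		/* frag1_len must be even   */
sum = csum_partial(frag2, frag2_len, sum);		/* last fragment may be odd */
__sum16 check = csum_fold(sum);				/* 16 bits, already complemented */
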
- * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ - -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); - -/* - * Note: when you get a NULL pointer exception here this means someone - * passed in an incorrect kernel address to one of these functions. - * - * If you use these functions directly please don't forget the - * access_ok(). - */ -static inline -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) -{ - return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); -} - -static inline -__wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) -{ - return csum_partial_copy_generic((__force const void *)src, dst, - len, sum, err_ptr, NULL); -} - -/* - * Fold a partial checksum - */ - -static inline __sum16 csum_fold(__wsum sum) -{ - unsigned int __dummy; - __asm__("swap.w %0, %1\n\t" - "extu.w %0, %0\n\t" - "extu.w %1, %1\n\t" - "add %1, %0\n\t" - "swap.w %0, %1\n\t" - "add %1, %0\n\t" - "not %0, %0\n\t" - : "=r" (sum), "=&r" (__dummy) - : "0" (sum) - : "t"); - return (__force __sum16)sum; -} - -/* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. - * - * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted - * for linux by * Arnt Gulbrandsen. - */ -static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) -{ - unsigned int sum, __dummy0, __dummy1; - - __asm__ __volatile__( - "mov.l @%1+, %0\n\t" - "mov.l @%1+, %3\n\t" - "add #-2, %2\n\t" - "clrt\n\t" - "1:\t" - "addc %3, %0\n\t" - "movt %4\n\t" - "mov.l @%1+, %3\n\t" - "dt %2\n\t" - "bf/s 1b\n\t" - " cmp/eq #1, %4\n\t" - "addc %3, %0\n\t" - "addc %2, %0" /* Here %2 is 0, add carry-bit */ - /* Since the input registers which are loaded with iph and ihl - are modified, we must also specify them as outputs, or gcc - will assume they contain their original values. 
*/ - : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1) - : "1" (iph), "2" (ihl) - : "t"); - - return csum_fold(sum); -} - -static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) -{ -#ifdef __LITTLE_ENDIAN__ - unsigned long len_proto = (proto + len) << 8; +#ifdef CONFIG_SUPERH32 +# include "checksum_32.h" #else - unsigned long len_proto = proto + len; +# include "checksum_64.h" #endif - __asm__("clrt\n\t" - "addc %0, %1\n\t" - "addc %2, %1\n\t" - "addc %3, %1\n\t" - "movt %0\n\t" - "add %1, %0" - : "=r" (sum), "=r" (len_proto) - : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum) - : "t"); - - return sum; -} - -/* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ -static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); -} - -/* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c - */ -static inline __sum16 ip_compute_csum(const void *buff, int len) -{ - return csum_fold(csum_partial(buff, len, 0)); -} - -#define _HAVE_ARCH_IPV6_CSUM -static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) -{ - unsigned int __dummy; - __asm__("clrt\n\t" - "mov.l @(0,%2), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(4,%2), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(8,%2), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(12,%2), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(0,%3), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(4,%3), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(8,%3), %1\n\t" - "addc %1, %0\n\t" - "mov.l @(12,%3), %1\n\t" - "addc %1, %0\n\t" - "addc %4, %0\n\t" - "addc %5, %0\n\t" - "movt %1\n\t" - "add %1, %0\n" - : "=r" (sum), "=&r" (__dummy) - : "r" (saddr), "r" (daddr), - "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) - : "t"); - - return csum_fold(sum); -} - -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -static inline __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) -{ - if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic((__force const void *)src, - dst, len, sum, NULL, err_ptr); - - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ -} -#endif /* __ASM_SH_CHECKSUM_H */ diff --git a/include/asm-sh/checksum_32.h b/include/asm-sh/checksum_32.h new file mode 100644 index 000000000000..4bc8357e8892 --- /dev/null +++ b/include/asm-sh/checksum_32.h @@ -0,0 +1,215 @@ +#ifndef __ASM_SH_CHECKSUM_H +#define __ASM_SH_CHECKSUM_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka + */ + +#include <linux/in6.h> + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums, and handles user-space pointer exceptions correctly, when needed. + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + +/* + * Note: when you get a NULL pointer exception here this means someone + * passed in an incorrect kernel address to one of these functions. + * + * If you use these functions directly please don't forget the + * access_ok(). + */ +static inline +__wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum) +{ + return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); +} + +static inline +__wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *err_ptr) +{ + return csum_partial_copy_generic((__force const void *)src, dst, + len, sum, err_ptr, NULL); +} + +/* + * Fold a partial checksum + */ + +static inline __sum16 csum_fold(__wsum sum) +{ + unsigned int __dummy; + __asm__("swap.w %0, %1\n\t" + "extu.w %0, %0\n\t" + "extu.w %1, %1\n\t" + "add %1, %0\n\t" + "swap.w %0, %1\n\t" + "add %1, %0\n\t" + "not %0, %0\n\t" + : "=r" (sum), "=&r" (__dummy) + : "0" (sum) + : "t"); + return (__force __sum16)sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + * + * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted + * for linux by * Arnt Gulbrandsen. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned int sum, __dummy0, __dummy1; + + __asm__ __volatile__( + "mov.l @%1+, %0\n\t" + "mov.l @%1+, %3\n\t" + "add #-2, %2\n\t" + "clrt\n\t" + "1:\t" + "addc %3, %0\n\t" + "movt %4\n\t" + "mov.l @%1+, %3\n\t" + "dt %2\n\t" + "bf/s 1b\n\t" + " cmp/eq #1, %4\n\t" + "addc %3, %0\n\t" + "addc %2, %0" /* Here %2 is 0, add carry-bit */ + /* Since the input registers which are loaded with iph and ihl + are modified, we must also specify them as outputs, or gcc + will assume they contain their original values. 
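
/*
 * [Editor's note -- illustrative sketch, not part of the patch]
 * What the csum_fold() assembly above computes, spelled out in portable C:
 * fold the 32-bit partial sum into 16 bits with end-around carry, then
 * return the ones' complement (csum_fold_portable is a hypothetical name).
 */
static inline unsigned short csum_fold_portable(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high half into low half   */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any carry that popped  */
	return (unsigned short)~sum;		/* complement, as the header wants */
}
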
*/ + : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1) + : "1" (iph), "2" (ihl) + : "t"); + + return csum_fold(sum); +} + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) +{ +#ifdef __LITTLE_ENDIAN__ + unsigned long len_proto = (proto + len) << 8; +#else + unsigned long len_proto = proto + len; +#endif + __asm__("clrt\n\t" + "addc %0, %1\n\t" + "addc %2, %1\n\t" + "addc %3, %1\n\t" + "movt %0\n\t" + "add %1, %0" + : "=r" (sum), "=r" (len_proto) + : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum) + : "t"); + + return sum; +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +static inline __sum16 ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + unsigned int __dummy; + __asm__("clrt\n\t" + "mov.l @(0,%2), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(4,%2), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(8,%2), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(12,%2), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(0,%3), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(4,%3), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(8,%3), %1\n\t" + "addc %1, %0\n\t" + "mov.l @(12,%3), %1\n\t" + "addc %1, %0\n\t" + "addc %4, %0\n\t" + "addc %5, %0\n\t" + "movt %1\n\t" + "add %1, %0\n" + : "=r" (sum), "=&r" (__dummy) + : "r" (saddr), "r" (daddr), + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) + : "t"); + + return csum_fold(sum); +} + +/* + * Copy and checksum to user + */ +#define HAVE_CSUM_COPY_USER +static inline __wsum csum_and_copy_to_user(const void *src, + void __user *dst, + int len, __wsum sum, + int *err_ptr) +{ + if (access_ok(VERIFY_WRITE, dst, len)) + return csum_partial_copy_generic((__force const void *)src, + dst, len, sum, NULL, err_ptr); + + if (len) + *err_ptr = -EFAULT; + + return (__force __wsum)-1; /* invalid checksum */ +} +#endif /* __ASM_SH_CHECKSUM_H */ diff --git a/include/asm-sh64/checksum.h b/include/asm-sh/checksum_64.h index ba594ccb42e5..9c62a031a8f5 100644 --- a/include/asm-sh64/checksum.h +++ b/include/asm-sh/checksum_64.h @@ -1,19 +1,16 @@ -#ifndef __ASM_SH64_CHECKSUM_H -#define __ASM_SH64_CHECKSUM_H +#ifndef __ASM_SH_CHECKSUM_64_H +#define __ASM_SH_CHECKSUM_64_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/checksum.h + * include/asm-sh/checksum_64.h * * Copyright (C) 2000, 2001 Paolo Alberelli * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
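
/*
 * [Editor's note -- illustrative sketch, not part of the patch]
 * Typical use of the pseudo-header helpers declared above: checksum the
 * transport segment with csum_partial(), then mix in the IPv4 pseudo
 * header (addresses, length, protocol) with csum_tcpudp_magic().  The
 * variable names are illustrative only.
 */
__wsum body = csum_partial(tcp_segment, tcp_len, 0);
__sum16 check = csum_tcpudp_magic(saddr, daddr, tcp_len, IPPROTO_TCP, body);
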
*/ -#include <asm/registers.h> - /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) @@ -78,5 +75,4 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) return csum_fold(csum_partial(buff, len, 0)); } -#endif /* __ASM_SH64_CHECKSUM_H */ - +#endif /* __ASM_SH_CHECKSUM_64_H */ diff --git a/include/asm-sh/cmpxchg-grb.h b/include/asm-sh/cmpxchg-grb.h new file mode 100644 index 000000000000..e2681abe764f --- /dev/null +++ b/include/asm-sh/cmpxchg-grb.h @@ -0,0 +1,70 @@ +#ifndef __ASM_SH_CMPXCHG_GRB_H +#define __ASM_SH_CMPXCHG_GRB_H + +static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) +{ + unsigned long retval; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " nop \n\t" + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-4, r15 \n\t" /* LOGIN */ + " mov.l @%1, %0 \n\t" /* load old value */ + " mov.l %2, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (retval), + "+r" (m) + : "r" (val) + : "memory", "r0", "r1"); + + return retval; +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + unsigned long retval; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN */ + " mov.b @%1, %0 \n\t" /* load old value */ + " extu.b %0, %0 \n\t" /* extend as unsigned */ + " mov.b %2, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (retval), + "+r" (m) + : "r" (val) + : "memory" , "r0", "r1"); + + return retval; +} + +static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, + unsigned long new) +{ + unsigned long retval; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " nop \n\t" + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-8, r15 \n\t" /* LOGIN */ + " mov.l @%1, %0 \n\t" /* load old value */ + " cmp/eq %0, %2 \n\t" + " bf 1f \n\t" /* if not equal */ + " mov.l %2, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (retval), + "+r" (m) + : "r" (new) + : "memory" , "r0", "r1", "t"); + + return retval; +} + +#endif /* __ASM_SH_CMPXCHG_GRB_H */ diff --git a/include/asm-sh/cmpxchg-irq.h b/include/asm-sh/cmpxchg-irq.h new file mode 100644 index 000000000000..43049ec0554b --- /dev/null +++ b/include/asm-sh/cmpxchg-irq.h @@ -0,0 +1,40 @@ +#ifndef __ASM_SH_CMPXCHG_IRQ_H +#define __ASM_SH_CMPXCHG_IRQ_H + +static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) +{ + unsigned long flags, retval; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + unsigned long flags, retval; + + local_irq_save(flags); + retval = *m; + *m = val & 0xff; + local_irq_restore(flags); + return retval; +} + +static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, + unsigned long new) +{ + __u32 retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); /* implies memory barrier */ + return retval; +} + +#endif /* __ASM_SH_CMPXCHG_IRQ_H */ diff --git a/include/asm-sh/cpu-sh2/addrspace.h b/include/asm-sh/cpu-sh2/addrspace.h index 8706c903c5a0..2b9ab93efa4e 100644 --- a/include/asm-sh/cpu-sh2/addrspace.h +++ b/include/asm-sh/cpu-sh2/addrspace.h @@ -10,7 +10,10 @@ #ifndef __ASM_CPU_SH2_ADDRSPACE_H 
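
/*
 * [Editor's note -- illustrative sketch, not part of the patch]
 * Both __cmpxchg_u32() flavours above are meant to provide the usual
 * compare-and-exchange contract: store 'new' only if *m still holds
 * 'old', and return the value that was previously in *m.  The IRQ
 * variant gets this by masking interrupts; the GRB variant appears to
 * rely on the gUSA-style convention (r0 = end of the sequence, r15 =
 * negative size of the region) so that the short sequence behaves
 * atomically.  Higher-level helpers are then built as retry loops on top
 * of that contract, e.g. (name and shape are illustrative):
 */
static inline int add_unless_sketch(volatile int *m, int a, int u)
{
	int cur, old;

	cur = *m;
	for (;;) {
		if (cur == u)		/* hit the excluded value: no add */
			break;
		old = __cmpxchg_u32(m, cur, cur + a);
		if (old == cur)		/* nobody raced us: add succeeded */
			break;
		cur = old;		/* someone else changed *m: retry */
	}
	return cur != u;
}
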
#define __ASM_CPU_SH2_ADDRSPACE_H -/* Should fill here */ +#define P0SEG 0x00000000 +#define P1SEG 0x80000000 +#define P2SEG 0xa0000000 +#define P3SEG 0xc0000000 +#define P4SEG 0xe0000000 #endif /* __ASM_CPU_SH2_ADDRSPACE_H */ - diff --git a/include/asm-sh/cpu-sh2/cache.h b/include/asm-sh/cpu-sh2/cache.h index f02ba7a672b2..4e0b16500686 100644 --- a/include/asm-sh/cpu-sh2/cache.h +++ b/include/asm-sh/cpu-sh2/cache.h @@ -12,9 +12,13 @@ #define L1_CACHE_SHIFT 4 +#define SH_CACHE_VALID 1 +#define SH_CACHE_UPDATED 2 +#define SH_CACHE_COMBINED 4 +#define SH_CACHE_ASSOC 8 + #if defined(CONFIG_CPU_SUBTYPE_SH7619) -#define CCR1 0xffffffec -#define CCR CCR1 +#define CCR 0xffffffec #define CCR_CACHE_CE 0x01 /* Cache enable */ #define CCR_CACHE_WT 0x06 /* CCR[bit1=1,bit2=1] */ diff --git a/include/asm-sh/cpu-sh2/rtc.h b/include/asm-sh/cpu-sh2/rtc.h new file mode 100644 index 000000000000..39e2d6e94782 --- /dev/null +++ b/include/asm-sh/cpu-sh2/rtc.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SH_CPU_SH2_RTC_H +#define __ASM_SH_CPU_SH2_RTC_H + +#define rtc_reg_size sizeof(u16) +#define RTC_BIT_INVERTED 0 +#define RTC_DEF_CAPABILITIES 0UL + +#endif /* __ASM_SH_CPU_SH2_RTC_H */ diff --git a/include/asm-sh/cpu-sh2a/addrspace.h b/include/asm-sh/cpu-sh2a/addrspace.h index 3d2e9aa21522..795ddd6856a3 100644 --- a/include/asm-sh/cpu-sh2a/addrspace.h +++ b/include/asm-sh/cpu-sh2a/addrspace.h @@ -1 +1,10 @@ -#include <asm/cpu-sh2/addrspace.h> +#ifndef __ASM_SH_CPU_SH2A_ADDRSPACE_H +#define __ASM_SH_CPU_SH2A_ADDRSPACE_H + +#define P0SEG 0x00000000 +#define P1SEG 0x00000000 +#define P2SEG 0x20000000 +#define P3SEG 0x00000000 +#define P4SEG 0x80000000 + +#endif /* __ASM_SH_CPU_SH2A_ADDRSPACE_H */ diff --git a/include/asm-sh/cpu-sh2a/cache.h b/include/asm-sh/cpu-sh2a/cache.h index 3e4b9e480982..afe228b3f493 100644 --- a/include/asm-sh/cpu-sh2a/cache.h +++ b/include/asm-sh/cpu-sh2a/cache.h @@ -12,11 +12,13 @@ #define L1_CACHE_SHIFT 4 -#define CCR1 0xfffc1000 -#define CCR2 0xfffc1004 +#define SH_CACHE_VALID 1 +#define SH_CACHE_UPDATED 2 +#define SH_CACHE_COMBINED 4 +#define SH_CACHE_ASSOC 8 -/* CCR1 behaves more like the traditional CCR */ -#define CCR CCR1 +#define CCR 0xfffc1000 /* CCR1 */ +#define CCR2 0xfffc1004 /* * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. 
All others not @@ -36,4 +38,3 @@ #define CCR_CACHE_INVALIDATE (CCR_CACHE_OCI | CCR_CACHE_ICI) #endif /* __ASM_CPU_SH2A_CACHE_H */ - diff --git a/include/asm-sh/cpu-sh2a/freq.h b/include/asm-sh/cpu-sh2a/freq.h index e518fff6d10f..830fd43b6cdc 100644 --- a/include/asm-sh/cpu-sh2a/freq.h +++ b/include/asm-sh/cpu-sh2a/freq.h @@ -10,9 +10,7 @@ #ifndef __ASM_CPU_SH2A_FREQ_H #define __ASM_CPU_SH2A_FREQ_H -#if defined(CONFIG_CPU_SUBTYPE_SH7206) #define FREQCR 0xfffe0010 -#endif #endif /* __ASM_CPU_SH2A_FREQ_H */ diff --git a/include/asm-sh/cpu-sh2a/rtc.h b/include/asm-sh/cpu-sh2a/rtc.h new file mode 100644 index 000000000000..afb511e2bed7 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/rtc.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SH_CPU_SH2A_RTC_H +#define __ASM_SH_CPU_SH2A_RTC_H + +#define rtc_reg_size sizeof(u16) +#define RTC_BIT_INVERTED 0 +#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR + +#endif /* __ASM_SH_CPU_SH2A_RTC_H */ diff --git a/include/asm-sh/cpu-sh3/addrspace.h b/include/asm-sh/cpu-sh3/addrspace.h index 872e9e1b548c..0f94726c7d62 100644 --- a/include/asm-sh/cpu-sh3/addrspace.h +++ b/include/asm-sh/cpu-sh3/addrspace.h @@ -10,7 +10,10 @@ #ifndef __ASM_CPU_SH3_ADDRSPACE_H #define __ASM_CPU_SH3_ADDRSPACE_H -/* Should fill here */ +#define P0SEG 0x00000000 +#define P1SEG 0x80000000 +#define P2SEG 0xa0000000 +#define P3SEG 0xc0000000 +#define P4SEG 0xe0000000 #endif /* __ASM_CPU_SH3_ADDRSPACE_H */ - diff --git a/include/asm-sh/cpu-sh3/cache.h b/include/asm-sh/cpu-sh3/cache.h index 255016fc91f0..56bd838b7db4 100644 --- a/include/asm-sh/cpu-sh3/cache.h +++ b/include/asm-sh/cpu-sh3/cache.h @@ -12,6 +12,11 @@ #define L1_CACHE_SHIFT 4 +#define SH_CACHE_VALID 1 +#define SH_CACHE_UPDATED 2 +#define SH_CACHE_COMBINED 4 +#define SH_CACHE_ASSOC 8 + #define CCR 0xffffffec /* Address of Cache Control Register */ #define CCR_CACHE_CE 0x01 /* Cache Enable */ @@ -28,7 +33,8 @@ #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7710) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) + defined(CONFIG_CPU_SUBTYPE_SH7720) || \ + defined(CONFIG_CPU_SUBTYPE_SH7721) #define CCR3 0xa40000b4 #define CCR_CACHE_16KB 0x00010000 #define CCR_CACHE_32KB 0x00020000 diff --git a/include/asm-sh/cpu-sh3/dma.h b/include/asm-sh/cpu-sh3/dma.h index 54bfece328c2..092ff9d872c3 100644 --- a/include/asm-sh/cpu-sh3/dma.h +++ b/include/asm-sh/cpu-sh3/dma.h @@ -2,7 +2,9 @@ #define __ASM_CPU_SH3_DMA_H -#if defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7709) +#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ + defined(CONFIG_CPU_SUBTYPE_SH7721) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) #define SH_DMAC_BASE 0xa4010020 #define DMTE0_IRQ 48 diff --git a/include/asm-sh/cpu-sh3/freq.h b/include/asm-sh/cpu-sh3/freq.h index 0a054b53b9de..53c62302b2e3 100644 --- a/include/asm-sh/cpu-sh3/freq.h +++ b/include/asm-sh/cpu-sh3/freq.h @@ -10,7 +10,12 @@ #ifndef __ASM_CPU_SH3_FREQ_H #define __ASM_CPU_SH3_FREQ_H +#ifdef CONFIG_CPU_SUBTYPE_SH7712 +#define FRQCR 0xA415FF80 +#else #define FRQCR 0xffffff80 +#endif + #define MIN_DIVISOR_NR 0 #define MAX_DIVISOR_NR 4 diff --git a/include/asm-sh/cpu-sh3/gpio.h b/include/asm-sh/cpu-sh3/gpio.h index 48770c1c7bdf..4e53eb314b8f 100644 --- a/include/asm-sh/cpu-sh3/gpio.h +++ b/include/asm-sh/cpu-sh3/gpio.h @@ -12,7 +12,8 @@ #ifndef _CPU_SH3_GPIO_H #define _CPU_SH3_GPIO_H -#if defined(CONFIG_CPU_SUBTYPE_SH7720) +#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ + defined(CONFIG_CPU_SUBTYPE_SH7721) /* Control registers */ #define PORT_PACR 0xA4050100UL diff --git 
a/include/asm-sh/cpu-sh3/mmu_context.h b/include/asm-sh/cpu-sh3/mmu_context.h index 16c2d63b7e39..ab09da73ce77 100644 --- a/include/asm-sh/cpu-sh3/mmu_context.h +++ b/include/asm-sh/cpu-sh3/mmu_context.h @@ -33,7 +33,8 @@ defined(CONFIG_CPU_SUBTYPE_SH7709) || \ defined(CONFIG_CPU_SUBTYPE_SH7710) || \ defined(CONFIG_CPU_SUBTYPE_SH7712) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) + defined(CONFIG_CPU_SUBTYPE_SH7720) || \ + defined(CONFIG_CPU_SUBTYPE_SH7721) #define INTEVT 0xa4000000 /* INTEVTE2(0xa4000000) */ #else #define INTEVT 0xffffffd8 diff --git a/include/asm-sh/cpu-sh3/rtc.h b/include/asm-sh/cpu-sh3/rtc.h new file mode 100644 index 000000000000..319404aaee37 --- /dev/null +++ b/include/asm-sh/cpu-sh3/rtc.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SH_CPU_SH3_RTC_H +#define __ASM_SH_CPU_SH3_RTC_H + +#define rtc_reg_size sizeof(u16) +#define RTC_BIT_INVERTED 0 /* No bug on SH7708, SH7709A */ +#define RTC_DEF_CAPABILITIES 0UL + +#endif /* __ASM_SH_CPU_SH3_RTC_H */ diff --git a/include/asm-sh/cpu-sh3/timer.h b/include/asm-sh/cpu-sh3/timer.h index 7b795ac5477c..793acf12aa08 100644 --- a/include/asm-sh/cpu-sh3/timer.h +++ b/include/asm-sh/cpu-sh3/timer.h @@ -23,12 +23,13 @@ * --------------------------------------------------------------------------- */ -#if !defined(CONFIG_CPU_SUBTYPE_SH7720) +#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && !defined(CONFIG_CPU_SUBTYPE_SH7721) #define TMU_TOCR 0xfffffe90 /* Byte access */ #endif #if defined(CONFIG_CPU_SUBTYPE_SH7710) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) + defined(CONFIG_CPU_SUBTYPE_SH7720) || \ + defined(CONFIG_CPU_SUBTYPE_SH7721) #define TMU_012_TSTR 0xa412fe92 /* Byte access */ #define TMU0_TCOR 0xa412fe94 /* Long access */ @@ -57,7 +58,7 @@ #define TMU2_TCOR 0xfffffeac /* Long access */ #define TMU2_TCNT 0xfffffeb0 /* Long access */ #define TMU2_TCR 0xfffffeb4 /* Word access */ -#if !defined(CONFIG_CPU_SUBTYPE_SH7720) +#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && !defined(CONFIG_CPU_SUBTYPE_SH7721) #define TMU2_TCPR2 0xfffffeb8 /* Long access */ #endif #endif diff --git a/include/asm-sh/cpu-sh3/ubc.h b/include/asm-sh/cpu-sh3/ubc.h index 18467c574534..4e6381d5ff7a 100644 --- a/include/asm-sh/cpu-sh3/ubc.h +++ b/include/asm-sh/cpu-sh3/ubc.h @@ -12,7 +12,8 @@ #define __ASM_CPU_SH3_UBC_H #if defined(CONFIG_CPU_SUBTYPE_SH7710) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) + defined(CONFIG_CPU_SUBTYPE_SH7720) || \ + defined(CONFIG_CPU_SUBTYPE_SH7721) #define UBC_BARA 0xa4ffffb0 #define UBC_BAMRA 0xa4ffffb4 #define UBC_BBRA 0xa4ffffb8 diff --git a/include/asm-sh/cpu-sh4/addrspace.h b/include/asm-sh/cpu-sh4/addrspace.h index bb2e1b03060c..a3fa733c1c7d 100644 --- a/include/asm-sh/cpu-sh4/addrspace.h +++ b/include/asm-sh/cpu-sh4/addrspace.h @@ -10,6 +10,12 @@ #ifndef __ASM_CPU_SH4_ADDRSPACE_H #define __ASM_CPU_SH4_ADDRSPACE_H +#define P0SEG 0x00000000 +#define P1SEG 0x80000000 +#define P2SEG 0xa0000000 +#define P3SEG 0xc0000000 +#define P4SEG 0xe0000000 + /* Detailed P4SEG */ #define P4SEG_STORE_QUE (P4SEG) #define P4SEG_IC_ADDR 0xf0000000 diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h index f92b20a0983d..1c61ebf5c8e3 100644 --- a/include/asm-sh/cpu-sh4/cache.h +++ b/include/asm-sh/cpu-sh4/cache.h @@ -12,6 +12,11 @@ #define L1_CACHE_SHIFT 5 +#define SH_CACHE_VALID 1 +#define SH_CACHE_UPDATED 2 +#define SH_CACHE_COMBINED 4 +#define SH_CACHE_ASSOC 8 + #define CCR 0xff00001c /* Address of Cache Control Register */ #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else 
writeback)*/ diff --git a/include/asm-sh/cpu-sh4/fpu.h b/include/asm-sh/cpu-sh4/fpu.h new file mode 100644 index 000000000000..febef7342528 --- /dev/null +++ b/include/asm-sh/cpu-sh4/fpu.h @@ -0,0 +1,32 @@ +/* + * linux/arch/sh/kernel/cpu/sh4/sh4_fpu.h + * + * Copyright (C) 2006 STMicroelectronics Limited + * Author: Carl Shaw <carl.shaw@st.com> + * + * May be copied or modified under the terms of the GNU General Public + * License Version 2. See linux/COPYING for more information. + * + * Definitions for SH4 FPU operations + */ + +#ifndef __CPU_SH4_FPU_H +#define __CPU_SH4_FPU_H + +#define FPSCR_ENABLE_MASK 0x00000f80UL + +#define FPSCR_FMOV_DOUBLE (1<<1) + +#define FPSCR_CAUSE_INEXACT (1<<12) +#define FPSCR_CAUSE_UNDERFLOW (1<<13) +#define FPSCR_CAUSE_OVERFLOW (1<<14) +#define FPSCR_CAUSE_DIVZERO (1<<15) +#define FPSCR_CAUSE_INVALID (1<<16) +#define FPSCR_CAUSE_ERROR (1<<17) + +#define FPSCR_DBL_PRECISION (1<<19) +#define FPSCR_ROUNDING_MODE(x) ((x >> 20) & 3) +#define FPSCR_RM_NEAREST (0) +#define FPSCR_RM_ZERO (1) + +#endif diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h index dc1d32a86374..1ac10b9a078f 100644 --- a/include/asm-sh/cpu-sh4/freq.h +++ b/include/asm-sh/cpu-sh4/freq.h @@ -16,7 +16,8 @@ #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c #define IrDACLKCR 0xa4150010 -#elif defined(CONFIG_CPU_SUBTYPE_SH7780) +#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) #define FRQCR 0xffc80000 #elif defined(CONFIG_CPU_SUBTYPE_SH7785) #define FRQCR0 0xffc80000 diff --git a/include/asm-sh/cpu-sh4/mmu_context.h b/include/asm-sh/cpu-sh4/mmu_context.h index 979acddc0f8e..9ea8eb27b18e 100644 --- a/include/asm-sh/cpu-sh4/mmu_context.h +++ b/include/asm-sh/cpu-sh4/mmu_context.h @@ -22,12 +22,20 @@ #define MMU_UTLB_ADDRESS_ARRAY 0xF6000000 #define MMU_PAGE_ASSOC_BIT 0x80 +#define MMUCR_TI (1<<2) + #ifdef CONFIG_X2TLB #define MMUCR_ME (1 << 7) #else #define MMUCR_ME (0) #endif +#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40) +#define MMUCR_SE (1 << 4) +#else +#define MMUCR_SE (0) +#endif + #ifdef CONFIG_SH_STORE_QUEUES #define MMUCR_SQMD (1 << 9) #else @@ -35,7 +43,7 @@ #endif #define MMU_NTLB_ENTRIES 64 -#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME) +#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE) #define MMU_ITLB_DATA_ARRAY 0xF3000000 #define MMU_UTLB_DATA_ARRAY 0xF7000000 diff --git a/include/asm-sh/cpu-sh4/rtc.h b/include/asm-sh/cpu-sh4/rtc.h new file mode 100644 index 000000000000..f3d0f53275e4 --- /dev/null +++ b/include/asm-sh/cpu-sh4/rtc.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SH_CPU_SH4_RTC_H +#define __ASM_SH_CPU_SH4_RTC_H + +#define rtc_reg_size sizeof(u32) +#define RTC_BIT_INVERTED 0x40 /* bug on SH7750, SH7750S */ +#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR + +#endif /* __ASM_SH_CPU_SH4_RTC_H */ diff --git a/include/asm-sh/cpu-sh5/addrspace.h b/include/asm-sh/cpu-sh5/addrspace.h new file mode 100644 index 000000000000..dc36b9a03af6 --- /dev/null +++ b/include/asm-sh/cpu-sh5/addrspace.h @@ -0,0 +1,11 @@ +#ifndef __ASM_SH_CPU_SH5_ADDRSPACE_H +#define __ASM_SH_CPU_SH5_ADDRSPACE_H + +#define PHYS_PERIPHERAL_BLOCK 0x09000000 +#define PHYS_DMAC_BLOCK 0x0e000000 +#define PHYS_PCI_BLOCK 0x60000000 +#define PHYS_EMI_BLOCK 0xff000000 + +/* No segmentation.. 
*/ + +#endif /* __ASM_SH_CPU_SH5_ADDRSPACE_H */ diff --git a/include/asm-sh64/cache.h b/include/asm-sh/cpu-sh5/cache.h index a4f36f0036e1..ed050ab526f2 100644 --- a/include/asm-sh64/cache.h +++ b/include/asm-sh/cpu-sh5/cache.h @@ -1,33 +1,30 @@ -#ifndef __ASM_SH64_CACHE_H -#define __ASM_SH64_CACHE_H +#ifndef __ASM_SH_CPU_SH5_CACHE_H +#define __ASM_SH_CPU_SH5_CACHE_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/cache.h + * include/asm-sh/cpu-sh5/cache.h * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003, 2004 Paul Mundt * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ -#include <asm/cacheflush.h> #define L1_CACHE_SHIFT 5 -/* bytes per L1 cache line */ -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1)) -#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK) -#define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10) - -#ifdef MODULE -#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES))) -#else -#define __cacheline_aligned \ - __attribute__((__aligned__(L1_CACHE_BYTES), \ - __section__(".data.cacheline_aligned"))) -#endif + +/* Valid and Dirty bits */ +#define SH_CACHE_VALID (1LL<<0) +#define SH_CACHE_UPDATED (1LL<<57) + +/* Unimplemented compat bits.. */ +#define SH_CACHE_COMBINED 0 +#define SH_CACHE_ASSOC 0 + +/* Cache flags */ +#define SH_CACHE_MODE_WT (1LL<<0) +#define SH_CACHE_MODE_WB (1LL<<1) /* * Control Registers. @@ -58,7 +55,6 @@ #define OCCR1_NOLOCK 0x0 /* Set No Locking */ - /* * SH-5 * A bit of description here, for neff=32. @@ -77,43 +73,6 @@ * */ -/* Valid and Dirty bits */ -#define SH_CACHE_VALID (1LL<<0) -#define SH_CACHE_UPDATED (1LL<<57) - -/* Cache flags */ -#define SH_CACHE_MODE_WT (1LL<<0) -#define SH_CACHE_MODE_WB (1LL<<1) - -#ifndef __ASSEMBLY__ - -/* - * Cache information structure. - * - * Defined for both I and D cache, per-processor. - */ -struct cache_info { - unsigned int ways; - unsigned int sets; - unsigned int linesz; - - unsigned int way_shift; - unsigned int entry_shift; - unsigned int set_shift; - unsigned int way_step_shift; - unsigned int asid_shift; - - unsigned int way_ofs; - - unsigned int asid_mask; - unsigned int idx_mask; - unsigned int epn_mask; - - unsigned long flags; -}; - -#endif /* __ASSEMBLY__ */ - /* Instruction cache */ #define CACHE_IC_ADDRESS_ARRAY 0x01000000 @@ -130,10 +89,9 @@ struct cache_info { /* Mask to select synonym bit(s) */ #define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT) - /* * Instruction cache can't be invalidated based on physical addresses. * No Instruction Cache defines required, then. 
*/ -#endif /* __ASM_SH64_CACHE_H */ +#endif /* __ASM_SH_CPU_SH5_CACHE_H */ diff --git a/include/asm-sh64/cacheflush.h b/include/asm-sh/cpu-sh5/cacheflush.h index 1e53a47bdc97..98edb5b1da32 100644 --- a/include/asm-sh64/cacheflush.h +++ b/include/asm-sh/cpu-sh5/cacheflush.h @@ -1,5 +1,5 @@ -#ifndef __ASM_SH64_CACHEFLUSH_H -#define __ASM_SH64_CACHEFLUSH_H +#ifndef __ASM_SH_CPU_SH5_CACHEFLUSH_H +#define __ASM_SH_CPU_SH5_CACHEFLUSH_H #ifndef __ASSEMBLY__ @@ -26,25 +26,10 @@ extern void flush_icache_user_range(struct vm_area_struct *vma, #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_cache_vmap(start, end) flush_cache_all() -#define flush_cache_vunmap(start, end) flush_cache_all() - #define flush_icache_page(vma, page) do { } while (0) - -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page));\ - memcpy(dst, src, len); \ - flush_icache_user_range(vma, page, vaddr, len); \ - } while (0) - -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page));\ - memcpy(dst, src, len); \ - } while (0) +#define p3_cache_init() do { } while (0) #endif /* __ASSEMBLY__ */ -#endif /* __ASM_SH64_CACHEFLUSH_H */ +#endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */ diff --git a/include/asm-sh/cpu-sh5/dma.h b/include/asm-sh/cpu-sh5/dma.h new file mode 100644 index 000000000000..7bf6bb3d35ed --- /dev/null +++ b/include/asm-sh/cpu-sh5/dma.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH_CPU_SH5_DMA_H +#define __ASM_SH_CPU_SH5_DMA_H + +/* Nothing yet */ + +#endif /* __ASM_SH_CPU_SH5_DMA_H */ diff --git a/include/asm-sh64/irq.h b/include/asm-sh/cpu-sh5/irq.h index 5c9e6a873aeb..f0f0756e6e84 100644 --- a/include/asm-sh64/irq.h +++ b/include/asm-sh/cpu-sh5/irq.h @@ -1,15 +1,14 @@ -#ifndef __ASM_SH64_IRQ_H -#define __ASM_SH64_IRQ_H +#ifndef __ASM_SH_CPU_SH5_IRQ_H +#define __ASM_SH_CPU_SH5_IRQ_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/irq.h + * include/asm-sh/cpu-sh5/irq.h * * Copyright (C) 2000, 2001 Paolo Alberelli * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ @@ -92,9 +91,6 @@ #define NR_EXT_IRQS 0 #endif -#define NR_IRQS (NR_INTC_IRQS+NR_EXT_IRQS) - - /* Default IRQs, fixed */ #define TIMER_IRQ IRQ_TUNI0 #define RTC_IRQ IRQ_CUI @@ -116,29 +112,6 @@ extern int intc_evt_to_irq[(0xE20/0x20)+1]; int intc_irq_describe(char* p, int irq); +extern int platform_int_priority[NR_INTC_IRQS]; -#define irq_canonicalize(irq) (irq) - -#ifdef CONFIG_SH_CAYMAN -int cayman_irq_demux(int evt); -int cayman_irq_describe(char* p, int irq); -#define irq_demux(x) cayman_irq_demux(x) -#define irq_describe(p, x) cayman_irq_describe(p, x) -#else -#define irq_demux(x) (intc_evt_to_irq[x]) -#define irq_describe(p, x) intc_irq_describe(p, x) -#endif - -/* - * Function for "on chip support modules". - */ - -/* - * SH-5 supports Priority based interrupts only. - * Interrupt priorities are defined at platform level. 
- */ -#define set_ipr_data(a, b, c, d) -#define make_ipr_irq(a) -#define make_imask_irq(a) - -#endif /* __ASM_SH64_IRQ_H */ +#endif /* __ASM_SH_CPU_SH5_IRQ_H */ diff --git a/include/asm-sh/cpu-sh5/mmu_context.h b/include/asm-sh/cpu-sh5/mmu_context.h new file mode 100644 index 000000000000..df857fc09960 --- /dev/null +++ b/include/asm-sh/cpu-sh5/mmu_context.h @@ -0,0 +1,27 @@ +#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H +#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H + +/* Common defines */ +#define TLB_STEP 0x00000010 +#define TLB_PTEH 0x00000000 +#define TLB_PTEL 0x00000008 + +/* PTEH defines */ +#define PTEH_ASID_SHIFT 2 +#define PTEH_VALID 0x0000000000000001 +#define PTEH_SHARED 0x0000000000000002 +#define PTEH_MATCH_ASID 0x00000000000003ff + +#ifndef __ASSEMBLY__ +/* This has to be a common function because the next location to fill + * information is shared. */ +extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte); + +/* Profiling counter. */ +#ifdef CONFIG_SH64_PROC_TLB +extern unsigned long long calls_to_do_fast_page_fault; +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */ diff --git a/include/asm-sh64/registers.h b/include/asm-sh/cpu-sh5/registers.h index 7eec666acf84..6664ea6f1566 100644 --- a/include/asm-sh64/registers.h +++ b/include/asm-sh/cpu-sh5/registers.h @@ -1,15 +1,15 @@ -#ifndef __ASM_SH64_REGISTERS_H -#define __ASM_SH64_REGISTERS_H +#ifndef __ASM_SH_CPU_SH5_REGISTERS_H +#define __ASM_SH_CPU_SH5_REGISTERS_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/registers.h + * include/asm-sh/cpu-sh5/registers.h * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ #ifdef __ASSEMBLY__ @@ -103,4 +103,4 @@ #define __USR __str(USR) #endif /* __ASSEMBLY__ */ -#endif /* __ASM_SH64_REGISTERS_H */ +#endif /* __ASM_SH_CPU_SH5_REGISTERS_H */ diff --git a/include/asm-sh/cpu-sh5/rtc.h b/include/asm-sh/cpu-sh5/rtc.h new file mode 100644 index 000000000000..12ea0ed144e1 --- /dev/null +++ b/include/asm-sh/cpu-sh5/rtc.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SH_CPU_SH5_RTC_H +#define __ASM_SH_CPU_SH5_RTC_H + +#define rtc_reg_size sizeof(u32) +#define RTC_BIT_INVERTED 0 /* The SH-5 RTC is surprisingly sane! */ +#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR + +#endif /* __ASM_SH_CPU_SH5_RTC_H */ diff --git a/include/asm-sh/cpu-sh5/timer.h b/include/asm-sh/cpu-sh5/timer.h new file mode 100644 index 000000000000..88da9b341a36 --- /dev/null +++ b/include/asm-sh/cpu-sh5/timer.h @@ -0,0 +1,4 @@ +#ifndef __ASM_SH_CPU_SH5_TIMER_H +#define __ASM_SH_CPU_SH5_TIMER_H + +#endif /* __ASM_SH_CPU_SH5_TIMER_H */ diff --git a/include/asm-sh/delay.h b/include/asm-sh/delay.h index db599b2a5a9c..031db84f2aa1 100644 --- a/include/asm-sh/delay.h +++ b/include/asm-sh/delay.h @@ -6,7 +6,7 @@ * * Delay routines calling functions in arch/sh/lib/delay.c */ - + extern void __bad_udelay(void); extern void __bad_ndelay(void); @@ -15,13 +15,17 @@ extern void __ndelay(unsigned long nsecs); extern void __const_udelay(unsigned long usecs); extern void __delay(unsigned long loops); +#ifdef CONFIG_SUPERH32 #define udelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? 
__bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ __udelay(n)) - #define ndelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ __ndelay(n)) +#else +extern void udelay(unsigned long usecs); +extern void ndelay(unsigned long nsecs); +#endif #endif /* __ASM_SH_DELAY_H */ diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index fcea067f7a9c..22cc419389fe 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h @@ -8,11 +8,6 @@ extern struct bus_type pci_bus_type; -/* arch/sh/mm/consistent.c */ -extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle); -extern void consistent_free(void *vaddr, size_t size); -extern void consistent_sync(void *vaddr, size_t size, int direction); - #define dma_supported(dev, mask) (1) static inline int dma_set_mask(struct device *dev, u64 mask) @@ -25,44 +20,19 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - if (sh_mv.mv_consistent_alloc) { - void *ret; +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); - ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag); - if (ret != NULL) - return ret; - } - - return consistent_alloc(flag, size, dma_handle); -} - -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - if (sh_mv.mv_consistent_free) { - int ret; - - ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle); - if (ret == 0) - return; - } +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); - consistent_free(vaddr, size); -} +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_is_consistent(d, h) (1) -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - consistent_sync(vaddr, size, (int)dir); -} - static inline dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir) @@ -205,4 +175,18 @@ static inline int dma_mapping_error(dma_addr_t dma_addr) { return dma_addr == 0; } + +#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY + +extern int +dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, + dma_addr_t device_addr, size_t size, int flags); + +extern void +dma_release_declared_memory(struct device *dev); + +extern void * +dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); + #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h index 12cc4b392bf0..05092da1aa59 100644 --- a/include/asm-sh/elf.h +++ b/include/asm-sh/elf.h @@ -5,7 +5,7 @@ #include <asm/ptrace.h> #include <asm/user.h> -/* SH relocation types */ +/* SH (particularly SHcompact) relocation types */ #define R_SH_NONE 0 #define R_SH_DIR32 1 #define R_SH_REL32 2 @@ -43,6 +43,11 @@ #define R_SH_RELATIVE 165 #define R_SH_GOTOFF 166 #define R_SH_GOTPC 167 +/* SHmedia relocs */ +#define R_SH_IMM_LOW16 246 +#define R_SH_IMM_LOW16_PCREL 247 +#define R_SH_IMM_MEDLOW16 248 +#define R_SH_IMM_MEDLOW16_PCREL 249 /* Keep this the last entry. 
*/ #define R_SH_NUM 256 @@ -58,11 +63,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_fpu_struct elf_fpregset_t; /* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ( (x)->e_machine == EM_SH ) - -/* * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS32 @@ -73,6 +73,12 @@ typedef struct user_fpu_struct elf_fpregset_t; #endif #define ELF_ARCH EM_SH +#ifdef __KERNEL__ +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ( (x)->e_machine == EM_SH ) + #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE @@ -83,7 +89,6 @@ typedef struct user_fpu_struct elf_fpregset_t; #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) - #define ELF_CORE_COPY_REGS(_dest,_regs) \ memcpy((char *) &_dest, (char *) _regs, \ sizeof(struct pt_regs)); @@ -101,16 +106,38 @@ typedef struct user_fpu_struct elf_fpregset_t; For the moment, we have only optimizations for the Intel generations, but that could change... */ -#define ELF_PLATFORM (NULL) +#define ELF_PLATFORM (utsname()->machine) +#ifdef __SH5__ +#define ELF_PLAT_INIT(_r, load_addr) \ + do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ + _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ + _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ + _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \ + _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \ + _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \ + _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \ + _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \ + _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \ + _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \ + _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \ + _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \ + _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \ + _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \ + _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \ + _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \ + _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \ + _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \ + _r->sr = SR_FD | SR_MMU; } while (0) +#else #define ELF_PLAT_INIT(_r, load_addr) \ do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \ _r->sr = SR_FD; } while (0) +#endif -#ifdef __KERNEL__ #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) struct task_struct; extern int dump_task_regs (struct task_struct *, elf_gregset_t *); @@ -118,7 +145,6 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) -#endif #ifdef CONFIG_VSYSCALL /* vDSO has arch_setup_additional_pages */ @@ -133,12 +159,35 @@ extern void __kernel_vsyscall; #define VDSO_BASE ((unsigned long)current->mm->context.vdso) #define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x)) +#define VSYSCALL_AUX_ENT \ + if (vdso_enabled) \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); +#else +#define VSYSCALL_AUX_ENT 
+#endif /* CONFIG_VSYSCALL */ + +#ifdef CONFIG_SH_FPU +#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT) +#else +#define FPU_AUX_ENT +#endif + +extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape; + /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define ARCH_DLINFO \ do { \ - if (vdso_enabled) \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ + /* Optional FPU initialization */ \ + FPU_AUX_ENT; \ + \ + /* Optional vsyscall entry */ \ + VSYSCALL_AUX_ENT; \ + \ + /* Cache desc */ \ + NEW_AUX_ENT(AT_L1I_CACHESHAPE, l1i_cache_shape); \ + NEW_AUX_ENT(AT_L1D_CACHESHAPE, l1d_cache_shape); \ + NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape); \ } while (0) -#endif /* CONFIG_VSYSCALL */ +#endif /* __KERNEL__ */ #endif /* __ASM_SH_ELF_H */ diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h index 8a566177ad96..721fcc4d5e98 100644 --- a/include/asm-sh/fixmap.h +++ b/include/asm-sh/fixmap.h @@ -49,6 +49,7 @@ enum fixed_addresses { #define FIX_N_COLOURS 16 FIX_CMAP_BEGIN, FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, + FIX_UNCACHED, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, @@ -73,7 +74,11 @@ extern void __set_fixmap(enum fixed_addresses idx, * the start of the fixmap, and leave one page empty * at the top of mem.. */ +#ifdef CONFIG_SUPERH32 #define FIXADDR_TOP (P4SEG - PAGE_SIZE) +#else +#define FIXADDR_TOP (0xff000000 - PAGE_SIZE) +#endif #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) diff --git a/include/asm-sh/flat.h b/include/asm-sh/flat.h index dc4f5950dafa..0cc800299e06 100644 --- a/include/asm-sh/flat.h +++ b/include/asm-sh/flat.h @@ -19,6 +19,6 @@ #define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp) #define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) #define flat_get_relocate_addr(rel) (rel) -#define flat_set_persistent(relval, p) 0 +#define flat_set_persistent(relval, p) ({ (void)p; 0; }) #endif /* __ASM_SH_FLAT_H */ diff --git a/include/asm-sh/fpu.h b/include/asm-sh/fpu.h new file mode 100644 index 000000000000..f8429880a270 --- /dev/null +++ b/include/asm-sh/fpu.h @@ -0,0 +1,46 @@ +#ifndef __ASM_SH_FPU_H +#define __ASM_SH_FPU_H + +#define SR_FD 0x00008000 + +#ifndef __ASSEMBLY__ +#include <asm/ptrace.h> + +#ifdef CONFIG_SH_FPU +static inline void release_fpu(struct pt_regs *regs) +{ + regs->sr |= SR_FD; +} + +static inline void grab_fpu(struct pt_regs *regs) +{ + regs->sr &= ~SR_FD; +} + +struct task_struct; + +extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); +#else +#define release_fpu(regs) do { } while (0) +#define grab_fpu(regs) do { } while (0) +#define save_fpu(tsk, regs) do { } while (0) +#endif + +extern int do_fpu_inst(unsigned short, struct pt_regs *); + +#define unlazy_fpu(tsk, regs) do { \ + if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ + save_fpu(tsk, regs); \ + } \ +} while (0) + +#define clear_fpu(tsk, regs) do { \ + if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ + clear_tsk_thread_flag(tsk, TIF_USEDFPU); \ + release_fpu(regs); \ + } \ +} while (0) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_SH_FPU_H */ diff --git a/include/asm-sh/hd64461.h b/include/asm-sh/hd64461.h index 342ca55a266a..8c1353baf00f 100644 --- a/include/asm-sh/hd64461.h +++ b/include/asm-sh/hd64461.h @@ -46,10 +46,10 @@ /* CPU Data Bus Control Register */ #define HD64461_SCPUCR (CONFIG_HD64461_IOBASE + 0x04) -/* Base Adress Register */ 
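/*
 * Editorial aside (not part of this patch): the ARCH_DLINFO block above
 * exports the optional FPU control word (AT_FPUCW), the vDSO base
 * (AT_SYSINFO_EHDR) and the three cache-shape words through the ELF
 * auxiliary vector.  A user-space consumer could read them roughly like
 * this; getauxval() assumes glibc 2.16+, and the cache-shape constant is
 * guarded because it is only available where the exported headers define it.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <elf.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("vDSO ELF header at %#lx\n", vdso);	/* 0 if not provided */
#ifdef AT_L1D_CACHESHAPE
	printf("L1D cache shape: %#lx\n", getauxval(AT_L1D_CACHESHAPE));
#endif
	return 0;
}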
+/* Base Address Register */ #define HD64461_LCDCBAR (CONFIG_HD64461_IOBASE + 0x1000) -/* Line increment adress */ +/* Line increment address */ #define HD64461_LCDCLOR (CONFIG_HD64461_IOBASE + 0x1002) /* Controls LCD controller */ @@ -80,9 +80,9 @@ #define HD64461_LDR3 (CONFIG_HD64461_IOBASE + 0x101e) /* Palette Registers */ -#define HD64461_CPTWAR (CONFIG_HD64461_IOBASE + 0x1030) /* Color Palette Write Adress Register */ +#define HD64461_CPTWAR (CONFIG_HD64461_IOBASE + 0x1030) /* Color Palette Write Address Register */ #define HD64461_CPTWDR (CONFIG_HD64461_IOBASE + 0x1032) /* Color Palette Write Data Register */ -#define HD64461_CPTRAR (CONFIG_HD64461_IOBASE + 0x1034) /* Color Palette Read Adress Register */ +#define HD64461_CPTRAR (CONFIG_HD64461_IOBASE + 0x1034) /* Color Palette Read Address Register */ #define HD64461_CPTRDR (CONFIG_HD64461_IOBASE + 0x1036) /* Color Palette Read Data Register */ #define HD64461_GRDOR (CONFIG_HD64461_IOBASE + 0x1040) /* Display Resolution Offset Register */ @@ -97,8 +97,8 @@ #define HD64461_GRCFGR_COLORDEPTH8 0x01 /* Sets Colordepth 8 for Accelerator */ /* Line Drawing Registers */ -#define HD64461_LNSARH (CONFIG_HD64461_IOBASE + 0x1046) /* Line Start Adress Register (H) */ -#define HD64461_LNSARL (CONFIG_HD64461_IOBASE + 0x1048) /* Line Start Adress Register (L) */ +#define HD64461_LNSARH (CONFIG_HD64461_IOBASE + 0x1046) /* Line Start Address Register (H) */ +#define HD64461_LNSARL (CONFIG_HD64461_IOBASE + 0x1048) /* Line Start Address Register (L) */ #define HD64461_LNAXLR (CONFIG_HD64461_IOBASE + 0x104a) /* Axis Pixel Length Register */ #define HD64461_LNDGR (CONFIG_HD64461_IOBASE + 0x104c) /* Diagonal Register */ #define HD64461_LNAXR (CONFIG_HD64461_IOBASE + 0x104e) /* Axial Register */ @@ -106,16 +106,16 @@ #define HD64461_LNMDR (CONFIG_HD64461_IOBASE + 0x1052) /* Line Mode Register */ /* BitBLT Registers */ -#define HD64461_BBTSSARH (CONFIG_HD64461_IOBASE + 0x1054) /* Source Start Adress Register (H) */ -#define HD64461_BBTSSARL (CONFIG_HD64461_IOBASE + 0x1056) /* Source Start Adress Register (L) */ -#define HD64461_BBTDSARH (CONFIG_HD64461_IOBASE + 0x1058) /* Destination Start Adress Register (H) */ -#define HD64461_BBTDSARL (CONFIG_HD64461_IOBASE + 0x105a) /* Destination Start Adress Register (L) */ +#define HD64461_BBTSSARH (CONFIG_HD64461_IOBASE + 0x1054) /* Source Start Address Register (H) */ +#define HD64461_BBTSSARL (CONFIG_HD64461_IOBASE + 0x1056) /* Source Start Address Register (L) */ +#define HD64461_BBTDSARH (CONFIG_HD64461_IOBASE + 0x1058) /* Destination Start Address Register (H) */ +#define HD64461_BBTDSARL (CONFIG_HD64461_IOBASE + 0x105a) /* Destination Start Address Register (L) */ #define HD64461_BBTDWR (CONFIG_HD64461_IOBASE + 0x105c) /* Destination Block Width Register */ #define HD64461_BBTDHR (CONFIG_HD64461_IOBASE + 0x105e) /* Destination Block Height Register */ -#define HD64461_BBTPARH (CONFIG_HD64461_IOBASE + 0x1060) /* Pattern Start Adress Register (H) */ -#define HD64461_BBTPARL (CONFIG_HD64461_IOBASE + 0x1062) /* Pattern Start Adress Register (L) */ -#define HD64461_BBTMARH (CONFIG_HD64461_IOBASE + 0x1064) /* Mask Start Adress Register (H) */ -#define HD64461_BBTMARL (CONFIG_HD64461_IOBASE + 0x1066) /* Mask Start Adress Register (L) */ +#define HD64461_BBTPARH (CONFIG_HD64461_IOBASE + 0x1060) /* Pattern Start Address Register (H) */ +#define HD64461_BBTPARL (CONFIG_HD64461_IOBASE + 0x1062) /* Pattern Start Address Register (L) */ +#define HD64461_BBTMARH (CONFIG_HD64461_IOBASE + 0x1064) /* Mask Start Address 
Register (H) */ +#define HD64461_BBTMARL (CONFIG_HD64461_IOBASE + 0x1066) /* Mask Start Address Register (L) */ #define HD64461_BBTROPR (CONFIG_HD64461_IOBASE + 0x1068) /* ROP Register */ #define HD64461_BBTMDR (CONFIG_HD64461_IOBASE + 0x106a) /* BitBLT Mode Register */ diff --git a/include/asm-sh/hs7751rvoip.h b/include/asm-sh/hs7751rvoip.h deleted file mode 100644 index c4cff9d33927..000000000000 --- a/include/asm-sh/hs7751rvoip.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef __ASM_SH_RENESAS_HS7751RVOIP_H -#define __ASM_SH_RENESAS_HS7751RVOIP_H - -/* - * linux/include/asm-sh/hs7751rvoip/hs7751rvoip.h - * - * Copyright (C) 2000 Atom Create Engineering Co., Ltd. - * - * Renesas Technology Sales HS7751RVoIP support - */ - -/* Box specific addresses. */ - -#define PA_BCR 0xa4000000 /* FPGA */ -#define PA_SLICCNTR1 0xa4000006 /* SLIC PIO Control 1 */ -#define PA_SLICCNTR2 0xa4000008 /* SLIC PIO Control 2 */ -#define PA_DMACNTR 0xa400000a /* USB DMA Control */ -#define PA_INPORTR 0xa400000c /* Input Port Register */ -#define PA_OUTPORTR 0xa400000e /* Output Port Reguster */ -#define PA_VERREG 0xa4000014 /* FPGA Version Register */ - -#define PA_IDE_OFFSET 0x1f0 /* CF IDE Offset */ - -#define IRLCNTR1 (PA_BCR + 0) /* Interrupt Control Register1 */ -#define IRLCNTR2 (PA_BCR + 2) /* Interrupt Control Register2 */ -#define IRLCNTR3 (PA_BCR + 4) /* Interrupt Control Register3 */ -#define IRLCNTR4 (PA_BCR + 16) /* Interrupt Control Register4 */ -#define IRLCNTR5 (PA_BCR + 18) /* Interrupt Control Register5 */ - -#define IRQ_PCIETH 6 /* PCI Ethernet IRQ */ -#define IRQ_PCIHUB 7 /* PCI Ethernet Hub IRQ */ -#define IRQ_USBCOM 8 /* USB Comunication IRQ */ -#define IRQ_USBCON 9 /* USB Connect IRQ */ -#define IRQ_USBDMA 10 /* USB DMA IRQ */ -#define IRQ_CFCARD 11 /* CF Card IRQ */ -#define IRQ_PCMCIA 12 /* PCMCIA IRQ */ -#define IRQ_PCISLOT 13 /* PCI Slot #1 IRQ */ -#define IRQ_ONHOOK1 0 /* ON HOOK1 IRQ */ -#define IRQ_OFFHOOK1 1 /* OFF HOOK1 IRQ */ -#define IRQ_ONHOOK2 2 /* ON HOOK2 IRQ */ -#define IRQ_OFFHOOK2 3 /* OFF HOOK2 IRQ */ -#define IRQ_RINGING 4 /* Ringing IRQ */ -#define IRQ_CODEC 5 /* CODEC IRQ */ - -#define __IO_PREFIX hs7751rvoip -#include <asm/io_generic.h> - -/* arch/sh/boards/renesas/hs7751rvoip/irq.c */ -void init_hs7751rvoip_IRQ(void); - -/* arch/sh/boards/renesas/hs7751rvoip/io.c */ -void *hs7751rvoip_ioremap(unsigned long, unsigned long); - -#endif /* __ASM_SH_RENESAS_HS7751RVOIP */ diff --git a/include/asm-sh/hw_irq.h b/include/asm-sh/hw_irq.h index cb0b6c9f7020..c958fdaa0095 100644 --- a/include/asm-sh/hw_irq.h +++ b/include/asm-sh/hw_irq.h @@ -33,13 +33,6 @@ struct intc_vect { #define INTC_VECT(enum_id, vect) { enum_id, vect } #define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq)) -struct intc_prio { - intc_enum enum_id; - unsigned char priority; -}; - -#define INTC_PRIO(enum_id, prio) { enum_id, prio } - struct intc_group { intc_enum enum_id; intc_enum enum_ids[32]; @@ -79,8 +72,6 @@ struct intc_desc { unsigned int nr_vectors; struct intc_group *groups; unsigned int nr_groups; - struct intc_prio *priorities; - unsigned int nr_priorities; struct intc_mask_reg *mask_regs; unsigned int nr_mask_regs; struct intc_prio_reg *prio_regs; @@ -92,10 +83,9 @@ struct intc_desc { #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) #define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \ - priorities, mask_regs, prio_regs, sense_regs) \ + mask_regs, prio_regs, sense_regs) \ struct intc_desc symbol __initdata = { \ _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ - _INTC_ARRAY(priorities), \ 
_INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ _INTC_ARRAY(sense_regs), \ chipname, \ diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h index 6ed34d8eac5f..94900c089519 100644 --- a/include/asm-sh/io.h +++ b/include/asm-sh/io.h @@ -191,6 +191,8 @@ __BUILD_MEMORY_STRING(w, u16) #define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */ +#define IO_SPACE_LIMIT 0xffffffff + /* * This function provides a method for the generic case where a board-specific * ioport_map simply needs to return the port + some arbitrary port base. @@ -226,6 +228,11 @@ static inline unsigned int ctrl_inl(unsigned long addr) return *(volatile unsigned long*)addr; } +static inline unsigned long long ctrl_inq(unsigned long addr) +{ + return *(volatile unsigned long long*)addr; +} + static inline void ctrl_outb(unsigned char b, unsigned long addr) { *(volatile unsigned char*)addr = b; @@ -241,49 +248,52 @@ static inline void ctrl_outl(unsigned int b, unsigned long addr) *(volatile unsigned long*)addr = b; } +static inline void ctrl_outq(unsigned long long b, unsigned long addr) +{ + *(volatile unsigned long long*)addr = b; +} + static inline void ctrl_delay(void) { +#ifdef P2SEG ctrl_inw(P2SEG); +#endif } -#define IO_SPACE_LIMIT 0xffffffff +/* Quad-word real-mode I/O, don't ask.. */ +unsigned long long peek_real_address_q(unsigned long long addr); +unsigned long long poke_real_address_q(unsigned long long addr, + unsigned long long val); -#ifdef CONFIG_MMU -/* - * Change virtual addresses to physical addresses and vv. - * These are trivial on the 1:1 Linux/SuperH mapping - */ -static inline unsigned long virt_to_phys(volatile void *address) -{ - return PHYSADDR(address); -} +/* arch/sh/mm/ioremap_64.c */ +unsigned long onchip_remap(unsigned long addr, unsigned long size, + const char *name); +extern void onchip_unmap(unsigned long vaddr); -static inline void *phys_to_virt(unsigned long address) -{ - return (void *)P1SEGADDR(address); -} -#else -#define phys_to_virt(address) ((void *)(address)) +#if !defined(CONFIG_MMU) #define virt_to_phys(address) ((unsigned long)(address)) +#define phys_to_virt(address) ((void *)(address)) +#else +#define virt_to_phys(address) (__pa(address)) +#define phys_to_virt(address) (__va(address)) #endif /* - * readX/writeX() are used to access memory mapped devices. On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the x86 architecture, we just read/write the - * memory location directly. + * On 32-bit SH, we traditionally have the whole physical address space + * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do + * not need to do anything but place the address in the proper segment. + * This is true for P1 and P2 addresses, as well as some P3 ones. + * However, most of the P3 addresses and newer cores using extended + * addressing need to map through page tables, so the ioremap() + * implementation becomes a bit more complicated. * - * On SH, we traditionally have the whole physical address space mapped - * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not - * need to do anything but place the address in the proper segment. This - * is true for P1 and P2 addresses, as well as some P3 ones. However, - * most of the P3 addresses and newer cores using extended addressing - * need to map through page tables, so the ioremap() implementation - * becomes a bit more complicated. See arch/sh/mm/ioremap.c for - * additional notes on this. + * See arch/sh/mm/ioremap.c for additional notes on this. 
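/*
 * Editorial aside (not part of this patch): with struct intc_prio and the
 * priorities array gone from the hw_irq.h interface above, a
 * DECLARE_INTC_DESC() call site simply drops that argument.  The table
 * names here (vectors, groups, mask_registers, prio_registers,
 * sense_registers) are placeholders, not taken from any real CPU setup file.
 *
 * Before:
 *	static DECLARE_INTC_DESC(intc_desc, "sh-example", vectors, groups,
 *				 priorities, mask_registers, prio_registers,
 *				 sense_registers);
 * After:
 */
static DECLARE_INTC_DESC(intc_desc, "sh-example", vectors, groups,
			 mask_registers, prio_registers, sense_registers);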
* * We cheat a bit and always return uncachable areas until we've fixed * the drivers to handle caching properly. + * + * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply + * doesn't exist, so everything must go through page tables. */ #ifdef CONFIG_MMU void __iomem *__ioremap(unsigned long offset, unsigned long size, @@ -297,6 +307,7 @@ void __iounmap(void __iomem *addr); static inline void __iomem * __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { +#ifdef CONFIG_SUPERH32 unsigned long last_addr = offset + size - 1; /* @@ -311,6 +322,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return (void __iomem *)P2SEGADDR(offset); } +#endif return __ioremap(offset, size, flags); } diff --git a/include/asm-sh/irqflags.h b/include/asm-sh/irqflags.h index 9dedc1b693e3..46e71da5be6b 100644 --- a/include/asm-sh/irqflags.h +++ b/include/asm-sh/irqflags.h @@ -1,81 +1,11 @@ #ifndef __ASM_SH_IRQFLAGS_H #define __ASM_SH_IRQFLAGS_H -static inline void raw_local_irq_enable(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %1, %0\n\t" -#ifdef CONFIG_CPU_HAS_SR_RB - "stc r6_bank, %1\n\t" - "or %1, %0\n\t" +#ifdef CONFIG_SUPERH32 +#include "irqflags_32.h" +#else +#include "irqflags_64.h" #endif - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x000000f0) - : "memory" - ); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); -} - -static inline void set_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or %2, %0\n\t" - "and %3, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "r" (0x10000000), "r" (0xffffff0f) - : "memory" - ); -} - -static inline void clear_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %2, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x10000000) - : "memory" - ); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); - - return flags; -} #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) @@ -92,25 +22,6 @@ static inline int raw_irqs_disabled(void) return raw_irqs_disabled_flags(flags); } -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags, __dummy; - - __asm__ __volatile__ ( - "stc sr, %1\n\t" - "mov %1, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - "mov %1, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags), "=&r" (__dummy) - : /* no inputs */ - : "memory" - ); - - return flags; -} - #define raw_local_irq_save(flags) \ do { (flags) = __raw_local_irq_save(); } while (0) diff --git a/include/asm-sh/irqflags_32.h b/include/asm-sh/irqflags_32.h new file mode 100644 index 000000000000..60218f541340 --- /dev/null +++ b/include/asm-sh/irqflags_32.h @@ -0,0 +1,99 @@ +#ifndef __ASM_SH_IRQFLAGS_32_H +#define __ASM_SH_IRQFLAGS_32_H + +static inline void raw_local_irq_enable(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %1, %0\n\t" +#ifdef CONFIG_CPU_HAS_SR_RB + "stc r6_bank, %1\n\t" + "or %1, %0\n\t" +#endif + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" 
(__dummy1) + : "1" (~0x000000f0) + : "memory" + ); +} + +static inline void raw_local_irq_disable(void) +{ + unsigned long flags; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or #0xf0, %0\n\t" + "ldc %0, sr\n\t" + : "=&z" (flags) + : /* no inputs */ + : "memory" + ); +} + +static inline void set_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or %2, %0\n\t" + "and %3, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "r" (0x10000000), "r" (0xffffff0f) + : "memory" + ); +} + +static inline void clear_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %2, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x10000000) + : "memory" + ); +} + +static inline unsigned long __raw_local_save_flags(void) +{ + unsigned long flags; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and #0xf0, %0\n\t" + : "=&z" (flags) + : /* no inputs */ + : "memory" + ); + + return flags; +} + +static inline unsigned long __raw_local_irq_save(void) +{ + unsigned long flags, __dummy; + + __asm__ __volatile__ ( + "stc sr, %1\n\t" + "mov %1, %0\n\t" + "or #0xf0, %0\n\t" + "ldc %0, sr\n\t" + "mov %1, %0\n\t" + "and #0xf0, %0\n\t" + : "=&z" (flags), "=&r" (__dummy) + : /* no inputs */ + : "memory" + ); + + return flags; +} + +#endif /* __ASM_SH_IRQFLAGS_32_H */ diff --git a/include/asm-sh/irqflags_64.h b/include/asm-sh/irqflags_64.h new file mode 100644 index 000000000000..4f6b8a56e7bd --- /dev/null +++ b/include/asm-sh/irqflags_64.h @@ -0,0 +1,85 @@ +#ifndef __ASM_SH_IRQFLAGS_64_H +#define __ASM_SH_IRQFLAGS_64_H + +#include <asm/cpu/registers.h> + +#define SR_MASK_LL 0x00000000000000f0LL +#define SR_BL_LL 0x0000000010000000LL + +static inline void raw_local_irq_enable(void) +{ + unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + +static inline void raw_local_irq_disable(void) +{ + unsigned long long __dummy0, __dummy1 = SR_MASK_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "or %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + +static inline void set_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "or %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); + +} + +static inline void clear_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + +static inline unsigned long __raw_local_save_flags(void) +{ + unsigned long long __dummy = SR_MASK_LL; + unsigned long flags; + + __asm__ __volatile__ ( + "getcon " __SR ", %0\n\t" + "and %0, %1, %0" + : "=&r" (flags) + : "r" (__dummy)); + + return flags; +} + +static inline unsigned long __raw_local_irq_save(void) +{ + unsigned long long __dummy0, __dummy1 = SR_MASK_LL; + unsigned long flags; + + __asm__ __volatile__ ( + "getcon " __SR ", %1\n\t" + "or %1, r63, %0\n\t" + "or %1, %2, %1\n\t" + "putcon %1, " __SR "\n\t" + "and %0, %2, %0" + : "=&r" (flags), "=&r" (__dummy0) + : "r" (__dummy1)); + + return flags; +} + +#endif /* __ASM_SH_IRQFLAGS_64_H */ diff --git a/include/asm-sh/machvec.h b/include/asm-sh/machvec.h index 088698bacf2f..b2e4124070ae 100644 --- 
a/include/asm-sh/machvec.h +++ b/include/asm-sh/machvec.h @@ -56,9 +56,6 @@ struct sh_machine_vector { void (*mv_heartbeat)(void); - void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, gfp_t); - int (*mv_consistent_free)(struct device *, size_t, void *, dma_addr_t); - void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); void (*mv_ioport_unmap)(void __iomem *); }; @@ -68,6 +65,6 @@ extern struct sh_machine_vector sh_mv; #define get_system_type() sh_mv.mv_name #define __initmv \ - __attribute_used__ __attribute__((__section__ (".machvec.init"))) + __used __section(.machvec.init) #endif /* _ASM_SH_MACHVEC_H */ diff --git a/include/asm-sh/microdev.h b/include/asm-sh/microdev.h index 018332a9e590..1aed15856e11 100644 --- a/include/asm-sh/microdev.h +++ b/include/asm-sh/microdev.h @@ -17,7 +17,7 @@ extern void microdev_print_fpga_intc_status(void); /* * The following are useful macros for manipulating the interrupt * controller (INTC) on the CPU-board FPGA. should be noted that there - * is an INTC on the FPGA, and a seperate INTC on the SH4-202 core - + * is an INTC on the FPGA, and a separate INTC on the SH4-202 core - * these are two different things, both of which need to be prorammed to * correctly route - unfortunately, they have the same name and * abbreviations! @@ -25,7 +25,7 @@ extern void microdev_print_fpga_intc_status(void); #define MICRODEV_FPGA_INTC_BASE 0xa6110000ul /* INTC base address on CPU-board FPGA */ #define MICRODEV_FPGA_INTENB_REG (MICRODEV_FPGA_INTC_BASE+0ul) /* Interrupt Enable Register on INTC on CPU-board FPGA */ #define MICRODEV_FPGA_INTDSB_REG (MICRODEV_FPGA_INTC_BASE+8ul) /* Interrupt Disable Register on INTC on CPU-board FPGA */ -#define MICRODEV_FPGA_INTC_MASK(n) (1ul<<(n)) /* Interupt mask to enable/disable INTC in CPU-board FPGA */ +#define MICRODEV_FPGA_INTC_MASK(n) (1ul<<(n)) /* Interrupt mask to enable/disable INTC in CPU-board FPGA */ #define MICRODEV_FPGA_INTPRI_REG(n) (MICRODEV_FPGA_INTC_BASE+0x10+((n)/8)*8)/* Interrupt Priority Register on INTC on CPU-board FPGA */ #define MICRODEV_FPGA_INTPRI_LEVEL(n,x) ((x)<<(((n)%8)*4)) /* MICRODEV_FPGA_INTPRI_LEVEL(int_number, int_level) */ #define MICRODEV_FPGA_INTPRI_MASK(n) (MICRODEV_FPGA_INTPRI_LEVEL((n),0xful)) /* Interrupt Priority Mask on INTC on CPU-board FPGA */ diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h index 199662bb35c6..fe58d00b250c 100644 --- a/include/asm-sh/mmu_context.h +++ b/include/asm-sh/mmu_context.h @@ -1,13 +1,13 @@ /* * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2003 - 2006 Paul Mundt + * Copyright (C) 2003 - 2007 Paul Mundt * * ASID handling idea taken from MIPS implementation. 
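/*
 * Editorial aside (not part of this patch): the irqflags_32.h/irqflags_64.h
 * primitives above (raw_local_irq_save() and friends) sit underneath the
 * generic wrappers in <linux/irqflags.h>, so callers are untouched by the
 * 32/64 split.  A minimal usage sketch:
 */
#include <linux/irqflags.h>

static void sketch_touch_irq_shared_state(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask interrupts, remember old state */
	/* ... manipulate data also touched from interrupt context ... */
	local_irq_restore(flags);	/* restore the saved interrupt mask */
}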
*/ #ifndef __ASM_SH_MMU_CONTEXT_H #define __ASM_SH_MMU_CONTEXT_H -#ifdef __KERNEL__ +#ifdef __KERNEL__ #include <asm/cpu/mmu_context.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> @@ -19,7 +19,6 @@ * (a) TLB cache version (or round, cycle whatever expression you like) * (b) ASID (Address Space IDentifier) */ - #define MMU_CONTEXT_ASID_MASK 0x000000ff #define MMU_CONTEXT_VERSION_MASK 0xffffff00 #define MMU_CONTEXT_FIRST_VERSION 0x00000100 @@ -28,10 +27,11 @@ /* ASID is 8-bit value, so it can't be 0x100 */ #define MMU_NO_ASID 0x100 -#define cpu_context(cpu, mm) ((mm)->context.id[cpu]) -#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & \ - MMU_CONTEXT_ASID_MASK) #define asid_cache(cpu) (cpu_data[cpu].asid_cache) +#define cpu_context(cpu, mm) ((mm)->context.id[cpu]) + +#define cpu_asid(cpu, mm) \ + (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK) /* * Virtual Page Number mask @@ -39,6 +39,12 @@ #define MMU_VPN_MASK 0xfffff000 #ifdef CONFIG_MMU +#if defined(CONFIG_SUPERH32) +#include "mmu_context_32.h" +#else +#include "mmu_context_64.h" +#endif + /* * Get MMU context if needed. */ @@ -59,6 +65,14 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) */ flush_tlb_all(); +#ifdef CONFIG_SUPERH64 + /* + * The SH-5 cache uses the ASIDs, requiring both the I and D + * cache to be flushed when the ASID is exhausted. Weak. + */ + flush_cache_all(); +#endif + /* * Fix version; Note that we avoid version #0 * to distingush NO_CONTEXT. @@ -86,39 +100,6 @@ static inline int init_new_context(struct task_struct *tsk, } /* - * Destroy context related info for an mm_struct that is about - * to be put to rest. - */ -static inline void destroy_context(struct mm_struct *mm) -{ - /* Do nothing */ -} - -static inline void set_asid(unsigned long asid) -{ - unsigned long __dummy; - - __asm__ __volatile__ ("mov.l %2, %0\n\t" - "and %3, %0\n\t" - "or %1, %0\n\t" - "mov.l %0, %2" - : "=&r" (__dummy) - : "r" (asid), "m" (__m(MMU_PTEH)), - "r" (0xffffff00)); -} - -static inline unsigned long get_asid(void) -{ - unsigned long asid; - - __asm__ __volatile__ ("mov.l %1, %0" - : "=r" (asid) - : "m" (__m(MMU_PTEH))); - asid &= MMU_CONTEXT_ASID_MASK; - return asid; -} - -/* * After we have set current->mm to a new value, this activates * the context for the new mm so we see the new mappings. */ @@ -128,17 +109,6 @@ static inline void activate_context(struct mm_struct *mm, unsigned int cpu) set_asid(cpu_asid(cpu, mm)); } -/* MMU_TTB is used for optimizing the fault handling. 
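/*
 * Editorial aside (not part of this patch): a worked example of the
 * context word handled by get_mmu_context() above.  The hardware ASID
 * lives in the low 8 bits and a software "version" (generation counter)
 * in the upper 24 bits, so a single compare against asid_cache(cpu)
 * decides whether an mm's ASID is still usable:
 *
 *	cpu_context(cpu, mm) = 0x00000342
 *	  -> version = 0x00000300 (cpu_context & MMU_CONTEXT_VERSION_MASK)
 *	  -> ASID    =       0x42 (cpu_context & MMU_CONTEXT_ASID_MASK)
 *
 * When the 8-bit ASID space wraps, the TLB (and on SH-5 the caches) is
 * flushed and the version is bumped, which invalidates every stale
 * context at once without walking all mm_structs.
 */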
*/ -static inline void set_TTB(pgd_t *pgd) -{ - ctrl_outl((unsigned long)pgd, MMU_TTB); -} - -static inline pgd_t *get_TTB(void) -{ - return (pgd_t *)ctrl_inl(MMU_TTB); -} - static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) @@ -153,17 +123,7 @@ static inline void switch_mm(struct mm_struct *prev, if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) activate_context(next, cpu); } - -#define deactivate_mm(tsk,mm) do { } while (0) - -#define activate_mm(prev, next) \ - switch_mm((prev),(next),NULL) - -static inline void -enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} -#else /* !CONFIG_MMU */ +#else #define get_mmu_context(mm) do { } while (0) #define init_new_context(tsk,mm) (0) #define destroy_context(mm) do { } while (0) @@ -173,10 +133,11 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) #define get_TTB() (0) #define activate_context(mm,cpu) do { } while (0) #define switch_mm(prev,next,tsk) do { } while (0) +#endif /* CONFIG_MMU */ + +#define activate_mm(prev, next) switch_mm((prev),(next),NULL) #define deactivate_mm(tsk,mm) do { } while (0) -#define activate_mm(prev,next) do { } while (0) #define enter_lazy_tlb(mm,tsk) do { } while (0) -#endif /* CONFIG_MMU */ #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) /* diff --git a/include/asm-sh/mmu_context_32.h b/include/asm-sh/mmu_context_32.h new file mode 100644 index 000000000000..f4f9aebd68b7 --- /dev/null +++ b/include/asm-sh/mmu_context_32.h @@ -0,0 +1,47 @@ +#ifndef __ASM_SH_MMU_CONTEXT_32_H +#define __ASM_SH_MMU_CONTEXT_32_H + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +static inline void destroy_context(struct mm_struct *mm) +{ + /* Do nothing */ +} + +static inline void set_asid(unsigned long asid) +{ + unsigned long __dummy; + + __asm__ __volatile__ ("mov.l %2, %0\n\t" + "and %3, %0\n\t" + "or %1, %0\n\t" + "mov.l %0, %2" + : "=&r" (__dummy) + : "r" (asid), "m" (__m(MMU_PTEH)), + "r" (0xffffff00)); +} + +static inline unsigned long get_asid(void) +{ + unsigned long asid; + + __asm__ __volatile__ ("mov.l %1, %0" + : "=r" (asid) + : "m" (__m(MMU_PTEH))); + asid &= MMU_CONTEXT_ASID_MASK; + return asid; +} + +/* MMU_TTB is used for optimizing the fault handling. */ +static inline void set_TTB(pgd_t *pgd) +{ + ctrl_outl((unsigned long)pgd, MMU_TTB); +} + +static inline pgd_t *get_TTB(void) +{ + return (pgd_t *)ctrl_inl(MMU_TTB); +} +#endif /* __ASM_SH_MMU_CONTEXT_32_H */ diff --git a/include/asm-sh/mmu_context_64.h b/include/asm-sh/mmu_context_64.h new file mode 100644 index 000000000000..020be744b088 --- /dev/null +++ b/include/asm-sh/mmu_context_64.h @@ -0,0 +1,75 @@ +#ifndef __ASM_SH_MMU_CONTEXT_64_H +#define __ASM_SH_MMU_CONTEXT_64_H + +/* + * sh64-specific mmu_context interface. + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 - 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <asm/cpu/registers.h> +#include <asm/cacheflush.h> + +#define SR_ASID_MASK 0xffffffffff00ffffULL +#define SR_ASID_SHIFT 16 + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. 
+ */ +static inline void destroy_context(struct mm_struct *mm) +{ + /* Well, at least free TLB entries */ + flush_tlb_mm(mm); +} + +static inline unsigned long get_asid(void) +{ + unsigned long long sr; + + asm volatile ("getcon " __SR ", %0\n\t" + : "=r" (sr)); + + sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK; + return (unsigned long) sr; +} + +/* Set ASID into SR */ +static inline void set_asid(unsigned long asid) +{ + unsigned long long sr, pc; + + asm volatile ("getcon " __SR ", %0" : "=r" (sr)); + + sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT); + + /* + * It is possible that this function may be inlined and so to avoid + * the assembler reporting duplicate symbols we make use of the + * gas trick of generating symbols using numerics and forward + * reference. + */ + asm volatile ("movi 1, %1\n\t" + "shlli %1, 28, %1\n\t" + "or %0, %1, %1\n\t" + "putcon %1, " __SR "\n\t" + "putcon %0, " __SSR "\n\t" + "movi 1f, %1\n\t" + "ori %1, 1 , %1\n\t" + "putcon %1, " __SPC "\n\t" + "rte\n" + "1:\n\t" + : "=r" (sr), "=r" (pc) : "0" (sr)); +} + +/* No spare register to twiddle, so use a software cache */ +extern pgd_t *mmu_pdtp_cache; + +#define set_TTB(pgd) (mmu_pdtp_cache = (pgd)) +#define get_TTB() (mmu_pdtp_cache) + +#endif /* __ASM_SH_MMU_CONTEXT_64_H */ diff --git a/include/asm-sh/module.h b/include/asm-sh/module.h index 118d5a2b228f..46eccd331660 100644 --- a/include/asm-sh/module.h +++ b/include/asm-sh/module.h @@ -20,6 +20,8 @@ struct mod_arch_specific { # define MODULE_PROC_FAMILY "SH3LE " # elif defined CONFIG_CPU_SH4 # define MODULE_PROC_FAMILY "SH4LE " +# elif defined CONFIG_CPU_SH5 +# define MODULE_PROC_FAMILY "SH5LE " # else # error unknown processor family # endif @@ -30,6 +32,8 @@ struct mod_arch_specific { # define MODULE_PROC_FAMILY "SH3BE " # elif defined CONFIG_CPU_SH4 # define MODULE_PROC_FAMILY "SH4BE " +# elif defined CONFIG_CPU_SH5 +# define MODULE_PROC_FAMILY "SH5BE " # else # error unknown processor family # endif diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index d00a8fde7c7f..002e64a4f049 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h @@ -5,13 +5,7 @@ * Copyright (C) 1999 Niibe Yutaka */ -/* - [ P0/U0 (virtual) ] 0x00000000 <------ User space - [ P1 (fixed) cached ] 0x80000000 <------ Kernel space - [ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access - [ P3 (virtual) cached] 0xC0000000 <------ vmalloced area - [ P4 control ] 0xE0000000 - */ +#include <linux/const.h> #ifdef __KERNEL__ @@ -26,15 +20,13 @@ # error "Bogus kernel page size?" 
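/*
 * Editorial aside (not part of this patch): the PAGE_SIZE definition just
 * below replaces the old #ifdef __ASSEMBLY__ pair with a single
 * "_AC(1, UL) << PAGE_SHIFT" from <linux/const.h>.  Paraphrasing that
 * header from memory (it is not shown in this diff), _AC() drops the type
 * suffix when included from assembly:
 *
 *	#ifdef __ASSEMBLY__
 *	# define _AC(X, Y)	X		(so _AC(1, UL) -> 1)
 *	#else
 *	# define _AC(X, Y)	(X##Y)		(so _AC(1, UL) -> (1UL))
 *	#endif
 *
 * so the one-line PAGE_SIZE expands to "1 << PAGE_SHIFT" in assembly and
 * "(1UL) << PAGE_SHIFT" in C, matching the two variants being removed.
 */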
#endif -#ifdef __ASSEMBLY__ -#define PAGE_SIZE (1 << PAGE_SHIFT) -#else -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#endif - +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #define PTE_MASK PAGE_MASK +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) #define HPAGE_SHIFT 16 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) @@ -45,6 +37,8 @@ #define HPAGE_SHIFT 22 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) #define HPAGE_SHIFT 26 +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB) +#define HPAGE_SHIFT 29 #endif #ifdef CONFIG_HUGETLB_PAGE @@ -55,20 +49,12 @@ #ifndef __ASSEMBLY__ -extern void (*clear_page)(void *to); -extern void (*copy_page)(void *to, void *from); - extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; extern unsigned long memory_start, memory_end; -#ifdef CONFIG_MMU -extern void clear_page_slow(void *to); -extern void copy_page_slow(void *to, void *from); -#else -extern void clear_page_nommu(void *to); -extern void copy_page_nommu(void *to, void *from); -#endif +extern void clear_page(void *to); +extern void copy_page(void *to, void *from); #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) @@ -96,12 +82,18 @@ typedef struct { unsigned long long pgd; } pgd_t; ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) #define __pte(x) \ ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) -#else +#elif defined(CONFIG_SUPERH32) typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pgprot; } pgprot_t; typedef struct { unsigned long pgd; } pgd_t; #define pte_val(x) ((x).pte_low) -#define __pte(x) ((pte_t) { (x) } ) +#define __pte(x) ((pte_t) { (x) } ) +#else +typedef struct { unsigned long long pte_low; } pte_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct { unsigned long pgd; } pgd_t; +#define pte_val(x) ((x).pte_low) +#define __pte(x) ((pte_t) { (x) } ) #endif #define pgd_val(x) ((x).pgd) @@ -112,28 +104,44 @@ typedef struct { unsigned long pgd; } pgd_t; #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - /* - * IF YOU CHANGE THIS, PLEASE ALSO CHANGE - * - * arch/sh/kernel/vmlinux.lds.S - * - * which has the same constant encoded.. + * __MEMORY_START and SIZE are the physical addresses and size of RAM. */ - #define __MEMORY_START CONFIG_MEMORY_START #define __MEMORY_SIZE CONFIG_MEMORY_SIZE +/* + * PAGE_OFFSET is the virtual address of the start of kernel address + * space. + */ #define PAGE_OFFSET CONFIG_PAGE_OFFSET -#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) -#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +/* + * Virtual to physical RAM address translation. + * + * In 29 bit mode, the physical offset of RAM from address 0 is visible in + * the kernel virtual address space, and thus we don't have to take + * this into account when translating. However in 32 bit mode this offset + * is not visible (it is part of the PMB mapping) and so needs to be + * added or subtracted as required. 
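/*
 * Editorial aside (not part of this patch): a worked example of the
 * translation described above, with illustrative values
 * PAGE_OFFSET = 0x80000000 and __MEMORY_START = 0x0c000000 (both come
 * from Kconfig, so the numbers differ per board):
 *
 *	29-bit: RAM already shows up at P1 + __MEMORY_START, so
 *		__pa(0x8c001000) = 0x8c001000 - 0x80000000 = 0x0c001000
 *
 *	32-bit: the PMB maps PAGE_OFFSET directly onto __MEMORY_START, so
 *		__pa(0x80001000) = 0x80001000 - 0x80000000 + 0x0c000000
 *				 = 0x0c001000
 *
 * Both reach the same physical page; only the kernel virtual address it
 * is reached through differs between the two addressing modes.
 */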
+ */ +#ifdef CONFIG_32BIT +#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) +#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) +#else +#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) +#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) +#endif + +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) -/* PFN start number, because of __MEMORY_START */ +/* + * PFN = physical frame number (ie PFN 0 == physical address 0) + * PFN_START is the PFN of the first page of RAM. By defining this we + * don't have struct page entries for the portion of address space + * between physical address 0 and the start of RAM. + */ #define PFN_START (__MEMORY_START >> PAGE_SHIFT) #define ARCH_PFN_OFFSET (PFN_START) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) @@ -154,11 +162,21 @@ typedef struct { unsigned long pgd; } pgd_t; #endif /* - * Slub defaults to 8-byte alignment, we're only interested in 4. - * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways. + * Some drivers need to perform DMA into kmalloc'ed buffers + * and so we have to increase the kmalloc minalign for this. */ -#define ARCH_KMALLOC_MINALIGN 4 -#define ARCH_SLAB_MINALIGN 4 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + +#ifdef CONFIG_SUPERH64 +/* + * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still + * happily generate {ld/st}.q pairs, requiring us to have 8-byte + * alignment to avoid traps. The kmalloc alignment is gauranteed by + * virtue of L1_CACHE_BYTES, requiring this to only be special cased + * for slab caches. + */ +#define ARCH_SLAB_MINALIGN 8 +#endif #endif /* __KERNEL__ */ #endif /* __ASM_SH_PAGE_H */ diff --git a/include/asm-sh/param.h b/include/asm-sh/param.h index 1012296e07ab..ae245afdfd6a 100644 --- a/include/asm-sh/param.h +++ b/include/asm-sh/param.h @@ -2,11 +2,7 @@ #define __ASM_SH_PARAM_H #ifdef __KERNEL__ -# ifdef CONFIG_SH_WDT -# define HZ 1000 /* Needed for high-res WOVF */ -# else -# define HZ CONFIG_HZ -# endif +# define HZ CONFIG_HZ # define USER_HZ 100 /* User interfaces are in "ticks" */ # define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ #endif diff --git a/include/asm-sh/pci.h b/include/asm-sh/pci.h index 2757ce096ff7..df1d383e18a5 100644 --- a/include/asm-sh/pci.h +++ b/include/asm-sh/pci.h @@ -38,9 +38,12 @@ extern struct pci_channel board_pci_channels[]; #if defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785) #define PCI_IO_AREA 0xFE400000 #define PCI_IO_SIZE 0x00400000 +#elif defined(CONFIG_CPU_SH5) +extern unsigned long PCI_IO_AREA; +#define PCI_IO_SIZE 0x00010000 #else #define PCI_IO_AREA 0xFE240000 -#define PCI_IO_SIZE 0X00040000 +#define PCI_IO_SIZE 0x00040000 #endif #define PCI_MEM_SIZE 0x01000000 diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index 8f1e8be8d15d..a4a8f8b93463 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -3,7 +3,7 @@ * use the SuperH page table tree. * * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2002 - 2005 Paul Mundt + * Copyright (C) 2002 - 2007 Paul Mundt * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this @@ -29,10 +29,27 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #endif /* !__ASSEMBLY__ */ /* + * Effective and physical address definitions, to aid with sign + * extension. 
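/*
 * Editorial aside (not part of this patch): the NEFF/NPHYS constants
 * defined just below record how many significant bits an effective or
 * physical address carries, so 64-bit code (SH-5) can sign-extend a
 * 32-bit address into a full register.  One plausible helper built on
 * those constants; this is a sketch, not something the patch itself adds:
 */
static inline unsigned long long sketch_neff_sign_extend(unsigned long long va)
{
	return (va & NEFF_SIGN) ? (va | NEFF_MASK) : va;
}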
+ */ +#define NEFF 32 +#define NEFF_SIGN (1LL << (NEFF - 1)) +#define NEFF_MASK (-1LL << NEFF) + +#ifdef CONFIG_29BIT +#define NPHYS 29 +#else +#define NPHYS 32 +#endif + +#define NPHYS_SIGN (1LL << (NPHYS - 1)) +#define NPHYS_MASK (-1LL << NPHYS) + +/* * traditional two-level paging structure */ /* PTE bits */ -#ifdef CONFIG_X2TLB +#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64) # define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ #else # define PTE_MAGNITUDE 2 /* 32-bit PTEs */ @@ -52,283 +69,27 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 -#define PTE_PHYS_MASK (0x20000000 - PAGE_SIZE) - -#define VMALLOC_START (P3SEG) -#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) - -/* - * Linux PTEL encoding. - * - * Hardware and software bit definitions for the PTEL value (see below for - * notes on SH-X2 MMUs and 64-bit PTEs): - * - * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). - * - * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the - * hardware PTEL value can't have the SH-bit set when MMUCR.IX is set, - * which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT). - * - * In order to keep this relatively clean, do not use these for defining - * SH-3 specific flags until all of the other unused bits have been - * exhausted. - * - * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE. - * - * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages. - * Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused. - * - * - Bits 31, 30, and 29 remain unused by everyone and can be used for future - * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS. - * - * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day. - * - * SH-X2 MMUs and extended PTEs - * - * SH-X2 supports an extended mode TLB with split data arrays due to the - * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and - * SZ bit placeholders still exist in data array 1, but are implemented as - * reserved bits, with the real logic existing in data array 2. - * - * The downside to this is that we can no longer fit everything in to a 32-bit - * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus - * side, this gives us quite a few spare bits to play with for future usage. - */ -/* Legacy and compat mode bits */ -#define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ -#define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ -#define _PAGE_DIRTY 0x004 /* D-bit : page changed */ -#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ -#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ -#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ -#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/ -#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ -#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ -#define _PAGE_PROTNONE 0x200 /* software: if not present */ -#define _PAGE_ACCESSED 0x400 /* software: page referenced */ -#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? 
*/ - -#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1) -#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER) - -/* Extended mode bits */ -#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */ -#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */ -#define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */ -#define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */ - -#define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */ -#define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */ -#define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */ - -#define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */ -#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ -#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ - -/* Wrapper for extended mode pgprot twiddling */ -#define _PAGE_EXT(x) ((unsigned long long)(x) << 32) - -/* software: moves to PTEA.TC (Timing Control) */ -#define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ -#define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ - -/* software: moves to PTEA.SA[2:0] (Space Attributes) */ -#define _PAGE_PCC_IODYN 0x00000001 /* IO space, dynamically sized bus */ -#define _PAGE_PCC_IO8 0x20000000 /* IO space, 8 bit bus */ -#define _PAGE_PCC_IO16 0x20000001 /* IO space, 16 bit bus */ -#define _PAGE_PCC_COM8 0x40000000 /* Common Memory space, 8 bit bus */ -#define _PAGE_PCC_COM16 0x40000001 /* Common Memory space, 16 bit bus */ -#define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */ -#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */ - -/* Mask which drops unused bits from the PTEL value */ -#if defined(CONFIG_CPU_SH3) -#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \ - _PAGE_FILE | _PAGE_SZ1 | \ - _PAGE_HW_SHARED) -#elif defined(CONFIG_X2TLB) -/* Get rid of the legacy PR/SZ bits when using extended mode */ -#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \ - _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK) +#ifdef CONFIG_32BIT +#define PHYS_ADDR_MASK 0xffffffff #else -#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) +#define PHYS_ADDR_MASK 0x1fffffff #endif -#define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) +#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) -/* Hardware flags, page size encoding */ -#if defined(CONFIG_X2TLB) -# if defined(CONFIG_PAGE_SIZE_4KB) -# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0) -# elif defined(CONFIG_PAGE_SIZE_8KB) -# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1) -# elif defined(CONFIG_PAGE_SIZE_64KB) -# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2) -# endif +#ifdef CONFIG_SUPERH32 +#define VMALLOC_START (P3SEG) #else -# if defined(CONFIG_PAGE_SIZE_4KB) -# define _PAGE_FLAGS_HARD _PAGE_SZ0 -# elif defined(CONFIG_PAGE_SIZE_64KB) -# define _PAGE_FLAGS_HARD _PAGE_SZ1 -# endif +#define VMALLOC_START (0xf0000000) #endif +#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) -#if defined(CONFIG_X2TLB) -# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ3) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | 
_PAGE_EXT_ESZ3) -# endif +#if defined(CONFIG_SUPERH32) +#include <asm/pgtable_32.h> #else -# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -# define _PAGE_SZHUGE (_PAGE_SZ1) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) -# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) -# endif -#endif - -/* - * Stub out _PAGE_SZHUGE if we don't have a good definition for it, - * to make pte_mkhuge() happy. - */ -#ifndef _PAGE_SZHUGE -# define _PAGE_SZHUGE (_PAGE_FLAGS_HARD) -#endif - -#define _PAGE_CHG_MASK \ - (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) - -#ifndef __ASSEMBLY__ - -#if defined(CONFIG_X2TLB) /* SH-X2 TLB */ -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_USER_READ | \ - _PAGE_EXT_USER_WRITE)) - -#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \ - _PAGE_EXT_KERN_READ | \ - _PAGE_EXT_USER_EXEC | \ - _PAGE_EXT_USER_READ)) - -#define PAGE_COPY PAGE_EXECREAD - -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_USER_READ)) - -#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_USER_WRITE)) - -#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_EXEC | \ - _PAGE_EXT_USER_WRITE | \ - _PAGE_EXT_USER_READ | \ - _PAGE_EXT_USER_EXEC)) - -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC)) - -#define PAGE_KERNEL_NOCACHE \ - __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_HW_SHARED | \ - _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC)) - -#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_EXEC)) - -#define PAGE_KERNEL_PCC(slot, type) \ - __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC) \ - (slot ? 
_PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ - (type)) - -#elif defined(CONFIG_MMU) /* SH-X TLB */ -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_CACHABLE | _PAGE_ACCESSED | \ - _PAGE_FLAGS_HARD) - -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_EXECREAD PAGE_READONLY -#define PAGE_RWX PAGE_SHARED -#define PAGE_WRITEONLY PAGE_SHARED - -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) - -#define PAGE_KERNEL_NOCACHE \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_HW_SHARED | \ - _PAGE_FLAGS_HARD) - -#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) - -#define PAGE_KERNEL_PCC(slot, type) \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ - (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ - (type)) -#else /* no mmu */ -#define PAGE_NONE __pgprot(0) -#define PAGE_SHARED __pgprot(0) -#define PAGE_COPY __pgprot(0) -#define PAGE_EXECREAD __pgprot(0) -#define PAGE_RWX __pgprot(0) -#define PAGE_READONLY __pgprot(0) -#define PAGE_WRITEONLY __pgprot(0) -#define PAGE_KERNEL __pgprot(0) -#define PAGE_KERNEL_NOCACHE __pgprot(0) -#define PAGE_KERNEL_RO __pgprot(0) - -#define PAGE_KERNEL_PCC(slot, type) \ - __pgprot(0) +#include <asm/pgtable_64.h> #endif -#endif /* __ASSEMBLY__ */ - /* * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page * protection for execute, and considers it the same as a read. Also, write @@ -357,208 +118,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define __S110 PAGE_RWX #define __S111 PAGE_RWX -#ifndef __ASSEMBLY__ - -/* - * Certain architectures need to do special things when PTEs - * within a page table are directly modified. Thus, the following - * hook is made available. - */ -#ifdef CONFIG_X2TLB -static inline void set_pte(pte_t *ptep, pte_t pte) -{ - ptep->pte_high = pte.pte_high; - smp_wmb(); - ptep->pte_low = pte.pte_low; -} -#else -#define set_pte(pteptr, pteval) (*(pteptr) = pteval) -#endif - -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) - -/* - * (pmds are folded into pgds so this doesn't get actually called, - * but the define is needed for a generic inline function.) 
- */ -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) - -#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) - -#define pfn_pte(pfn, prot) \ - __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) -#define pfn_pmd(pfn, prot) \ - __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) - -#define pte_none(x) (!pte_val(x)) -#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) - -#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) - -#define pmd_none(x) (!pmd_val(x)) -#define pmd_present(x) (pmd_val(x)) -#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) -#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK) - -#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) -#define pte_page(x) pfn_to_page(pte_pfn(x)) - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT)) -#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY) -#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED) -#define pte_file(pte) ((pte).pte_low & _PAGE_FILE) - -#ifdef CONFIG_X2TLB -#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) -#else -#define pte_write(pte) ((pte).pte_low & _PAGE_RW) -#endif - -#define PTE_BIT_FUNC(h,fn,op) \ -static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } - -#ifdef CONFIG_X2TLB -/* - * We cheat a bit in the SH-X2 TLB case. As the permission bits are - * individually toggled (and user permissions are entirely decoupled from - * kernel permissions), we attempt to couple them a bit more sanely here. - */ -PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); -PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); -PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); -#else -PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW); -PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW); -PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE); -#endif - -PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); -PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); -PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); -PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); - -/* - * Macro and implementation to make a page protection as uncachable. - */ -#define pgprot_writecombine(prot) \ - __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE) - -#define pgprot_noncached pgprot_writecombine - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - * - * extern pte_t mk_pte(struct page *page, pgprot_t pgprot) - */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - pte.pte_low &= _PAGE_CHG_MASK; - pte.pte_low |= pgprot_val(newprot); - -#ifdef CONFIG_X2TLB - pte.pte_high |= pgprot_val(newprot) >> 32; -#endif - - return pte; -} - -#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd)) -#define pmd_page(pmd) (virt_to_page(pmd_val(pmd))) - -/* to find an entry in a page-table-directory. */ -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) - -/* Find an entry in the third-level page table.. 
*/ -#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) -#define pte_offset_map(dir, address) pte_offset_kernel(dir, address) -#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address) - -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) - -#ifdef CONFIG_X2TLB -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \ - &(e), (e).pte_high, (e).pte_low) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) -#else -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) -#endif - -struct vm_area_struct; -extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); - -/* - * Encode and de-code a swap entry - * - * Constraints: - * _PAGE_FILE at bit 0 - * _PAGE_PRESENT at bit 8 - * _PAGE_PROTNONE at bit 9 - * - * For the normal case, we encode the swap type into bits 0:7 and the - * swap offset into bits 10:30. For the 64-bit PTE case, we keep the - * preserved bits in the low 32-bits and use the upper 32 as the swap - * offset (along with a 5-bit type), following the same approach as x86 - * PAE. This keeps the logic quite simple, and allows for a full 32 - * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with - * in the pte_low case. - * - * As is evident by the Alpha code, if we ever get a 64-bit unsigned - * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes - * much cleaner.. - * - * NOTE: We should set ZEROs at the position of _PAGE_PRESENT - * and _PAGE_PROTNONE bits - */ -#ifdef CONFIG_X2TLB -#define __swp_type(x) ((x).val & 0x1f) -#define __swp_offset(x) ((x).val >> 5) -#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5}) -#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) -#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) - -/* - * Encode and decode a nonlinear file mapping entry - */ -#define pte_to_pgoff(pte) ((pte).pte_high) -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) - -#define PTE_FILE_MAX_BITS 32 -#else -#define __swp_type(x) ((x).val & 0xff) -#define __swp_offset(x) ((x).val >> 10) -#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10}) - -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 }) - -/* - * Encode and decode a nonlinear file mapping entry - */ -#define PTE_FILE_MAX_BITS 29 -#define pte_to_pgoff(pte) (pte_val(pte) >> 1) -#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE }) -#endif - typedef pte_t *pte_addr_t; #define kern_addr_valid(addr) (1) @@ -566,27 +125,28 @@ typedef pte_t *pte_addr_t; #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) -struct mm_struct; +#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) /* * No page table caches to initialise */ #define pgtable_cache_init() do { } while (0) -#ifndef CONFIG_MMU -extern unsigned int kobjsize(const void *objp); -#endif /* !CONFIG_MMU */ - #if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ defined(CONFIG_SH7705_CACHE_32KB)) +struct mm_struct; #define __HAVE_ARCH_PTEP_GET_AND_CLEAR -extern pte_t ptep_get_and_clear(struct mm_struct *mm, 
unsigned long addr, pte_t *ptep); +pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); #endif +struct vm_area_struct; +extern void update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte); extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init(void); +extern void page_table_range_init(unsigned long start, unsigned long end, + pgd_t *pgd); #include <asm-generic/pgtable.h> -#endif /* !__ASSEMBLY__ */ -#endif /* __ASM_SH_PAGE_H */ +#endif /* __ASM_SH_PGTABLE_H */ diff --git a/include/asm-sh/pgtable_32.h b/include/asm-sh/pgtable_32.h new file mode 100644 index 000000000000..3e3557c53c55 --- /dev/null +++ b/include/asm-sh/pgtable_32.h @@ -0,0 +1,474 @@ +#ifndef __ASM_SH_PGTABLE_32_H +#define __ASM_SH_PGTABLE_32_H + +/* + * Linux PTEL encoding. + * + * Hardware and software bit definitions for the PTEL value (see below for + * notes on SH-X2 MMUs and 64-bit PTEs): + * + * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). + * + * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the + * hardware PTEL value can't have the SH-bit set when MMUCR.IX is set, + * which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT). + * + * In order to keep this relatively clean, do not use these for defining + * SH-3 specific flags until all of the other unused bits have been + * exhausted. + * + * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE. + * + * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages. + * Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused. + * + * - On 29 bit platforms, bits 31 to 29 are used for the space attributes + * and timing control which (together with bit 0) are moved into the + * old-style PTEA on the parts that support it. + * + * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day. + * + * SH-X2 MMUs and extended PTEs + * + * SH-X2 supports an extended mode TLB with split data arrays due to the + * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and + * SZ bit placeholders still exist in data array 1, but are implemented as + * reserved bits, with the real logic existing in data array 2. + * + * The downside to this is that we can no longer fit everything in to a 32-bit + * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus + * side, this gives us quite a few spare bits to play with for future usage. + */ +/* Legacy and compat mode bits */ +#define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ +#define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ +#define _PAGE_DIRTY 0x004 /* D-bit : page changed */ +#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ +#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ +#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ +#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/ +#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ +#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ +#define _PAGE_PROTNONE 0x200 /* software: if not present */ +#define _PAGE_ACCESSED 0x400 /* software: page referenced */ +#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? 
*/ + +#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1) +#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER) + +/* Extended mode bits */ +#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */ +#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */ +#define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */ +#define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */ + +#define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */ +#define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */ +#define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */ + +#define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */ +#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ +#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ + +/* Wrapper for extended mode pgprot twiddling */ +#define _PAGE_EXT(x) ((unsigned long long)(x) << 32) + +/* software: moves to PTEA.TC (Timing Control) */ +#define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ +#define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ + +/* software: moves to PTEA.SA[2:0] (Space Attributes) */ +#define _PAGE_PCC_IODYN 0x00000001 /* IO space, dynamically sized bus */ +#define _PAGE_PCC_IO8 0x20000000 /* IO space, 8 bit bus */ +#define _PAGE_PCC_IO16 0x20000001 /* IO space, 16 bit bus */ +#define _PAGE_PCC_COM8 0x40000000 /* Common Memory space, 8 bit bus */ +#define _PAGE_PCC_COM16 0x40000001 /* Common Memory space, 16 bit bus */ +#define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */ +#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */ + +/* Mask which drops unused bits from the PTEL value */ +#if defined(CONFIG_CPU_SH3) +#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \ + _PAGE_FILE | _PAGE_SZ1 | \ + _PAGE_HW_SHARED) +#elif defined(CONFIG_X2TLB) +/* Get rid of the legacy PR/SZ bits when using extended mode */ +#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \ + _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK) +#else +#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) +#endif + +#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) + +/* Hardware flags, page size encoding */ +#if defined(CONFIG_X2TLB) +# if defined(CONFIG_PAGE_SIZE_4KB) +# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0) +# elif defined(CONFIG_PAGE_SIZE_8KB) +# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1) +# elif defined(CONFIG_PAGE_SIZE_64KB) +# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2) +# endif +#else +# if defined(CONFIG_PAGE_SIZE_4KB) +# define _PAGE_FLAGS_HARD _PAGE_SZ0 +# elif defined(CONFIG_PAGE_SIZE_64KB) +# define _PAGE_FLAGS_HARD _PAGE_SZ1 +# endif +#endif + +#if defined(CONFIG_X2TLB) +# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2) +# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) +# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2) +# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) +# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2) +# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) +# define _PAGE_SZHUGE (_PAGE_EXT_ESZ3) +# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) +# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3) +# endif +#else +# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +# define _PAGE_SZHUGE (_PAGE_SZ1) +# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) +# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) +# endif +#endif + +/* + * Stub out _PAGE_SZHUGE if we don't have a good 
definition for it, + * to make pte_mkhuge() happy. + */ +#ifndef _PAGE_SZHUGE +# define _PAGE_SZHUGE (_PAGE_FLAGS_HARD) +#endif + +#define _PAGE_CHG_MASK \ + (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) + +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_X2TLB) /* SH-X2 TLB */ +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ + _PAGE_ACCESSED | _PAGE_FLAGS_HARD) + +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ + _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_READ | \ + _PAGE_EXT_KERN_WRITE | \ + _PAGE_EXT_USER_READ | \ + _PAGE_EXT_USER_WRITE)) + +#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ + _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \ + _PAGE_EXT_KERN_READ | \ + _PAGE_EXT_USER_EXEC | \ + _PAGE_EXT_USER_READ)) + +#define PAGE_COPY PAGE_EXECREAD + +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ + _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_READ | \ + _PAGE_EXT_USER_READ)) + +#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ + _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \ + _PAGE_EXT_USER_WRITE)) + +#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ + _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \ + _PAGE_EXT_KERN_READ | \ + _PAGE_EXT_KERN_EXEC | \ + _PAGE_EXT_USER_WRITE | \ + _PAGE_EXT_USER_READ | \ + _PAGE_EXT_USER_EXEC)) + +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ + _PAGE_DIRTY | _PAGE_ACCESSED | \ + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_READ | \ + _PAGE_EXT_KERN_WRITE | \ + _PAGE_EXT_KERN_EXEC)) + +#define PAGE_KERNEL_NOCACHE \ + __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_HW_SHARED | \ + _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_READ | \ + _PAGE_EXT_KERN_WRITE | \ + _PAGE_EXT_KERN_EXEC)) + +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ + _PAGE_DIRTY | _PAGE_ACCESSED | \ + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_READ | \ + _PAGE_EXT_KERN_EXEC)) + +#define PAGE_KERNEL_PCC(slot, type) \ + __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ + _PAGE_EXT(_PAGE_EXT_KERN_READ | \ + _PAGE_EXT_KERN_WRITE | \ + _PAGE_EXT_KERN_EXEC) \ + (slot ? 
_PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ + (type)) + +#elif defined(CONFIG_MMU) /* SH-X TLB */ +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ + _PAGE_ACCESSED | _PAGE_FLAGS_HARD) + +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ + _PAGE_CACHABLE | _PAGE_ACCESSED | \ + _PAGE_FLAGS_HARD) + +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ + _PAGE_ACCESSED | _PAGE_FLAGS_HARD) + +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ + _PAGE_ACCESSED | _PAGE_FLAGS_HARD) + +#define PAGE_EXECREAD PAGE_READONLY +#define PAGE_RWX PAGE_SHARED +#define PAGE_WRITEONLY PAGE_SHARED + +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \ + _PAGE_DIRTY | _PAGE_ACCESSED | \ + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) + +#define PAGE_KERNEL_NOCACHE \ + __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_HW_SHARED | \ + _PAGE_FLAGS_HARD) + +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ + _PAGE_DIRTY | _PAGE_ACCESSED | \ + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) + +#define PAGE_KERNEL_PCC(slot, type) \ + __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ + (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ + (type)) +#else /* no mmu */ +#define PAGE_NONE __pgprot(0) +#define PAGE_SHARED __pgprot(0) +#define PAGE_COPY __pgprot(0) +#define PAGE_EXECREAD __pgprot(0) +#define PAGE_RWX __pgprot(0) +#define PAGE_READONLY __pgprot(0) +#define PAGE_WRITEONLY __pgprot(0) +#define PAGE_KERNEL __pgprot(0) +#define PAGE_KERNEL_NOCACHE __pgprot(0) +#define PAGE_KERNEL_RO __pgprot(0) + +#define PAGE_KERNEL_PCC(slot, type) \ + __pgprot(0) +#endif + +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +/* + * Certain architectures need to do special things when PTEs + * within a page table are directly modified. Thus, the following + * hook is made available. + */ +#ifdef CONFIG_X2TLB +static inline void set_pte(pte_t *ptep, pte_t pte) +{ + ptep->pte_high = pte.pte_high; + smp_wmb(); + ptep->pte_low = pte.pte_low; +} +#else +#define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#endif + +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +/* + * (pmds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) + +#define pfn_pte(pfn, prot) \ + __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) \ + __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define pte_none(x) (!pte_val(x)) +#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) + +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) + +#define pmd_none(x) (!pmd_val(x)) +#define pmd_present(x) (pmd_val(x)) +#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) +#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK) + +#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ +#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT)) +#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY) +#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED) +#define pte_file(pte) ((pte).pte_low & _PAGE_FILE) + +#ifdef CONFIG_X2TLB +#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) +#else +#define pte_write(pte) ((pte).pte_low & _PAGE_RW) +#endif + +#define PTE_BIT_FUNC(h,fn,op) \ +static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } + +#ifdef CONFIG_X2TLB +/* + * We cheat a bit in the SH-X2 TLB case. As the permission bits are + * individually toggled (and user permissions are entirely decoupled from + * kernel permissions), we attempt to couple them a bit more sanely here. + */ +PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); +PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); +PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); +#else +PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW); +PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW); +PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE); +#endif + +PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); +PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); +PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); +PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); + +/* + * Macro and implementation to make a page protection as uncachable. + */ +#define pgprot_writecombine(prot) \ + __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE) + +#define pgprot_noncached pgprot_writecombine + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * extern pte_t mk_pte(struct page *page, pgprot_t pgprot) + */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte.pte_low &= _PAGE_CHG_MASK; + pte.pte_low |= pgprot_val(newprot); + +#ifdef CONFIG_X2TLB + pte.pte_high |= pgprot_val(newprot) >> 32; +#endif + + return pte; +} + +#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd)) +#define pmd_page(pmd) (virt_to_page(pmd_val(pmd))) + +/* to find an entry in a page-table-directory. */ +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* Find an entry in the third-level page table.. 
*/ +#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) +#define pte_offset_map(dir, address) pte_offset_kernel(dir, address) +#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address) + +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + +#ifdef CONFIG_X2TLB +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \ + &(e), (e).pte_high, (e).pte_low) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) +#else +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) +#endif + +/* + * Encode and de-code a swap entry + * + * Constraints: + * _PAGE_FILE at bit 0 + * _PAGE_PRESENT at bit 8 + * _PAGE_PROTNONE at bit 9 + * + * For the normal case, we encode the swap type into bits 0:7 and the + * swap offset into bits 10:30. For the 64-bit PTE case, we keep the + * preserved bits in the low 32-bits and use the upper 32 as the swap + * offset (along with a 5-bit type), following the same approach as x86 + * PAE. This keeps the logic quite simple, and allows for a full 32 + * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with + * in the pte_low case. + * + * As is evident by the Alpha code, if we ever get a 64-bit unsigned + * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes + * much cleaner.. + * + * NOTE: We should set ZEROs at the position of _PAGE_PRESENT + * and _PAGE_PROTNONE bits + */ +#ifdef CONFIG_X2TLB +#define __swp_type(x) ((x).val & 0x1f) +#define __swp_offset(x) ((x).val >> 5) +#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5}) +#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) +#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) + +/* + * Encode and decode a nonlinear file mapping entry + */ +#define pte_to_pgoff(pte) ((pte).pte_high) +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) + +#define PTE_FILE_MAX_BITS 32 +#else +#define __swp_type(x) ((x).val & 0xff) +#define __swp_offset(x) ((x).val >> 10) +#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10}) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 }) + +/* + * Encode and decode a nonlinear file mapping entry + */ +#define PTE_FILE_MAX_BITS 29 +#define pte_to_pgoff(pte) (pte_val(pte) >> 1) +#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE }) +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_SH_PGTABLE_32_H */ diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh/pgtable_64.h index 3488fe32e436..972211671c9a 100644 --- a/include/asm-sh64/pgtable.h +++ b/include/asm-sh/pgtable_64.h @@ -1,136 +1,40 @@ -#ifndef __ASM_SH64_PGTABLE_H -#define __ASM_SH64_PGTABLE_H - -#include <asm-generic/4level-fixup.h> +#ifndef __ASM_SH_PGTABLE_64_H +#define __ASM_SH_PGTABLE_64_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. + * include/asm-sh/pgtable_64.h * - * include/asm-sh64/pgtable.h + * This file contains the functions and defines necessary to modify and use + * the SuperH page table tree. 
* * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003, 2004 Paul Mundt * Copyright (C) 2003, 2004 Richard Curnow * - * This file contains the functions and defines necessary to modify and use - * the SuperH page table tree. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ - -#ifndef __ASSEMBLY__ +#include <linux/threads.h> #include <asm/processor.h> #include <asm/page.h> -#include <linux/threads.h> - -struct vm_area_struct; - -extern void paging_init(void); - -/* We provide our own get_unmapped_area to avoid cache synonym issue */ -#define HAVE_ARCH_UNMAPPED_AREA - -/* - * Basically we have the same two-level (which is the logical three level - * Linux page table layout folded) page tables as the i386. - */ - -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -extern unsigned char empty_zero_page[PAGE_SIZE]; -#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page)) - -#endif /* !__ASSEMBLY__ */ - -/* - * NEFF and NPHYS related defines. - * FIXME : These need to be model-dependent. For now this is OK, SH5-101 and SH5-103 - * implement 32 bits effective and 32 bits physical. But future implementations may - * extend beyond this. - */ -#define NEFF 32 -#define NEFF_SIGN (1LL << (NEFF - 1)) -#define NEFF_MASK (-1LL << NEFF) - -#define NPHYS 32 -#define NPHYS_SIGN (1LL << (NPHYS - 1)) -#define NPHYS_MASK (-1LL << NPHYS) - -/* Typically 2-level is sufficient up to 32 bits of virtual address space, beyond - that 3-level would be appropriate. */ -#if defined(CONFIG_SH64_PGTABLE_2_LEVEL) -/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */ -#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long)) -#define PTE_MAGNITUDE 3 /* sizeof(unsigned long long) magnit. */ -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE) - -/* top level: PMD. */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) -#define PGD_BITS (NEFF - PGDIR_SHIFT) -#define PTRS_PER_PGD (1<<PGD_BITS) - -/* middle level: PMD. This doesn't do anything for the 2-level case. */ -#define PTRS_PER_PMD (1) - -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define PMD_SHIFT PGDIR_SHIFT -#define PMD_SIZE PGDIR_SIZE -#define PMD_MASK PGDIR_MASK - -#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL) -/* - * three-level asymmetric paging structure: PGD is top level. - * The asymmetry comes from 32-bit pointers and 64-bit PTEs. - */ -/* bottom level: PTE. It's 9 bits = 512 pointers */ -#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long)) -#define PTE_MAGNITUDE 3 /* sizeof(unsigned long long) magnit. */ -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE) - -/* middle level: PMD. It's 10 bits = 1024 pointers */ -#define PTRS_PER_PMD ((1<<PAGE_SHIFT)/sizeof(unsigned long long *)) -#define PMD_MAGNITUDE 2 /* sizeof(unsigned long long *) magnit. */ -#define PMD_SHIFT (PTE_SHIFT + PTE_BITS) -#define PMD_BITS (PAGE_SHIFT - PMD_MAGNITUDE) - -/* top level: PMD. It's 1 bit = 2 pointers */ -#define PGDIR_SHIFT (PMD_SHIFT + PMD_BITS) -#define PGD_BITS (NEFF - PGDIR_SHIFT) -#define PTRS_PER_PGD (1<<PGD_BITS) - -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) - -#else -#error "No defined number of page table levels" -#endif /* * Error outputs. 
*/ #define pte_ERROR(e) \ printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e)) -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) /* * Table setting routines. Used within arch/mm only. */ -#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) static __inline__ void set_pte(pte_t *pteptr, pte_t pteval) { - unsigned long long x = ((unsigned long long) pteval.pte); + unsigned long long x = ((unsigned long long) pteval.pte_low); unsigned long long *xp = (unsigned long long *) pteptr; /* * Sign-extend based on NPHYS. @@ -157,61 +61,6 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) #define pgd_offset_k(address) pgd_offset(&init_mm, address) /* - * PGD level access routines. - * - * Note1: - * There's no need to use physical addresses since the tree walk is all - * in performed in software, until the PTE translation. - * - * Note 2: - * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad, - * clear (_PGD_EMPTY), present. When present, lower 3 nibbles contain - * _KERNPG_TABLE. Being a kernel virtual pointer also bit 31 must - * be 1. Assuming an arbitrary clear value of bit 31 set to 0 and - * lower 3 nibbles set to 0xFFF (_PGD_EMPTY) any other value is a - * bad pgd that must be notified via printk(). - * - */ -#define _PGD_EMPTY 0x0 - -#if defined(CONFIG_SH64_PGTABLE_2_LEVEL) -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } -#define pgd_present(pgd) ((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0) -#define pgd_clear(xx) do { } while(0) - -#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL) -#define pgd_present(pgd_entry) (1) -#define pgd_none(pgd_entry) (pgd_val((pgd_entry)) == _PGD_EMPTY) -/* TODO: Think later about what a useful definition of 'bad' would be now. */ -#define pgd_bad(pgd_entry) (0) -#define pgd_clear(pgd_entry_p) (set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY))) - -#endif - - -#define pgd_page_vaddr(pgd_entry) ((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK)) -#define pgd_page(pgd) (virt_to_page(pgd_val(pgd))) - - -/* - * PMD defines. Middle level. - */ - -/* PGD to PMD dereferencing */ -#if defined(CONFIG_SH64_PGTABLE_2_LEVEL) -static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) -{ - return (pmd_t *) dir; -} -#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL) -#define __pmd_offset(address) \ - (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) -#define pmd_offset(dir, addr) \ - ((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr))) -#endif - -/* * PMD level access routines. Same notes as above. */ #define _PMD_EMPTY 0x0 @@ -239,15 +88,7 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) -/* Round it up ! */ -#define USER_PTRS_PER_PGD ((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE) -#define FIRST_USER_ADDRESS 0 - #ifndef __ASSEMBLY__ -#define VMALLOC_END 0xff000000 -#define VMALLOC_START 0xf0000000 -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) - #define IOBASE_VADDR 0xff000000 #define IOBASE_END 0xffffffff @@ -315,43 +156,28 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +/* + * We have full permissions (Read/Write/Execute/Shared). 
+ */ +#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_CACHABLE | _PAGE_ACCESSED) + #define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ - _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \ +#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \ _PAGE_SHARED) -/* We need to include PAGE_EXECUTE in PAGE_COPY because it is the default - * protection mode for the stack. */ -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE) -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_USER) -#define PAGE_KERNEL __pgprot(_KERNPG_TABLE) - +#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE) /* - * In ST50 we have full permissions (Read/Write/Execute/Shared). - * Just match'em all. These are for mmap(), therefore all at least - * User/Cachable/Present/Accessed. No point in making Fault on Write. + * We need to include PAGE_EXECUTE in PAGE_COPY because it is the default + * protection mode for the stack. */ -#define __MMAP_COMMON (_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED) - /* sxwr */ -#define __P000 __pgprot(__MMAP_COMMON) -#define __P001 __pgprot(__MMAP_COMMON | _PAGE_READ) -#define __P010 __pgprot(__MMAP_COMMON) -#define __P011 __pgprot(__MMAP_COMMON | _PAGE_READ) -#define __P100 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE) -#define __P101 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ) -#define __P110 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE) -#define __P111 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ) - -#define __S000 __pgprot(__MMAP_COMMON | _PAGE_SHARED) -#define __S001 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ) -#define __S010 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE) -#define __S011 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE) -#define __S100 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE) -#define __S101 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ) -#define __S110 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE) -#define __S111 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE) +#define PAGE_COPY PAGE_EXECREAD + +#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ) +#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE) +#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \ + _PAGE_WRITE | _PAGE_EXECUTE) +#define PAGE_KERNEL __pgprot(_KERNPG_TABLE) /* Make it a device mapping for maximum safety (e.g. for mapping device registers into user-space via /dev/map). 
*/ @@ -453,12 +279,6 @@ static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } -typedef pte_t *pte_addr_t; -#define pgtable_cache_init() do { } while (0) - -extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); - /* Encode and decode a swap entry */ #define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c)) #define __swp_offset(x) ((x).val >> 8) @@ -471,26 +291,9 @@ extern void update_mmu_cache(struct vm_area_struct * vma, #define pte_to_pgoff(pte) (pte_val(pte)) #define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE }) -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -#define PageSkip(page) (0) -#define kern_addr_valid(addr) (1) - -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - #endif /* !__ASSEMBLY__ */ -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) - -#define pte_pfn(x) (((unsigned long)((x).pte)) >> PAGE_SHIFT) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) -extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; - -#include <asm-generic/pgtable.h> - -#endif /* __ASM_SH64_PGTABLE_H */ +#endif /* __ASM_SH_PGTABLE_64_H */ diff --git a/include/asm-sh/posix_types.h b/include/asm-sh/posix_types.h index 0a3d2f54ab27..4b9d11c9fc77 100644 --- a/include/asm-sh/posix_types.h +++ b/include/asm-sh/posix_types.h @@ -1,122 +1,7 @@ -#ifndef __ASM_SH_POSIX_TYPES_H -#define __ASM_SH_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. 
- */ - -typedef unsigned long __kernel_ino_t; -typedef unsigned short __kernel_mode_t; -typedef unsigned short __kernel_nlink_t; -typedef long __kernel_off_t; -typedef int __kernel_pid_t; -typedef unsigned short __kernel_ipc_pid_t; -typedef unsigned short __kernel_uid_t; -typedef unsigned short __kernel_gid_t; -typedef unsigned int __kernel_size_t; -typedef int __kernel_ssize_t; -typedef int __kernel_ptrdiff_t; -typedef long __kernel_time_t; -typedef long __kernel_suseconds_t; -typedef long __kernel_clock_t; -typedef int __kernel_timer_t; -typedef int __kernel_clockid_t; -typedef int __kernel_daddr_t; -typedef char * __kernel_caddr_t; -typedef unsigned short __kernel_uid16_t; -typedef unsigned short __kernel_gid16_t; -typedef unsigned int __kernel_uid32_t; -typedef unsigned int __kernel_gid32_t; - -typedef unsigned short __kernel_old_uid_t; -typedef unsigned short __kernel_old_gid_t; -typedef unsigned short __kernel_old_dev_t; - -#ifdef __GNUC__ -typedef long long __kernel_loff_t; -#endif - -typedef struct { -#if defined(__KERNEL__) || defined(__USE_ALL) - int val[2]; -#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ - int __val[2]; -#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ -} __kernel_fsid_t; - -#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) - -#undef __FD_SET -static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) -{ - unsigned long __tmp = __fd / __NFDBITS; - unsigned long __rem = __fd % __NFDBITS; - __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); -} - -#undef __FD_CLR -static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) -{ - unsigned long __tmp = __fd / __NFDBITS; - unsigned long __rem = __fd % __NFDBITS; - __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); -} - - -#undef __FD_ISSET -static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) -{ - unsigned long __tmp = __fd / __NFDBITS; - unsigned long __rem = __fd % __NFDBITS; - return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; -} - -/* - * This will unroll the loop for the normal constant case (8 ints, - * for a 256-bit fd_set) - */ -#undef __FD_ZERO -static __inline__ void __FD_ZERO(__kernel_fd_set *__p) -{ - unsigned long *__tmp = __p->fds_bits; - int __i; - - if (__builtin_constant_p(__FDSET_LONGS)) { - switch (__FDSET_LONGS) { - case 16: - __tmp[ 0] = 0; __tmp[ 1] = 0; - __tmp[ 2] = 0; __tmp[ 3] = 0; - __tmp[ 4] = 0; __tmp[ 5] = 0; - __tmp[ 6] = 0; __tmp[ 7] = 0; - __tmp[ 8] = 0; __tmp[ 9] = 0; - __tmp[10] = 0; __tmp[11] = 0; - __tmp[12] = 0; __tmp[13] = 0; - __tmp[14] = 0; __tmp[15] = 0; - return; - - case 8: - __tmp[ 0] = 0; __tmp[ 1] = 0; - __tmp[ 2] = 0; __tmp[ 3] = 0; - __tmp[ 4] = 0; __tmp[ 5] = 0; - __tmp[ 6] = 0; __tmp[ 7] = 0; - return; - - case 4: - __tmp[ 0] = 0; __tmp[ 1] = 0; - __tmp[ 2] = 0; __tmp[ 3] = 0; - return; - } - } - __i = __FDSET_LONGS; - while (__i) { - __i--; - *__tmp = 0; - __tmp++; - } -} - -#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ - -#endif /* __ASM_SH_POSIX_TYPES_H */ +#ifdef __KERNEL__ +# ifdef CONFIG_SUPERH32 +# include "posix_types_32.h" +# else +# include "posix_types_64.h" +# endif +#endif /* __KERNEL__ */ diff --git a/include/asm-sh/posix_types_32.h b/include/asm-sh/posix_types_32.h new file mode 100644 index 000000000000..0a3d2f54ab27 --- /dev/null +++ b/include/asm-sh/posix_types_32.h @@ -0,0 +1,122 @@ +#ifndef __ASM_SH_POSIX_TYPES_H +#define __ASM_SH_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little 
careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned short __kernel_uid_t; +typedef unsigned short __kernel_gid_t; +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +typedef unsigned short __kernel_old_dev_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if defined(__KERNEL__) || defined(__USE_ALL) + int val[2]; +#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ + int __val[2]; +#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ +} __kernel_fsid_t; + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); +} + + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; +} + +/* + * This will unroll the loop for the normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *__p) +{ + unsigned long *__tmp = __p->fds_bits; + int __i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 16: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + __tmp[ 8] = 0; __tmp[ 9] = 0; + __tmp[10] = 0; __tmp[11] = 0; + __tmp[12] = 0; __tmp[13] = 0; + __tmp[14] = 0; __tmp[15] = 0; + return; + + case 8: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + return; + + case 4: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + return; + } + } + __i = __FDSET_LONGS; + while (__i) { + __i--; + *__tmp = 0; + __tmp++; + } +} + +#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ + +#endif /* __ASM_SH_POSIX_TYPES_H */ diff --git a/include/asm-sh64/posix_types.h b/include/asm-sh/posix_types_64.h index 0620317a6f0f..0620317a6f0f 100644 --- a/include/asm-sh64/posix_types.h +++ b/include/asm-sh/posix_types_64.h diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h index fda68480f377..c9b14161f73d 100644 --- a/include/asm-sh/processor.h +++ b/include/asm-sh/processor.h @@ -1,32 +1,10 @@ -/* - * include/asm-sh/processor.h - 
* - * Copyright (C) 1999, 2000 Niibe Yutaka - * Copyright (C) 2002, 2003 Paul Mundt - */ - #ifndef __ASM_SH_PROCESSOR_H #define __ASM_SH_PROCESSOR_H -#ifdef __KERNEL__ -#include <linux/compiler.h> -#include <asm/page.h> -#include <asm/types.h> -#include <asm/cache.h> -#include <asm/ptrace.h> #include <asm/cpu-features.h> +#include <asm/fpu.h> -/* - * Default implementation of macro that returns current - * instruction pointer ("program counter"). - */ -#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; }) - -/* Core Processor Version Register */ -#define CCN_PVR 0xff000030 -#define CCN_CVR 0xff000040 -#define CCN_PRR 0xff000044 - +#ifndef __ASSEMBLY__ /* * CPU type and hardware bug flags. Kept separately for each CPU. * @@ -39,247 +17,49 @@ enum cpu_type { CPU_SH7619, /* SH-2A types */ - CPU_SH7206, + CPU_SH7203, CPU_SH7206, CPU_SH7263, /* SH-3 types */ CPU_SH7705, CPU_SH7706, CPU_SH7707, CPU_SH7708, CPU_SH7708S, CPU_SH7708R, CPU_SH7709, CPU_SH7709A, CPU_SH7710, CPU_SH7712, - CPU_SH7720, CPU_SH7729, + CPU_SH7720, CPU_SH7721, CPU_SH7729, /* SH-4 types */ CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, CPU_SH7760, CPU_SH4_202, CPU_SH4_501, /* SH-4A types */ - CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3, + CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3, /* SH4AL-DSP types */ CPU_SH7343, CPU_SH7722, + /* SH-5 types */ + CPU_SH5_101, CPU_SH5_103, + /* Unknown subtype */ CPU_SH_NONE }; -struct sh_cpuinfo { - unsigned int type; - unsigned long loops_per_jiffy; - unsigned long asid_cache; - - struct cache_info icache; /* Primary I-cache */ - struct cache_info dcache; /* Primary D-cache */ - struct cache_info scache; /* Secondary cache */ - - unsigned long flags; -} __attribute__ ((aligned(L1_CACHE_BYTES))); - -extern struct sh_cpuinfo cpu_data[]; -#define boot_cpu_data cpu_data[0] -#define current_cpu_data cpu_data[smp_processor_id()] -#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] - -/* - * User space process size: 2GB. - * - * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff - */ -#define TASK_SIZE 0x7c000000UL - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) - -/* - * Bit of SR register - * - * FD-bit: - * When it's set, it means the processor doesn't have right to use FPU, - * and it results exception when the floating operation is executed. 
- * - * IMASK-bit: - * Interrupt level mask - */ -#define SR_FD 0x00008000 -#define SR_DSP 0x00001000 -#define SR_IMASK 0x000000f0 - -/* - * FPU structure and data - */ - -struct sh_fpu_hard_struct { - unsigned long fp_regs[16]; - unsigned long xfp_regs[16]; - unsigned long fpscr; - unsigned long fpul; - - long status; /* software status information */ -}; - -/* Dummy fpu emulator */ -struct sh_fpu_soft_struct { - unsigned long fp_regs[16]; - unsigned long xfp_regs[16]; - unsigned long fpscr; - unsigned long fpul; - - unsigned char lookahead; - unsigned long entry_pc; -}; - -union sh_fpu_union { - struct sh_fpu_hard_struct hard; - struct sh_fpu_soft_struct soft; -}; - -struct thread_struct { - /* Saved registers when thread is descheduled */ - unsigned long sp; - unsigned long pc; - - /* Hardware debugging registers */ - unsigned long ubc_pc; - - /* floating point info */ - union sh_fpu_union fpu; -}; - -typedef struct { - unsigned long seg; -} mm_segment_t; - -/* Count of active tasks with UBC settings */ -extern int ubc_usercnt; +/* Forward decl */ +struct sh_cpuinfo; -#define INIT_THREAD { \ - .sp = sizeof(init_stack) + (long) &init_stack, \ -} - -/* - * Do necessary setup to start up a newly executed thread. - */ -#define start_thread(regs, new_pc, new_sp) \ - set_fs(USER_DS); \ - regs->pr = 0; \ - regs->sr = SR_FD; /* User mode. */ \ - regs->pc = new_pc; \ - regs->regs[15] = new_sp - -/* Forward declaration, a strange C thing */ -struct task_struct; -struct mm_struct; - -/* Free all resources held by a thread. */ -extern void release_thread(struct task_struct *); - -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) - -/* - * create a kernel thread without removing it from tasklists - */ -extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); - -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while(0) -#define release_segments(mm) do { } while(0) - -/* - * FPU lazy state save handling. - */ - -static __inline__ void disable_fpu(void) -{ - unsigned long __dummy; - - /* Set FD flag in SR */ - __asm__ __volatile__("stc sr, %0\n\t" - "or %1, %0\n\t" - "ldc %0, sr" - : "=&r" (__dummy) - : "r" (SR_FD)); -} - -static __inline__ void enable_fpu(void) -{ - unsigned long __dummy; - - /* Clear out FD flag in SR */ - __asm__ __volatile__("stc sr, %0\n\t" - "and %1, %0\n\t" - "ldc %0, sr" - : "=&r" (__dummy) - : "r" (~SR_FD)); -} - -static __inline__ void release_fpu(struct pt_regs *regs) -{ - regs->sr |= SR_FD; -} - -static __inline__ void grab_fpu(struct pt_regs *regs) -{ - regs->sr &= ~SR_FD; -} - -extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); - -#define unlazy_fpu(tsk, regs) do { \ - if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ - save_fpu(tsk, regs); \ - } \ -} while (0) - -#define clear_fpu(tsk, regs) do { \ - if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ - clear_tsk_thread_flag(tsk, TIF_USEDFPU); \ - release_fpu(regs); \ - } \ -} while (0) - -/* Double presision, NANS as NANS, rounding to nearest, no exceptions */ -#define FPSCR_INIT 0x00080000 - -#define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */ -#define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */ - -/* - * Return saved PC of a blocked thread. 
- */ -#define thread_saved_pc(tsk) (tsk->thread.pc) - -void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs); -extern unsigned long get_wchan(struct task_struct *p); - -#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) -#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) - -#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") -#define cpu_relax() barrier() - -#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ - defined(CONFIG_CPU_SH4) -#define PREFETCH_STRIDE L1_CACHE_BYTES -#define ARCH_HAS_PREFETCH -#define ARCH_HAS_PREFETCHW -static inline void prefetch(void *x) -{ - __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); -} - -#define prefetchw(x) prefetch(x) -#endif +/* arch/sh/kernel/setup.c */ +const char *get_cpu_subtype(struct sh_cpuinfo *c); #ifdef CONFIG_VSYSCALL -extern int vsyscall_init(void); +int vsyscall_init(void); #else #define vsyscall_init() do { } while (0) #endif -/* arch/sh/kernel/setup.c */ -const char *get_cpu_subtype(struct sh_cpuinfo *c); +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_SUPERH32 +# include "processor_32.h" +#else +# include "processor_64.h" +#endif -#endif /* __KERNEL__ */ #endif /* __ASM_SH_PROCESSOR_H */ diff --git a/include/asm-sh/processor_32.h b/include/asm-sh/processor_32.h new file mode 100644 index 000000000000..a7edaa1a870c --- /dev/null +++ b/include/asm-sh/processor_32.h @@ -0,0 +1,215 @@ +/* + * include/asm-sh/processor.h + * + * Copyright (C) 1999, 2000 Niibe Yutaka + * Copyright (C) 2002, 2003 Paul Mundt + */ + +#ifndef __ASM_SH_PROCESSOR_32_H +#define __ASM_SH_PROCESSOR_32_H +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <asm/page.h> +#include <asm/types.h> +#include <asm/cache.h> +#include <asm/ptrace.h> + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; }) + +/* Core Processor Version Register */ +#define CCN_PVR 0xff000030 +#define CCN_CVR 0xff000040 +#define CCN_PRR 0xff000044 + +struct sh_cpuinfo { + unsigned int type; + unsigned long loops_per_jiffy; + unsigned long asid_cache; + + struct cache_info icache; /* Primary I-cache */ + struct cache_info dcache; /* Primary D-cache */ + struct cache_info scache; /* Secondary cache */ + + unsigned long flags; +} __attribute__ ((aligned(L1_CACHE_BYTES))); + +extern struct sh_cpuinfo cpu_data[]; +#define boot_cpu_data cpu_data[0] +#define current_cpu_data cpu_data[smp_processor_id()] +#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] + +/* + * User space process size: 2GB. + * + * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff + */ +#define TASK_SIZE 0x7c000000UL + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) + +/* + * Bit of SR register + * + * FD-bit: + * When it's set, it means the processor doesn't have right to use FPU, + * and it results exception when the floating operation is executed. 
+ * + * IMASK-bit: + * Interrupt level mask + */ +#define SR_DSP 0x00001000 +#define SR_IMASK 0x000000f0 + +/* + * FPU structure and data + */ + +struct sh_fpu_hard_struct { + unsigned long fp_regs[16]; + unsigned long xfp_regs[16]; + unsigned long fpscr; + unsigned long fpul; + + long status; /* software status information */ +}; + +/* Dummy fpu emulator */ +struct sh_fpu_soft_struct { + unsigned long fp_regs[16]; + unsigned long xfp_regs[16]; + unsigned long fpscr; + unsigned long fpul; + + unsigned char lookahead; + unsigned long entry_pc; +}; + +union sh_fpu_union { + struct sh_fpu_hard_struct hard; + struct sh_fpu_soft_struct soft; +}; + +struct thread_struct { + /* Saved registers when thread is descheduled */ + unsigned long sp; + unsigned long pc; + + /* Hardware debugging registers */ + unsigned long ubc_pc; + + /* floating point info */ + union sh_fpu_union fpu; +}; + +typedef struct { + unsigned long seg; +} mm_segment_t; + +/* Count of active tasks with UBC settings */ +extern int ubc_usercnt; + +#define INIT_THREAD { \ + .sp = sizeof(init_stack) + (long) &init_stack, \ +} + +/* + * Do necessary setup to start up a newly executed thread. + */ +#define start_thread(regs, new_pc, new_sp) \ + set_fs(USER_DS); \ + regs->pr = 0; \ + regs->sr = SR_FD; /* User mode. */ \ + regs->pc = new_pc; \ + regs->regs[15] = new_sp + +/* Forward declaration, a strange C thing */ +struct task_struct; +struct mm_struct; + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *); + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +/* + * create a kernel thread without removing it from tasklists + */ +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +/* Copy and release all segment info associated with a VM */ +#define copy_segments(p, mm) do { } while(0) +#define release_segments(mm) do { } while(0) + +/* + * FPU lazy state save handling. + */ + +static __inline__ void disable_fpu(void) +{ + unsigned long __dummy; + + /* Set FD flag in SR */ + __asm__ __volatile__("stc sr, %0\n\t" + "or %1, %0\n\t" + "ldc %0, sr" + : "=&r" (__dummy) + : "r" (SR_FD)); +} + +static __inline__ void enable_fpu(void) +{ + unsigned long __dummy; + + /* Clear out FD flag in SR */ + __asm__ __volatile__("stc sr, %0\n\t" + "and %1, %0\n\t" + "ldc %0, sr" + : "=&r" (__dummy) + : "r" (~SR_FD)); +} + +/* Double presision, NANS as NANS, rounding to nearest, no exceptions */ +#define FPSCR_INIT 0x00080000 + +#define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */ +#define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */ + +/* + * Return saved PC of a blocked thread. 
+ */ +#define thread_saved_pc(tsk) (tsk->thread.pc) + +void show_trace(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs); +extern unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) + +#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") +#define cpu_relax() barrier() + +#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ + defined(CONFIG_CPU_SH4) +#define PREFETCH_STRIDE L1_CACHE_BYTES +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +static inline void prefetch(void *x) +{ + __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); +} + +#define prefetchw(x) prefetch(x) +#endif + +#endif /* __KERNEL__ */ +#endif /* __ASM_SH_PROCESSOR_32_H */ diff --git a/include/asm-sh64/processor.h b/include/asm-sh/processor_64.h index eb2bee4b47b9..99c22b14a85b 100644 --- a/include/asm-sh64/processor.h +++ b/include/asm-sh/processor_64.h @@ -1,28 +1,25 @@ -#ifndef __ASM_SH64_PROCESSOR_H -#define __ASM_SH64_PROCESSOR_H +#ifndef __ASM_SH_PROCESSOR_64_H +#define __ASM_SH_PROCESSOR_64_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/processor.h + * include/asm-sh/processor_64.h * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003 Paul Mundt * Copyright (C) 2004 Richard Curnow * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ - -#include <asm/page.h> - #ifndef __ASSEMBLY__ +#include <linux/compiler.h> +#include <asm/page.h> #include <asm/types.h> #include <asm/cache.h> -#include <asm/registers.h> -#include <linux/threads.h> -#include <linux/compiler.h> +#include <asm/ptrace.h> +#include <asm/cpu/registers.h> /* * Default implementation of macro that returns current @@ -40,15 +37,6 @@ __asm__("gettr tr0, %1\n\t" \ pc; }) /* - * CPU type and hardware bug flags. Kept separately for each CPU. - */ -enum cpu_type { - CPU_SH5_101, - CPU_SH5_103, - CPU_SH_NONE -}; - -/* * TLB information structure * * Defined for both I and D tlb, per-processor. 
@@ -67,28 +55,26 @@ struct tlb_info { struct sh_cpuinfo { enum cpu_type type; unsigned long loops_per_jiffy; + unsigned long asid_cache; - char hard_math; - - unsigned long *pgd_quick; - unsigned long *pmd_quick; - unsigned long *pte_quick; - unsigned long pgtable_cache_sz; unsigned int cpu_clock, master_clock, bus_clock, module_clock; /* Cache info */ struct cache_info icache; struct cache_info dcache; + struct cache_info scache; /* TLB info */ struct tlb_info itlb; struct tlb_info dtlb; -}; -extern struct sh_cpuinfo boot_cpu_data; + unsigned long flags; +}; -#define cpu_data (&boot_cpu_data) -#define current_cpu_data boot_cpu_data +extern struct sh_cpuinfo cpu_data[]; +#define boot_cpu_data cpu_data[0] +#define current_cpu_data cpu_data[smp_processor_id()] +#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] #endif @@ -116,8 +102,6 @@ extern struct sh_cpuinfo boot_cpu_data; * Single step bit * */ -#define SR_FD 0x00008000 - #if defined(CONFIG_SH64_SR_WATCH) #define SR_MMU 0x84000000 #else @@ -178,6 +162,10 @@ struct thread_struct { union sh_fpu_union fpu; }; +typedef struct { + unsigned long seg; +} mm_segment_t; + #define INIT_MMAP \ { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } @@ -200,12 +188,12 @@ extern struct pt_regs fake_swapper_regs; */ #define SR_USER (SR_MMU | SR_FD) -#define start_thread(regs, new_pc, new_sp) \ - set_fs(USER_DS); \ - regs->sr = SR_USER; /* User mode. */ \ +#define start_thread(regs, new_pc, new_sp) \ + set_fs(USER_DS); \ + regs->sr = SR_USER; /* User mode. */ \ regs->pc = new_pc - 4; /* Compensate syscall exit */ \ regs->pc |= 1; /* Set SHmedia ! */ \ - regs->regs[18] = 0; \ + regs->regs[18] = 0; \ regs->regs[15] = new_sp /* Forward declaration, a strange C thing */ @@ -229,7 +217,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); * FPU lazy state save handling. 
*/ -static inline void release_fpu(void) +static inline void disable_fpu(void) { unsigned long long __dummy; @@ -241,7 +229,7 @@ static inline void release_fpu(void) : "r" (SR_FD)); } -static inline void grab_fpu(void) +static inline void enable_fpu(void) { unsigned long long __dummy; @@ -262,11 +250,12 @@ static inline void grab_fpu(void) #define FPSCR_INIT 0x00000000 #endif -/* Save the current FP regs */ -void fpsave(struct sh_fpu_hard_struct *fpregs); - +#ifdef CONFIG_SH_FPU /* Initialise the FP state of a task */ void fpinit(struct sh_fpu_hard_struct *fpregs); +#else +#define fpinit(fpregs) do { } while (0) +#endif extern struct task_struct *last_task_used_math; @@ -283,5 +272,4 @@ extern unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() #endif /* __ASSEMBLY__ */ -#endif /* __ASM_SH64_PROCESSOR_H */ - +#endif /* __ASM_SH_PROCESSOR_64_H */ diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h index b9789c8b4d15..8d6c92b3e770 100644 --- a/include/asm-sh/ptrace.h +++ b/include/asm-sh/ptrace.h @@ -5,7 +5,16 @@ * Copyright (C) 1999, 2000 Niibe Yutaka * */ - +#if defined(__SH5__) || defined(CONFIG_SUPERH64) +struct pt_regs { + unsigned long long pc; + unsigned long long sr; + unsigned long long syscall_nr; + unsigned long long regs[63]; + unsigned long long tregs[8]; + unsigned long long pad[2]; +}; +#else /* * GCC defines register number like this: * ----------------------------- @@ -28,7 +37,7 @@ #define REG_PR 17 #define REG_SR 18 -#define REG_GBR 19 +#define REG_GBR 19 #define REG_MACH 20 #define REG_MACL 21 @@ -80,10 +89,14 @@ struct pt_dspregs { #define PTRACE_GETDSPREGS 55 #define PTRACE_SETDSPREGS 56 +#endif #ifdef __KERNEL__ -#define user_mode(regs) (((regs)->sr & 0x40000000)==0) -#define instruction_pointer(regs) ((regs)->pc) +#include <asm/addrspace.h> + +#define user_mode(regs) (((regs)->sr & 0x40000000)==0) +#define instruction_pointer(regs) ((unsigned long)(regs)->pc) + extern void show_regs(struct pt_regs *); #ifdef CONFIG_SH_DSP @@ -100,10 +113,13 @@ static inline unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); - if (pc >= 0xa0000000UL && pc < 0xc0000000UL) +#ifdef P2SEG + if (pc >= P2SEG && pc < P3SEG) pc -= 0x20000000; +#endif + return pc; } -#endif +#endif /* __KERNEL__ */ #endif /* __ASM_SH_PTRACE_H */ diff --git a/include/asm-sh/r7780rp.h b/include/asm-sh/r7780rp.h index de37f933aa42..bdecea0840a0 100644 --- a/include/asm-sh/r7780rp.h +++ b/include/asm-sh/r7780rp.h @@ -121,21 +121,6 @@ #define IRLCNTR1 (PA_BCR + 0) /* Interrupt Control Register1 */ -#define IRQ_PCISLOT1 0 /* PCI Slot #1 IRQ */ -#define IRQ_PCISLOT2 1 /* PCI Slot #2 IRQ */ -#define IRQ_PCISLOT3 2 /* PCI Slot #3 IRQ */ -#define IRQ_PCISLOT4 3 /* PCI Slot #4 IRQ */ -#define IRQ_CFINST 5 /* CF Card Insert IRQ */ -#define IRQ_M66596 6 /* M66596 IRQ */ -#define IRQ_SDCARD 7 /* SD Card IRQ */ -#define IRQ_TUCHPANEL 8 /* Touch Panel IRQ */ -#define IRQ_SCI 9 /* SCI IRQ */ -#define IRQ_2SERIAL 10 /* Serial IRQ */ -#define IRQ_EXTENTION 11 /* EXTn IRQ */ -#define IRQ_ONETH 12 /* On board Ethernet IRQ */ -#define IRQ_PSW 13 /* Push Switch IRQ */ -#define IRQ_ZIGBEE 14 /* Ziggbee IO IRQ */ - #define IVDR_CK_ON 8 /* iVDR Clock ON */ #elif defined(CONFIG_SH_R7785RP) @@ -192,13 +177,19 @@ #define IRQ_AX88796 (HL_FPGA_IRQ_BASE + 0) #define IRQ_CF (HL_FPGA_IRQ_BASE + 1) -#ifndef IRQ_PSW #define IRQ_PSW (HL_FPGA_IRQ_BASE + 2) -#endif -#define IRQ_EXT1 (HL_FPGA_IRQ_BASE + 3) -#define IRQ_EXT4 (HL_FPGA_IRQ_BASE + 4) - -void 
make_r7780rp_irq(unsigned int irq); +#define IRQ_EXT0 (HL_FPGA_IRQ_BASE + 3) +#define IRQ_EXT1 (HL_FPGA_IRQ_BASE + 4) +#define IRQ_EXT2 (HL_FPGA_IRQ_BASE + 5) +#define IRQ_EXT3 (HL_FPGA_IRQ_BASE + 6) +#define IRQ_EXT4 (HL_FPGA_IRQ_BASE + 7) +#define IRQ_EXT5 (HL_FPGA_IRQ_BASE + 8) +#define IRQ_EXT6 (HL_FPGA_IRQ_BASE + 9) +#define IRQ_EXT7 (HL_FPGA_IRQ_BASE + 10) +#define IRQ_SMBUS (HL_FPGA_IRQ_BASE + 11) +#define IRQ_TP (HL_FPGA_IRQ_BASE + 12) +#define IRQ_RTC (HL_FPGA_IRQ_BASE + 13) +#define IRQ_TH_ALERT (HL_FPGA_IRQ_BASE + 14) unsigned char *highlander_init_irq_r7780mp(void); unsigned char *highlander_init_irq_r7780rp(void); diff --git a/include/asm-sh/rtc.h b/include/asm-sh/rtc.h index 858da99d37e0..ec45ba8e11d9 100644 --- a/include/asm-sh/rtc.h +++ b/include/asm-sh/rtc.h @@ -11,4 +11,6 @@ struct sh_rtc_platform_info { unsigned long capabilities; }; +#include <asm/cpu/rtc.h> + #endif /* _ASM_RTC_H */ diff --git a/include/asm-sh/scatterlist.h b/include/asm-sh/scatterlist.h index a7d0d1856a99..2084d0373693 100644 --- a/include/asm-sh/scatterlist.h +++ b/include/asm-sh/scatterlist.h @@ -13,7 +13,7 @@ struct scatterlist { unsigned int length; }; -#define ISA_DMA_THRESHOLD (0x1fffffff) +#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK /* These macros should be used after a pci_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. diff --git a/include/asm-sh/sdk7780.h b/include/asm-sh/sdk7780.h new file mode 100644 index 000000000000..697dc865f21b --- /dev/null +++ b/include/asm-sh/sdk7780.h @@ -0,0 +1,81 @@ +#ifndef __ASM_SH_RENESAS_SDK7780_H +#define __ASM_SH_RENESAS_SDK7780_H + +/* + * linux/include/asm-sh/sdk7780.h + * + * Renesas Solutions SH7780 SDK Support + * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <asm/addrspace.h> + +/* Box specific addresses. */ +#define SE_AREA0_WIDTH 4 /* Area0: 32bit */ +#define PA_ROM 0xa0000000 /* EPROM */ +#define PA_ROM_SIZE 0x00400000 /* EPROM size 4M byte */ +#define PA_FROM 0xa0800000 /* Flash-ROM */ +#define PA_FROM_SIZE 0x00400000 /* Flash-ROM size 4M byte */ +#define PA_EXT1 0xa4000000 +#define PA_EXT1_SIZE 0x04000000 +#define PA_SDRAM 0xa8000000 /* DDR-SDRAM(Area2/3) 128MB */ +#define PA_SDRAM_SIZE 0x08000000 + +#define PA_EXT4 0xb0000000 +#define PA_EXT4_SIZE 0x04000000 +#define PA_EXT_USER PA_EXT4 /* User Expansion Space */ + +#define PA_PERIPHERAL PA_AREA5_IO + +/* SRAM/Reserved */ +#define PA_RESERVED (PA_PERIPHERAL + 0) +/* FPGA base address */ +#define PA_FPGA (PA_PERIPHERAL + 0x01000000) +/* SMC LAN91C111 */ +#define PA_LAN (PA_PERIPHERAL + 0x01800000) + + +#define FPGA_SRSTR (PA_FPGA + 0x000) /* System reset */ +#define FPGA_IRQ0SR (PA_FPGA + 0x010) /* IRQ0 status */ +#define FPGA_IRQ0MR (PA_FPGA + 0x020) /* IRQ0 mask */ +#define FPGA_BDMR (PA_FPGA + 0x030) /* Board operating mode */ +#define FPGA_INTT0PRTR (PA_FPGA + 0x040) /* Interrupt test mode0 port */ +#define FPGA_INTT0SELR (PA_FPGA + 0x050) /* Int. test mode0 select */ +#define FPGA_INTT1POLR (PA_FPGA + 0x060) /* Int. 
test mode0 polarity */ +#define FPGA_NMIR (PA_FPGA + 0x070) /* NMI source */ +#define FPGA_NMIMR (PA_FPGA + 0x080) /* NMI mask */ +#define FPGA_IRQR (PA_FPGA + 0x090) /* IRQX source */ +#define FPGA_IRQMR (PA_FPGA + 0x0A0) /* IRQX mask */ +#define FPGA_SLEDR (PA_FPGA + 0x0B0) /* LED control */ +#define PA_LED FPGA_SLEDR +#define FPGA_MAPSWR (PA_FPGA + 0x0C0) /* Map switch */ +#define FPGA_FPVERR (PA_FPGA + 0x0D0) /* FPGA version */ +#define FPGA_FPDATER (PA_FPGA + 0x0E0) /* FPGA date */ +#define FPGA_RSE (PA_FPGA + 0x100) /* Reset source */ +#define FPGA_EASR (PA_FPGA + 0x110) /* External area select */ +#define FPGA_SPER (PA_FPGA + 0x120) /* Serial port enable */ +#define FPGA_IMSR (PA_FPGA + 0x130) /* Interrupt mode select */ +#define FPGA_PCIMR (PA_FPGA + 0x140) /* PCI Mode */ +#define FPGA_DIPSWMR (PA_FPGA + 0x150) /* DIPSW monitor */ +#define FPGA_FPODR (PA_FPGA + 0x160) /* Output port data */ +#define FPGA_ATAESR (PA_FPGA + 0x170) /* ATA extended bus status */ +#define FPGA_IRQPOLR (PA_FPGA + 0x180) /* IRQx polarity */ + + +#define SDK7780_NR_IRL 15 +/* IDE/ATA interrupt */ +#define IRQ_CFCARD 14 +/* SMC interrupt */ +#define IRQ_ETHERNET 6 + + +/* arch/sh/boards/renesas/sdk7780/irq.c */ +void init_sdk7780_IRQ(void); + +#define __IO_PREFIX sdk7780 +#include <asm/io_generic.h> + +#endif /* __ASM_SH_RENESAS_SDK7780_H */ diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h index bd9cbc967c2a..8f8f4ad400df 100644 --- a/include/asm-sh/sections.h +++ b/include/asm-sh/sections.h @@ -4,6 +4,7 @@ #include <asm-generic/sections.h> extern long __machvec_start, __machvec_end; +extern char __uncached_start, __uncached_end; extern char _ebss[]; #endif /* __ASM_SH_SECTIONS_H */ diff --git a/include/asm-sh/sigcontext.h b/include/asm-sh/sigcontext.h index eb8effba2e80..8ce1435bc0bf 100644 --- a/include/asm-sh/sigcontext.h +++ b/include/asm-sh/sigcontext.h @@ -4,6 +4,18 @@ struct sigcontext { unsigned long oldmask; +#if defined(__SH5__) || defined(CONFIG_CPU_SH5) + /* CPU registers */ + unsigned long long sc_regs[63]; + unsigned long long sc_tregs[8]; + unsigned long long sc_pc; + unsigned long long sc_sr; + + /* FPU registers */ + unsigned long long sc_fpregs[32]; + unsigned int sc_fpscr; + unsigned int sc_fpvalid; +#else /* CPU registers */ unsigned long sc_regs[16]; unsigned long sc_pc; @@ -13,7 +25,8 @@ struct sigcontext { unsigned long sc_mach; unsigned long sc_macl; -#if defined(__SH4__) || defined(CONFIG_CPU_SH4) +#if defined(__SH4__) || defined(CONFIG_CPU_SH4) || \ + defined(__SH2A__) || defined(CONFIG_CPU_SH2A) /* FPU registers */ unsigned long sc_fpregs[16]; unsigned long sc_xfpregs[16]; @@ -21,6 +34,7 @@ struct sigcontext { unsigned int sc_fpul; unsigned int sc_ownedfp; #endif +#endif }; #endif /* __ASM_SH_SIGCONTEXT_H */ diff --git a/include/asm-sh/spi.h b/include/asm-sh/spi.h new file mode 100644 index 000000000000..e96f5b0953c8 --- /dev/null +++ b/include/asm-sh/spi.h @@ -0,0 +1,13 @@ +#ifndef __ASM_SPI_H__ +#define __ASM_SPI_H__ + +struct sh_spi_info; + +struct sh_spi_info { + int bus_num; + int num_chipselect; + + void (*chip_select)(struct sh_spi_info *spi, int cs, int state); +}; + +#endif /* __ASM_SPI_H__ */ diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h index 6d6ad26e3a2a..e1810cc6e3da 100644 --- a/include/asm-sh/stat.h +++ b/include/asm-sh/stat.h @@ -15,6 +15,66 @@ struct __old_kernel_stat { unsigned long st_ctime; }; +#if defined(__SH5__) || defined(CONFIG_CPU_SH5) +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + 
unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * insane amounts of padding around dev_t's. + */ +struct stat64 { + unsigned short st_dev; + unsigned char __pad0[10]; + + unsigned long st_ino; + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned short st_rdev; + unsigned char __pad3[10]; + + long long st_size; + unsigned long st_blksize; + + unsigned long st_blocks; /* Number 512-byte blocks allocated. */ + unsigned long __pad4; /* future possible st_blocks high bits */ + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */ + + unsigned long __unused1; + unsigned long __unused2; +}; +#else struct stat { unsigned long st_dev; unsigned long st_ino; @@ -67,11 +127,12 @@ struct stat64 { unsigned long st_mtime_nsec; unsigned long st_ctime; - unsigned long st_ctime_nsec; + unsigned long st_ctime_nsec; unsigned long long st_ino; }; #define STAT_HAVE_NSEC 1 +#endif #endif /* __ASM_SH_STAT_H */ diff --git a/include/asm-sh/string.h b/include/asm-sh/string.h index 55f8db6bc1d7..8c1ea21dc0ae 100644 --- a/include/asm-sh/string.h +++ b/include/asm-sh/string.h @@ -1,131 +1,5 @@ -#ifndef __ASM_SH_STRING_H -#define __ASM_SH_STRING_H - -#ifdef __KERNEL__ - -/* - * Copyright (C) 1999 Niibe Yutaka - * But consider these trivial functions to be public domain. 
- */ - -#define __HAVE_ARCH_STRCPY -static inline char *strcpy(char *__dest, const char *__src) -{ - register char *__xdest = __dest; - unsigned long __dummy; - - __asm__ __volatile__("1:\n\t" - "mov.b @%1+, %2\n\t" - "mov.b %2, @%0\n\t" - "cmp/eq #0, %2\n\t" - "bf/s 1b\n\t" - " add #1, %0\n\t" - : "=r" (__dest), "=r" (__src), "=&z" (__dummy) - : "0" (__dest), "1" (__src) - : "memory", "t"); - - return __xdest; -} - -#define __HAVE_ARCH_STRNCPY -static inline char *strncpy(char *__dest, const char *__src, size_t __n) -{ - register char *__xdest = __dest; - unsigned long __dummy; - - if (__n == 0) - return __xdest; - - __asm__ __volatile__( - "1:\n" - "mov.b @%1+, %2\n\t" - "mov.b %2, @%0\n\t" - "cmp/eq #0, %2\n\t" - "bt/s 2f\n\t" - " cmp/eq %5,%1\n\t" - "bf/s 1b\n\t" - " add #1, %0\n" - "2:" - : "=r" (__dest), "=r" (__src), "=&z" (__dummy) - : "0" (__dest), "1" (__src), "r" (__src+__n) - : "memory", "t"); - - return __xdest; -} - -#define __HAVE_ARCH_STRCMP -static inline int strcmp(const char *__cs, const char *__ct) -{ - register int __res; - unsigned long __dummy; - - __asm__ __volatile__( - "mov.b @%1+, %3\n" - "1:\n\t" - "mov.b @%0+, %2\n\t" - "cmp/eq #0, %3\n\t" - "bt 2f\n\t" - "cmp/eq %2, %3\n\t" - "bt/s 1b\n\t" - " mov.b @%1+, %3\n\t" - "add #-2, %1\n\t" - "mov.b @%1, %3\n\t" - "sub %3, %2\n" - "2:" - : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy) - : "0" (__cs), "1" (__ct) - : "t"); - - return __res; -} - -#define __HAVE_ARCH_STRNCMP -static inline int strncmp(const char *__cs, const char *__ct, size_t __n) -{ - register int __res; - unsigned long __dummy; - - if (__n == 0) - return 0; - - __asm__ __volatile__( - "mov.b @%1+, %3\n" - "1:\n\t" - "mov.b @%0+, %2\n\t" - "cmp/eq %6, %0\n\t" - "bt/s 2f\n\t" - " cmp/eq #0, %3\n\t" - "bt/s 3f\n\t" - " cmp/eq %3, %2\n\t" - "bt/s 1b\n\t" - " mov.b @%1+, %3\n\t" - "add #-2, %1\n\t" - "mov.b @%1, %3\n" - "2:\n\t" - "sub %3, %2\n" - "3:" - :"=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy) - : "0" (__cs), "1" (__ct), "r" (__cs+__n) - : "t"); - - return __res; -} - -#define __HAVE_ARCH_MEMSET -extern void *memset(void *__s, int __c, size_t __count); - -#define __HAVE_ARCH_MEMCPY -extern void *memcpy(void *__to, __const__ void *__from, size_t __n); - -#define __HAVE_ARCH_MEMMOVE -extern void *memmove(void *__dest, __const__ void *__src, size_t __n); - -#define __HAVE_ARCH_MEMCHR -extern void *memchr(const void *__s, int __c, size_t __n); - -#define __HAVE_ARCH_STRLEN -extern size_t strlen(const char *); - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH_STRING_H */ +#ifdef CONFIG_SUPERH32 +# include "string_32.h" +#else +# include "string_64.h" +#endif diff --git a/include/asm-sh/string_32.h b/include/asm-sh/string_32.h new file mode 100644 index 000000000000..55f8db6bc1d7 --- /dev/null +++ b/include/asm-sh/string_32.h @@ -0,0 +1,131 @@ +#ifndef __ASM_SH_STRING_H +#define __ASM_SH_STRING_H + +#ifdef __KERNEL__ + +/* + * Copyright (C) 1999 Niibe Yutaka + * But consider these trivial functions to be public domain. 
+ */ + +#define __HAVE_ARCH_STRCPY +static inline char *strcpy(char *__dest, const char *__src) +{ + register char *__xdest = __dest; + unsigned long __dummy; + + __asm__ __volatile__("1:\n\t" + "mov.b @%1+, %2\n\t" + "mov.b %2, @%0\n\t" + "cmp/eq #0, %2\n\t" + "bf/s 1b\n\t" + " add #1, %0\n\t" + : "=r" (__dest), "=r" (__src), "=&z" (__dummy) + : "0" (__dest), "1" (__src) + : "memory", "t"); + + return __xdest; +} + +#define __HAVE_ARCH_STRNCPY +static inline char *strncpy(char *__dest, const char *__src, size_t __n) +{ + register char *__xdest = __dest; + unsigned long __dummy; + + if (__n == 0) + return __xdest; + + __asm__ __volatile__( + "1:\n" + "mov.b @%1+, %2\n\t" + "mov.b %2, @%0\n\t" + "cmp/eq #0, %2\n\t" + "bt/s 2f\n\t" + " cmp/eq %5,%1\n\t" + "bf/s 1b\n\t" + " add #1, %0\n" + "2:" + : "=r" (__dest), "=r" (__src), "=&z" (__dummy) + : "0" (__dest), "1" (__src), "r" (__src+__n) + : "memory", "t"); + + return __xdest; +} + +#define __HAVE_ARCH_STRCMP +static inline int strcmp(const char *__cs, const char *__ct) +{ + register int __res; + unsigned long __dummy; + + __asm__ __volatile__( + "mov.b @%1+, %3\n" + "1:\n\t" + "mov.b @%0+, %2\n\t" + "cmp/eq #0, %3\n\t" + "bt 2f\n\t" + "cmp/eq %2, %3\n\t" + "bt/s 1b\n\t" + " mov.b @%1+, %3\n\t" + "add #-2, %1\n\t" + "mov.b @%1, %3\n\t" + "sub %3, %2\n" + "2:" + : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy) + : "0" (__cs), "1" (__ct) + : "t"); + + return __res; +} + +#define __HAVE_ARCH_STRNCMP +static inline int strncmp(const char *__cs, const char *__ct, size_t __n) +{ + register int __res; + unsigned long __dummy; + + if (__n == 0) + return 0; + + __asm__ __volatile__( + "mov.b @%1+, %3\n" + "1:\n\t" + "mov.b @%0+, %2\n\t" + "cmp/eq %6, %0\n\t" + "bt/s 2f\n\t" + " cmp/eq #0, %3\n\t" + "bt/s 3f\n\t" + " cmp/eq %3, %2\n\t" + "bt/s 1b\n\t" + " mov.b @%1+, %3\n\t" + "add #-2, %1\n\t" + "mov.b @%1, %3\n" + "2:\n\t" + "sub %3, %2\n" + "3:" + :"=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy) + : "0" (__cs), "1" (__ct), "r" (__cs+__n) + : "t"); + + return __res; +} + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +#define __HAVE_ARCH_MEMCHR +extern void *memchr(const void *__s, int __c, size_t __n); + +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_SH_STRING_H */ diff --git a/include/asm-sh64/string.h b/include/asm-sh/string_64.h index 8a7357366ce8..aa1fef229c78 100644 --- a/include/asm-sh64/string.h +++ b/include/asm-sh/string_64.h @@ -1,21 +1,17 @@ -#ifndef __ASM_SH64_STRING_H -#define __ASM_SH64_STRING_H +#ifndef __ASM_SH_STRING_64_H +#define __ASM_SH_STRING_64_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/string.h + * include/asm-sh/string_64.h * * Copyright (C) 2000, 2001 Paolo Alberelli * - * Empty on purpose. ARCH SH64 ASM libs are out of the current project scope. - * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
*/ #define __HAVE_ARCH_MEMCPY - extern void *memcpy(void *dest, const void *src, size_t count); -#endif +#endif /* __ASM_SH_STRING_64_H */ diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index 4faa2fb88616..772cd1a0a674 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -12,60 +12,9 @@ #include <asm/types.h> #include <asm/ptrace.h> -struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *next); +#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ -#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ -/* - * switch_to() should switch tasks to task nr n, first - */ - -#define switch_to(prev, next, last) do { \ - struct task_struct *__last; \ - register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ - register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ - register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \ - register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \ - register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \ - register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \ - __asm__ __volatile__ (".balign 4\n\t" \ - "stc.l gbr, @-r15\n\t" \ - "sts.l pr, @-r15\n\t" \ - "mov.l r8, @-r15\n\t" \ - "mov.l r9, @-r15\n\t" \ - "mov.l r10, @-r15\n\t" \ - "mov.l r11, @-r15\n\t" \ - "mov.l r12, @-r15\n\t" \ - "mov.l r13, @-r15\n\t" \ - "mov.l r14, @-r15\n\t" \ - "mov.l r15, @r1 ! save SP\n\t" \ - "mov.l @r6, r15 ! change to new stack\n\t" \ - "mova 1f, %0\n\t" \ - "mov.l %0, @r2 ! save PC\n\t" \ - "mov.l 2f, %0\n\t" \ - "jmp @%0 ! call __switch_to\n\t" \ - " lds r7, pr ! with return to new PC\n\t" \ - ".balign 4\n" \ - "2:\n\t" \ - ".long __switch_to\n" \ - "1:\n\t" \ - "mov.l @r15+, r14\n\t" \ - "mov.l @r15+, r13\n\t" \ - "mov.l @r15+, r12\n\t" \ - "mov.l @r15+, r11\n\t" \ - "mov.l @r15+, r10\n\t" \ - "mov.l @r15+, r9\n\t" \ - "mov.l @r15+, r8\n\t" \ - "lds.l @r15+, pr\n\t" \ - "ldc.l @r15+, gbr\n\t" \ - : "=z" (__last) \ - : "r" (__ts1), "r" (__ts2), "r" (__ts4), \ - "r" (__ts5), "r" (__ts6), "r" (__ts7) \ - : "r3", "t"); \ - last = __last; \ -} while (0) - -#ifdef CONFIG_CPU_SH4A +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) #define __icbi() \ { \ unsigned long __addr; \ @@ -91,7 +40,7 @@ struct task_struct *__switch_to(struct task_struct *prev, * Historically we have only done this type of barrier for the MMUCR, but * it's also necessary for the CCR, so we make it generic here instead. */ -#ifdef CONFIG_CPU_SH4A +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() #define wmb() __asm__ __volatile__ ("synco": : :"memory") @@ -119,63 +68,11 @@ struct task_struct *__switch_to(struct task_struct *prev, #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) -/* - * Jump to P2 area. - * When handling TLB or caches, we need to do it from P2 area. - */ -#define jump_to_P2() \ -do { \ - unsigned long __dummy; \ - __asm__ __volatile__( \ - "mov.l 1f, %0\n\t" \ - "or %1, %0\n\t" \ - "jmp @%0\n\t" \ - " nop\n\t" \ - ".balign 4\n" \ - "1: .long 2f\n" \ - "2:" \ - : "=&r" (__dummy) \ - : "r" (0x20000000)); \ -} while (0) - -/* - * Back to P1 area. 
- */ -#define back_to_P1() \ -do { \ - unsigned long __dummy; \ - ctrl_barrier(); \ - __asm__ __volatile__( \ - "mov.l 1f, %0\n\t" \ - "jmp @%0\n\t" \ - " nop\n\t" \ - ".balign 4\n" \ - "1: .long 2f\n" \ - "2:" \ - : "=&r" (__dummy)); \ -} while (0) - -static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) -{ - unsigned long flags, retval; - - local_irq_save(flags); - retval = *m; - *m = val; - local_irq_restore(flags); - return retval; -} - -static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) -{ - unsigned long flags, retval; - - local_irq_save(flags); - retval = *m; - *m = val & 0xff; - local_irq_restore(flags); - return retval; -} +#ifdef CONFIG_GUSA_RB +#include <asm/cmpxchg-grb.h> +#else +#include <asm/cmpxchg-irq.h> +#endif extern void __xchg_called_with_bad_pointer(void); @@ -202,20 +99,6 @@ extern void __xchg_called_with_bad_pointer(void); #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr)))) -static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, - unsigned long new) -{ - __u32 retval; - unsigned long flags; - - local_irq_save(flags); - retval = *m; - if (retval == old) - *m = new; - local_irq_restore(flags); /* implies memory barrier */ - return retval; -} - /* This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). */ extern void __cmpxchg_called_with_bad_pointer(void); @@ -255,10 +138,14 @@ static inline void *set_exception_table_evt(unsigned int evt, void *handler) */ #ifdef CONFIG_CPU_SH2A extern unsigned int instruction_size(unsigned int insn); -#else +#elif defined(CONFIG_SUPERH32) #define instruction_size(insn) (2) +#else +#define instruction_size(insn) (4) #endif +extern unsigned long cached_to_uncached; + /* XXX * disable hlt during certain critical i/o operations */ @@ -270,13 +157,35 @@ void default_idle(void); void per_cpu_trap_init(void); asmlinkage void break_point_trap(void); -asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); -asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); + +#ifdef CONFIG_SUPERH32 +#define BUILD_TRAP_HANDLER(name) \ +asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \ + unsigned long r6, unsigned long r7, \ + struct pt_regs __regs) + +#define TRAP_HANDLER_DECL \ + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \ + unsigned int vec = regs->tra; \ + (void)vec; +#else +#define BUILD_TRAP_HANDLER(name) \ +asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) +#define TRAP_HANDLER_DECL +#endif + +BUILD_TRAP_HANDLER(address_error); +BUILD_TRAP_HANDLER(debug); +BUILD_TRAP_HANDLER(bug); +BUILD_TRAP_HANDLER(fpu_error); +BUILD_TRAP_HANDLER(fpu_state_restore); #define arch_align_stack(x) (x) +#ifdef CONFIG_SUPERH32 +# include "system_32.h" +#else +# include "system_64.h" +#endif + #endif diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h new file mode 100644 index 000000000000..7ff08d956ba8 --- /dev/null +++ b/include/asm-sh/system_32.h @@ -0,0 +1,99 @@ +#ifndef __ASM_SH_SYSTEM_32_H +#define __ASM_SH_SYSTEM_32_H + +#include <linux/types.h> + +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); + +/* + * switch_to() should switch tasks to task nr n, first + */ +#define switch_to(prev, next, last) \ +do { \ + register u32 *__ts1 __asm__ ("r1") = 
(u32 *)&prev->thread.sp; \ + register u32 *__ts2 __asm__ ("r2") = (u32 *)&prev->thread.pc; \ + register u32 *__ts4 __asm__ ("r4") = (u32 *)prev; \ + register u32 *__ts5 __asm__ ("r5") = (u32 *)next; \ + register u32 *__ts6 __asm__ ("r6") = (u32 *)&next->thread.sp; \ + register u32 __ts7 __asm__ ("r7") = next->thread.pc; \ + struct task_struct *__last; \ + \ + __asm__ __volatile__ ( \ + ".balign 4\n\t" \ + "stc.l gbr, @-r15\n\t" \ + "sts.l pr, @-r15\n\t" \ + "mov.l r8, @-r15\n\t" \ + "mov.l r9, @-r15\n\t" \ + "mov.l r10, @-r15\n\t" \ + "mov.l r11, @-r15\n\t" \ + "mov.l r12, @-r15\n\t" \ + "mov.l r13, @-r15\n\t" \ + "mov.l r14, @-r15\n\t" \ + "mov.l r15, @r1\t! save SP\n\t" \ + "mov.l @r6, r15\t! change to new stack\n\t" \ + "mova 1f, %0\n\t" \ + "mov.l %0, @r2\t! save PC\n\t" \ + "mov.l 2f, %0\n\t" \ + "jmp @%0\t! call __switch_to\n\t" \ + " lds r7, pr\t! with return to new PC\n\t" \ + ".balign 4\n" \ + "2:\n\t" \ + ".long __switch_to\n" \ + "1:\n\t" \ + "mov.l @r15+, r14\n\t" \ + "mov.l @r15+, r13\n\t" \ + "mov.l @r15+, r12\n\t" \ + "mov.l @r15+, r11\n\t" \ + "mov.l @r15+, r10\n\t" \ + "mov.l @r15+, r9\n\t" \ + "mov.l @r15+, r8\n\t" \ + "lds.l @r15+, pr\n\t" \ + "ldc.l @r15+, gbr\n\t" \ + : "=z" (__last) \ + : "r" (__ts1), "r" (__ts2), "r" (__ts4), \ + "r" (__ts5), "r" (__ts6), "r" (__ts7) \ + : "r3", "t"); \ + \ + last = __last; \ +} while (0) + +#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text"))) + +/* + * Jump to uncached area. + * When handling TLB or caches, we need to do it from an uncached area. + */ +#define jump_to_uncached() \ +do { \ + unsigned long __dummy; \ + \ + __asm__ __volatile__( \ + "mova 1f, %0\n\t" \ + "add %1, %0\n\t" \ + "jmp @%0\n\t" \ + " nop\n\t" \ + ".balign 4\n" \ + "1:" \ + : "=&z" (__dummy) \ + : "r" (cached_to_uncached)); \ +} while (0) + +/* + * Back to cached area. + */ +#define back_to_cached() \ +do { \ + unsigned long __dummy; \ + ctrl_barrier(); \ + __asm__ __volatile__( \ + "mov.l 1f, %0\n\t" \ + "jmp @%0\n\t" \ + " nop\n\t" \ + ".balign 4\n" \ + "1: .long 2f\n" \ + "2:" \ + : "=&r" (__dummy)); \ +} while (0) + +#endif /* __ASM_SH_SYSTEM_32_H */ diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h new file mode 100644 index 000000000000..943acf5ea07c --- /dev/null +++ b/include/asm-sh/system_64.h @@ -0,0 +1,40 @@ +#ifndef __ASM_SH_SYSTEM_64_H +#define __ASM_SH_SYSTEM_64_H + +/* + * include/asm-sh/system_64.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <asm/processor.h> + +/* + * switch_to() should switch tasks to task nr n, first + */ +struct task_struct *sh64_switch_to(struct task_struct *prev, + struct thread_struct *prev_thread, + struct task_struct *next, + struct thread_struct *next_thread); + +#define switch_to(prev,next,last) \ +do { \ + if (last_task_used_math != next) { \ + struct pt_regs *regs = next->thread.uregs; \ + if (regs) regs->sr |= SR_FD; \ + } \ + last = sh64_switch_to(prev, &prev->thread, next, \ + &next->thread); \ +} while (0) + +#define __uses_jump_to_uncached + +#define jump_to_uncached() do { } while (0) +#define back_to_cached() do { } while (0) + +#endif /* __ASM_SH_SYSTEM_64_H */ diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h index 1f7e1deb8d92..c50e5d35fe84 100644 --- a/include/asm-sh/thread_info.h +++ b/include/asm-sh/thread_info.h @@ -68,14 +68,16 @@ struct thread_info { #define init_stack (init_thread_union.stack) /* how to get the current stack pointer from C */ -register unsigned long current_stack_pointer asm("r15") __attribute_used__; +register unsigned long current_stack_pointer asm("r15") __used; /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; -#ifdef CONFIG_CPU_HAS_SR_RB - __asm__("stc r7_bank, %0" : "=r" (ti)); +#if defined(CONFIG_SUPERH64) + __asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti)); +#elif defined(CONFIG_CPU_HAS_SR_RB) + __asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti)); #else unsigned long __dummy; @@ -111,6 +113,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ #define TIF_SINGLESTEP 4 /* singlestepping active */ +#define TIF_SYSCALL_AUDIT 5 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 @@ -121,6 +124,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) +#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_USEDFPU (1<<TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1<<TIF_FREEZE) diff --git a/include/asm-sh/tlb.h b/include/asm-sh/tlb.h index 53d185bcf872..56ad1fb888a2 100644 --- a/include/asm-sh/tlb.h +++ b/include/asm-sh/tlb.h @@ -1,6 +1,12 @@ #ifndef __ASM_SH_TLB_H #define __ASM_SH_TLB_H +#ifdef CONFIG_SUPERH64 +# include "tlb_64.h" +#endif + +#ifndef __ASSEMBLY__ + #define tlb_start_vma(tlb, vma) \ flush_cache_range(vma, vma->vm_start, vma->vm_end) @@ -15,4 +21,6 @@ #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #include <asm-generic/tlb.h> -#endif + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_SH_TLB_H */ diff --git a/include/asm-sh64/tlb.h b/include/asm-sh/tlb_64.h index 4979408bd88c..0308e05fc57b 100644 --- a/include/asm-sh64/tlb.h +++ b/include/asm-sh/tlb_64.h @@ -1,20 +1,14 @@ /* - * include/asm-sh64/tlb.h + * include/asm-sh/tlb_64.h * * Copyright (C) 2003 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. - * - */ -#ifndef __ASM_SH64_TLB_H -#define __ASM_SH64_TLB_H - -/* - * Note! 
These are mostly unused, we just need the xTLB_LAST_VAR_UNRESTRICTED - * for head.S! Once this limitation is gone, we can clean the rest of this up. */ +#ifndef __ASM_SH_TLB_64_H +#define __ASM_SH_TLB_64_H /* ITLB defines */ #define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */ @@ -63,30 +57,13 @@ static inline void __flush_tlb_slot(unsigned long long slot) } /* arch/sh64/mm/tlb.c */ -extern int sh64_tlb_init(void); -extern unsigned long long sh64_next_free_dtlb_entry(void); -extern unsigned long long sh64_get_wired_dtlb_entry(void); -extern int sh64_put_wired_dtlb_entry(unsigned long long entry); - -extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr); -extern void sh64_teardown_tlb_slot(unsigned long long config_addr); - -#define tlb_start_vma(tlb, vma) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end) - -#define tlb_end_vma(tlb, vma) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end) - -#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) - -/* - * Flush whole TLBs for MM - */ -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - -#include <asm-generic/tlb.h> +int sh64_tlb_init(void); +unsigned long long sh64_next_free_dtlb_entry(void); +unsigned long long sh64_get_wired_dtlb_entry(void); +int sh64_put_wired_dtlb_entry(unsigned long long entry); +void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, + unsigned long asid, unsigned long paddr); +void sh64_teardown_tlb_slot(unsigned long long config_addr); #endif /* __ASSEMBLY__ */ - -#endif /* __ASM_SH64_TLB_H */ - +#endif /* __ASM_SH_TLB_64_H */ diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h index 7ba69d9707ef..a6e1d4126e67 100644 --- a/include/asm-sh/types.h +++ b/include/asm-sh/types.h @@ -52,6 +52,12 @@ typedef unsigned long long u64; typedef u32 dma_addr_t; +#ifdef CONFIG_SUPERH32 +typedef u16 opcode_t; +#else +typedef u32 opcode_t; +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h index 77c391fa93d6..ff24ce95b238 100644 --- a/include/asm-sh/uaccess.h +++ b/include/asm-sh/uaccess.h @@ -1,563 +1,5 @@ -/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $ - * - * User space memory access functions - * - * Copyright (C) 1999, 2002 Niibe Yutaka - * Copyright (C) 2003 Paul Mundt - * - * Based on: - * MIPS implementation version 1.15 by - * Copyright (C) 1996, 1997, 1998 by Ralf Baechle - * and i386 version. - */ -#ifndef __ASM_SH_UACCESS_H -#define __ASM_SH_UACCESS_H - -#include <linux/errno.h> -#include <linux/sched.h> - -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons (Data Segment Register?), these macros are misnamed. - */ - -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) -#define USER_DS MAKE_MM_SEG(PAGE_OFFSET) - -#define segment_eq(a,b) ((a).seg == (b).seg) - -#define get_ds() (KERNEL_DS) - -#if !defined(CONFIG_MMU) -/* NOMMU is always true */ -#define __addr_ok(addr) (1) - -static inline mm_segment_t get_fs(void) -{ - return USER_DS; -} - -static inline void set_fs(mm_segment_t s) -{ -} - -/* - * __access_ok: Check if address with size is OK or not. 
- * - * If we don't have an MMU (or if its disabled) the only thing we really have - * to look out for is if the address resides somewhere outside of what - * available RAM we have. - * - * TODO: This check could probably also stand to be restricted somewhat more.. - * though it still does the Right Thing(tm) for the time being. - */ -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - return ((addr >= memory_start) && ((addr + size) < memory_end)); -} -#else /* CONFIG_MMU */ -#define __addr_ok(addr) \ - ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -/* - * __access_ok: Check if address with size is OK or not. - * - * Uhhuh, this needs 33-bit arithmetic. We have a carry.. - * - * sum := addr + size; carry? --> flag = true; - * if (sum >= addr_limit) flag = true; - */ -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long flag, sum; - - __asm__("clrt\n\t" - "addc %3, %1\n\t" - "movt %0\n\t" - "cmp/hi %4, %1\n\t" - "rotcl %0" - :"=&r" (flag), "=r" (sum) - :"1" (addr), "r" (size), - "r" (current_thread_info()->addr_limit.seg) - :"t"); - return flag == 0; - -} -#endif /* CONFIG_MMU */ - -static inline int access_ok(int type, const void __user *p, unsigned long size) -{ - unsigned long addr = (unsigned long)p; - return __access_ok(addr, size); -} - -/* - * Uh, these should become the main single-value transfer routines ... - * They automatically use the right size if we just have the right - * pointer type ... - * - * As SuperH uses the same address space for kernel and user data, we - * can just do these as direct assignments. - * - * Careful to not - * (a) re-use the arguments for side effects (sizeof is ok) - * (b) require any knowledge of processes at this stage - */ -#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) -#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) - -/* - * The "__xxx" versions do not do address space checking, useful when - * doing multiple accesses to the same area (the user has to do the - * checks by hand with "access_ok()") - */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) - -struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct __user *)(x)) - -#define __get_user_size(x,ptr,size,retval) \ -do { \ - retval = 0; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: \ - __get_user_asm(x, ptr, retval, "b"); \ - break; \ - case 2: \ - __get_user_asm(x, ptr, retval, "w"); \ - break; \ - case 4: \ - __get_user_asm(x, ptr, retval, "l"); \ - break; \ - default: \ - __get_user_unknown(); \ - break; \ - } \ -} while (0) - -#define __get_user_nocheck(x,ptr,size) \ -({ \ - long __gu_err, __gu_val; \ - __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ - __gu_err; \ -}) - -#ifdef CONFIG_MMU -#define __get_user_check(x,ptr,size) \ -({ \ - long __gu_err, __gu_val; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: \ - __get_user_1(__gu_val, (ptr), __gu_err); \ - break; \ - case 2: \ - __get_user_2(__gu_val, (ptr), __gu_err); \ - break; \ - case 4: \ - __get_user_4(__gu_val, (ptr), __gu_err); \ - break; \ - default: \ - __get_user_unknown(); \ - break; \ - } \ - \ - (x) = (__typeof__(*(ptr)))__gu_val; \ - __gu_err; \ -}) - -#define 
__get_user_1(x,addr,err) ({ \ -__asm__("stc r7_bank, %1\n\t" \ - "mov.l @(8,%1), %1\n\t" \ - "and %2, %1\n\t" \ - "cmp/pz %1\n\t" \ - "bt/s 1f\n\t" \ - " mov #0, %0\n\t" \ - "0:\n" \ - "mov #-14, %0\n\t" \ - "bra 2f\n\t" \ - " mov #0, %1\n" \ - "1:\n\t" \ - "mov.b @%2, %1\n\t" \ - "extu.b %1, %1\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 0b\n\t" \ - ".previous" \ - : "=&r" (err), "=&r" (x) \ - : "r" (addr) \ - : "t"); \ -}) - -#define __get_user_2(x,addr,err) ({ \ -__asm__("stc r7_bank, %1\n\t" \ - "mov.l @(8,%1), %1\n\t" \ - "and %2, %1\n\t" \ - "cmp/pz %1\n\t" \ - "bt/s 1f\n\t" \ - " mov #0, %0\n\t" \ - "0:\n" \ - "mov #-14, %0\n\t" \ - "bra 2f\n\t" \ - " mov #0, %1\n" \ - "1:\n\t" \ - "mov.w @%2, %1\n\t" \ - "extu.w %1, %1\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 0b\n\t" \ - ".previous" \ - : "=&r" (err), "=&r" (x) \ - : "r" (addr) \ - : "t"); \ -}) - -#define __get_user_4(x,addr,err) ({ \ -__asm__("stc r7_bank, %1\n\t" \ - "mov.l @(8,%1), %1\n\t" \ - "and %2, %1\n\t" \ - "cmp/pz %1\n\t" \ - "bt/s 1f\n\t" \ - " mov #0, %0\n\t" \ - "0:\n" \ - "mov #-14, %0\n\t" \ - "bra 2f\n\t" \ - " mov #0, %1\n" \ - "1:\n\t" \ - "mov.l @%2, %1\n\t" \ - "2:\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 0b\n\t" \ - ".previous" \ - : "=&r" (err), "=&r" (x) \ - : "r" (addr) \ - : "t"); \ -}) -#else /* CONFIG_MMU */ -#define __get_user_check(x,ptr,size) \ -({ \ - long __gu_err, __gu_val; \ - if (__access_ok((unsigned long)(ptr), (size))) { \ - __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ - } else \ - __gu_err = -EFAULT; \ - __gu_err; \ -}) -#endif - -#define __get_user_asm(x, addr, err, insn) \ -({ \ -__asm__ __volatile__( \ - "1:\n\t" \ - "mov." insn " %2, %1\n\t" \ - "mov #0, %0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\n\t" \ - "mov #0, %1\n\t" \ - "mov.l 4f, %0\n\t" \ - "jmp @%0\n\t" \ - " mov %3, %0\n" \ - "4: .long 2b\n\t" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 3b\n\t" \ - ".previous" \ - :"=&r" (err), "=&r" (x) \ - :"m" (__m(addr)), "i" (-EFAULT)); }) - -extern void __get_user_unknown(void); - -#define __put_user_size(x,ptr,size,retval) \ -do { \ - retval = 0; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: \ - __put_user_asm(x, ptr, retval, "b"); \ - break; \ - case 2: \ - __put_user_asm(x, ptr, retval, "w"); \ - break; \ - case 4: \ - __put_user_asm(x, ptr, retval, "l"); \ - break; \ - case 8: \ - __put_user_u64(x, ptr, retval); \ - break; \ - default: \ - __put_user_unknown(); \ - } \ -} while (0) - -#define __put_user_nocheck(x,ptr,size) \ -({ \ - long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err); \ - __pu_err; \ -}) - -#define __put_user_check(x,ptr,size) \ -({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - \ - if (__access_ok((unsigned long)__pu_addr,size)) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ - __pu_err; \ -}) - -#define __put_user_asm(x, addr, err, insn) \ -({ \ -__asm__ __volatile__( \ - "1:\n\t" \ - "mov." 
insn " %1, %2\n\t" \ - "mov #0, %0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\n\t" \ - "nop\n\t" \ - "mov.l 4f, %0\n\t" \ - "jmp @%0\n\t" \ - "mov %3, %0\n" \ - "4: .long 2b\n\t" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 3b\n\t" \ - ".previous" \ - :"=&r" (err) \ - :"r" (x), "m" (__m(addr)), "i" (-EFAULT) \ - :"memory"); }) - -#if defined(__LITTLE_ENDIAN__) -#define __put_user_u64(val,addr,retval) \ -({ \ -__asm__ __volatile__( \ - "1:\n\t" \ - "mov.l %R1,%2\n\t" \ - "mov.l %S1,%T2\n\t" \ - "mov #0,%0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\n\t" \ - "nop\n\t" \ - "mov.l 4f,%0\n\t" \ - "jmp @%0\n\t" \ - " mov %3,%0\n" \ - "4: .long 2b\n\t" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 3b\n\t" \ - ".previous" \ - : "=r" (retval) \ - : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \ - : "memory"); }) +#ifdef CONFIG_SUPERH32 +# include "uaccess_32.h" #else -#define __put_user_u64(val,addr,retval) \ -({ \ -__asm__ __volatile__( \ - "1:\n\t" \ - "mov.l %S1,%2\n\t" \ - "mov.l %R1,%T2\n\t" \ - "mov #0,%0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\n\t" \ - "nop\n\t" \ - "mov.l 4f,%0\n\t" \ - "jmp @%0\n\t" \ - " mov %3,%0\n" \ - "4: .long 2b\n\t" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".long 1b, 3b\n\t" \ - ".previous" \ - : "=r" (retval) \ - : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \ - : "memory"); }) +# include "uaccess_64.h" #endif - -extern void __put_user_unknown(void); - -/* Generic arbitrary sized copy. */ -/* Return the number of bytes NOT copied */ -__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); - -#define copy_to_user(to,from,n) ({ \ -void *__copy_to = (void *) (to); \ -__kernel_size_t __copy_size = (__kernel_size_t) (n); \ -__kernel_size_t __copy_res; \ -if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \ -__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \ -} else __copy_res = __copy_size; \ -__copy_res; }) - -#define copy_from_user(to,from,n) ({ \ -void *__copy_to = (void *) (to); \ -void *__copy_from = (void *) (from); \ -__kernel_size_t __copy_size = (__kernel_size_t) (n); \ -__kernel_size_t __copy_res; \ -if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \ -__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \ -} else __copy_res = __copy_size; \ -__copy_res; }) - -static __always_inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) -{ - return __copy_user(to, (__force void *)from, n); -} - -static __always_inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - return __copy_user((__force void *)to, from, n); -} - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -/* - * Clear the area and return remaining number of bytes - * (on failure. Usually it's 0.) 
- */ -extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size); - -#define clear_user(addr,n) ({ \ -void * __cl_addr = (addr); \ -unsigned long __cl_size = (n); \ -if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \ -__cl_size = __clear_user(__cl_addr, __cl_size); \ -__cl_size; }) - -static __inline__ int -__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) -{ - __kernel_size_t res; - unsigned long __dummy, _d, _s; - - __asm__ __volatile__( - "9:\n" - "mov.b @%2+, %1\n\t" - "cmp/eq #0, %1\n\t" - "bt/s 2f\n" - "1:\n" - "mov.b %1, @%3\n\t" - "dt %7\n\t" - "bf/s 9b\n\t" - " add #1, %3\n\t" - "2:\n\t" - "sub %7, %0\n" - "3:\n" - ".section .fixup,\"ax\"\n" - "4:\n\t" - "mov.l 5f, %1\n\t" - "jmp @%1\n\t" - " mov %8, %0\n\t" - ".balign 4\n" - "5: .long 3b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .balign 4\n" - " .long 9b,4b\n" - ".previous" - : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d) - : "0" (__count), "2" (__src), "3" (__dest), "r" (__count), - "i" (-EFAULT) - : "memory", "t"); - - return res; -} - -#define strncpy_from_user(dest,src,count) ({ \ -unsigned long __sfu_src = (unsigned long) (src); \ -int __sfu_count = (int) (count); \ -long __sfu_res = -EFAULT; \ -if(__access_ok(__sfu_src, __sfu_count)) { \ -__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \ -} __sfu_res; }) - -/* - * Return the size of a string (including the ending 0!) - */ -static __inline__ long __strnlen_user(const char __user *__s, long __n) -{ - unsigned long res; - unsigned long __dummy; - - __asm__ __volatile__( - "9:\n" - "cmp/eq %4, %0\n\t" - "bt 2f\n" - "1:\t" - "mov.b @(%0,%3), %1\n\t" - "tst %1, %1\n\t" - "bf/s 9b\n\t" - " add #1, %0\n" - "2:\n" - ".section .fixup,\"ax\"\n" - "3:\n\t" - "mov.l 4f, %1\n\t" - "jmp @%1\n\t" - " mov #0, %0\n" - ".balign 4\n" - "4: .long 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .balign 4\n" - " .long 1b,3b\n" - ".previous" - : "=z" (res), "=&r" (__dummy) - : "0" (0), "r" (__s), "r" (__n) - : "t"); - return res; -} - -static __inline__ long strnlen_user(const char __user *s, long n) -{ - if (!__addr_ok(s)) - return 0; - else - return __strnlen_user(s, n); -} - -#define strlen_user(str) strnlen_user(str, ~0UL >> 1) - -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); - -#endif /* __ASM_SH_UACCESS_H */ diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h new file mode 100644 index 000000000000..b6082f3c1dc4 --- /dev/null +++ b/include/asm-sh/uaccess_32.h @@ -0,0 +1,510 @@ +/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $ + * + * User space memory access functions + * + * Copyright (C) 1999, 2002 Niibe Yutaka + * Copyright (C) 2003 Paul Mundt + * + * Based on: + * MIPS implementation version 1.15 by + * Copyright (C) 1996, 1997, 1998 by Ralf Baechle + * and i386 version. 
+ */ +#ifndef __ASM_SH_UACCESS_H +#define __ASM_SH_UACCESS_H + +#include <linux/errno.h> +#include <linux/sched.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons (Data Segment Register?), these macros are misnamed. + */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) +#define USER_DS MAKE_MM_SEG(PAGE_OFFSET) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#define get_ds() (KERNEL_DS) + +#if !defined(CONFIG_MMU) +/* NOMMU is always true */ +#define __addr_ok(addr) (1) + +static inline mm_segment_t get_fs(void) +{ + return USER_DS; +} + +static inline void set_fs(mm_segment_t s) +{ +} + +/* + * __access_ok: Check if address with size is OK or not. + * + * If we don't have an MMU (or if its disabled) the only thing we really have + * to look out for is if the address resides somewhere outside of what + * available RAM we have. + * + * TODO: This check could probably also stand to be restricted somewhat more.. + * though it still does the Right Thing(tm) for the time being. + */ +static inline int __access_ok(unsigned long addr, unsigned long size) +{ + return ((addr >= memory_start) && ((addr + size) < memory_end)); +} +#else /* CONFIG_MMU */ +#define __addr_ok(addr) \ + ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) + +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +/* + * __access_ok: Check if address with size is OK or not. + * + * Uhhuh, this needs 33-bit arithmetic. We have a carry.. + * + * sum := addr + size; carry? --> flag = true; + * if (sum >= addr_limit) flag = true; + */ +static inline int __access_ok(unsigned long addr, unsigned long size) +{ + unsigned long flag, sum; + + __asm__("clrt\n\t" + "addc %3, %1\n\t" + "movt %0\n\t" + "cmp/hi %4, %1\n\t" + "rotcl %0" + :"=&r" (flag), "=r" (sum) + :"1" (addr), "r" (size), + "r" (current_thread_info()->addr_limit.seg) + :"t"); + return flag == 0; +} +#endif /* CONFIG_MMU */ + +#define access_ok(type, addr, size) \ + (__chk_user_ptr(addr), \ + __access_ok((unsigned long __force)(addr), (size))) + +/* + * Uh, these should become the main single-value transfer routines ... + * They automatically use the right size if we just have the right + * pointer type ... + * + * As SuperH uses the same address space for kernel and user data, we + * can just do these as direct assignments. 
+ * + * Careful to not + * (a) re-use the arguments for side effects (sizeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x,ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) +#define get_user(x,ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the user has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x,ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __get_user(x,ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + __get_user_asm(x, ptr, retval, "b"); \ + break; \ + case 2: \ + __get_user_asm(x, ptr, retval, "w"); \ + break; \ + case 4: \ + __get_user_asm(x, ptr, retval, "l"); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ +} while (0) + +#define __get_user_nocheck(x,ptr,size) \ +({ \ + long __gu_err; \ + unsigned long __gu_val; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __chk_user_ptr(ptr); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x,ptr,size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) *__gu_addr = (ptr); \ + if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user_asm(x, addr, err, insn) \ +({ \ +__asm__ __volatile__( \ + "1:\n\t" \ + "mov." insn " %2, %1\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n\t" \ + "mov #0, %1\n\t" \ + "mov.l 4f, %0\n\t" \ + "jmp @%0\n\t" \ + " mov %3, %0\n\t" \ + ".balign 4\n" \ + "4: .long 2b\n\t" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + ".long 1b, 3b\n\t" \ + ".previous" \ + :"=&r" (err), "=&r" (x) \ + :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); }) + +extern void __get_user_unknown(void); + +#define __put_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + __put_user_asm(x, ptr, retval, "b"); \ + break; \ + case 2: \ + __put_user_asm(x, ptr, retval, "w"); \ + break; \ + case 4: \ + __put_user_asm(x, ptr, retval, "l"); \ + break; \ + case 8: \ + __put_user_u64(x, ptr, retval); \ + break; \ + default: \ + __put_user_unknown(); \ + } \ +} while (0) + +#define __put_user_nocheck(x,ptr,size) \ +({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __chk_user_ptr(ptr); \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x,ptr,size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \ + __put_user_size((x), __pu_addr, (size), \ + __pu_err); \ + __pu_err; \ +}) + +#define __put_user_asm(x, addr, err, insn) \ +({ \ +__asm__ __volatile__( \ + "1:\n\t" \ + "mov." 
insn " %1, %2\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n\t" \ + "mov.l 4f, %0\n\t" \ + "jmp @%0\n\t" \ + " mov %3, %0\n\t" \ + ".balign 4\n" \ + "4: .long 2b\n\t" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + ".long 1b, 3b\n\t" \ + ".previous" \ + :"=&r" (err) \ + :"r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \ + :"memory"); }) + +#if defined(CONFIG_CPU_LITTLE_ENDIAN) +#define __put_user_u64(val,addr,retval) \ +({ \ +__asm__ __volatile__( \ + "1:\n\t" \ + "mov.l %R1,%2\n\t" \ + "mov.l %S1,%T2\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n\t" \ + "mov.l 4f,%0\n\t" \ + "jmp @%0\n\t" \ + " mov %3,%0\n\t" \ + ".balign 4\n" \ + "4: .long 2b\n\t" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + ".long 1b, 3b\n\t" \ + ".previous" \ + : "=r" (retval) \ + : "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \ + : "memory"); }) +#else +#define __put_user_u64(val,addr,retval) \ +({ \ +__asm__ __volatile__( \ + "1:\n\t" \ + "mov.l %S1,%2\n\t" \ + "mov.l %R1,%T2\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n\t" \ + "mov.l 4f,%0\n\t" \ + "jmp @%0\n\t" \ + " mov %3,%0\n\t" \ + ".balign 4\n" \ + "4: .long 2b\n\t" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + ".long 1b, 3b\n\t" \ + ".previous" \ + : "=r" (retval) \ + : "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \ + : "memory"); }) +#endif + +extern void __put_user_unknown(void); + +/* Generic arbitrary sized copy. */ +/* Return the number of bytes NOT copied */ +__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); + +#define copy_to_user(to,from,n) ({ \ +void *__copy_to = (void *) (to); \ +__kernel_size_t __copy_size = (__kernel_size_t) (n); \ +__kernel_size_t __copy_res; \ +if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \ +__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \ +} else __copy_res = __copy_size; \ +__copy_res; }) + +#define copy_from_user(to,from,n) ({ \ +void *__copy_to = (void *) (to); \ +void *__copy_from = (void *) (from); \ +__kernel_size_t __copy_size = (__kernel_size_t) (n); \ +__kernel_size_t __copy_res; \ +if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \ +__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \ +} else __copy_res = __copy_size; \ +__copy_res; }) + +static __always_inline unsigned long +__copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return __copy_user(to, (__force void *)from, n); +} + +static __always_inline unsigned long __must_check +__copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return __copy_user((__force void *)to, from, n); +} + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +/* + * Clear the area and return remaining number of bytes + * (on failure. Usually it's 0.) 
+ */ +extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size); + +#define clear_user(addr,n) ({ \ +void * __cl_addr = (addr); \ +unsigned long __cl_size = (n); \ +if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \ +__cl_size = __clear_user(__cl_addr, __cl_size); \ +__cl_size; }) + +static __inline__ int +__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) +{ + __kernel_size_t res; + unsigned long __dummy, _d, _s, _c; + + __asm__ __volatile__( + "9:\n" + "mov.b @%2+, %1\n\t" + "cmp/eq #0, %1\n\t" + "bt/s 2f\n" + "1:\n" + "mov.b %1, @%3\n\t" + "dt %4\n\t" + "bf/s 9b\n\t" + " add #1, %3\n\t" + "2:\n\t" + "sub %4, %0\n" + "3:\n" + ".section .fixup,\"ax\"\n" + "4:\n\t" + "mov.l 5f, %1\n\t" + "jmp @%1\n\t" + " mov %9, %0\n\t" + ".balign 4\n" + "5: .long 3b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .balign 4\n" + " .long 9b,4b\n" + ".previous" + : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c) + : "0" (__count), "2" (__src), "3" (__dest), "4" (__count), + "i" (-EFAULT) + : "memory", "t"); + + return res; +} + +/** + * strncpy_from_user: - Copy a NUL terminated string from userspace. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +#define strncpy_from_user(dest,src,count) ({ \ +unsigned long __sfu_src = (unsigned long) (src); \ +int __sfu_count = (int) (count); \ +long __sfu_res = -EFAULT; \ +if(__access_ok(__sfu_src, __sfu_count)) { \ +__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \ +} __sfu_res; }) + +/* + * Return the size of a string (including the ending 0 even when we have + * exceeded the maximum string length). + */ +static __inline__ long __strnlen_user(const char __user *__s, long __n) +{ + unsigned long res; + unsigned long __dummy; + + __asm__ __volatile__( + "1:\t" + "mov.b @(%0,%3), %1\n\t" + "cmp/eq %4, %0\n\t" + "bt/s 2f\n\t" + " add #1, %0\n\t" + "tst %1, %1\n\t" + "bf 1b\n\t" + "2:\n" + ".section .fixup,\"ax\"\n" + "3:\n\t" + "mov.l 4f, %1\n\t" + "jmp @%1\n\t" + " mov #0, %0\n" + ".balign 4\n" + "4: .long 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .balign 4\n" + " .long 1b,3b\n" + ".previous" + : "=z" (res), "=&r" (__dummy) + : "0" (0), "r" (__s), "r" (__n) + : "t"); + return res; +} + +/** + * strnlen_user: - Get the size of a string in user space. + * @s: The string to measure. + * @n: The maximum valid length + * + * Context: User context only. This function may sleep. + * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * On exception, returns 0. + * If the string is too long, returns a value greater than @n. + */ +static __inline__ long strnlen_user(const char __user *s, long n) +{ + if (!__addr_ok(s)) + return 0; + else + return __strnlen_user(s, n); +} + +/** + * strlen_user: - Get the size of a string in user space. + * @str: The string to measure. + * + * Context: User context only. This function may sleep. 
+ * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * On exception, returns 0. + * + * If there is a limit on the length of a valid string, you may wish to + * consider using strnlen_user() instead. + */ +#define strlen_user(str) strnlen_user(str, ~0UL >> 1) + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#endif /* __ASM_SH_UACCESS_H */ diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh/uaccess_64.h index 644c67b65f94..d54ec082d25a 100644 --- a/include/asm-sh64/uaccess.h +++ b/include/asm-sh/uaccess_64.h @@ -1,12 +1,8 @@ -#ifndef __ASM_SH64_UACCESS_H -#define __ASM_SH64_UACCESS_H +#ifndef __ASM_SH_UACCESS_64_H +#define __ASM_SH_UACCESS_64_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/uaccess.h + * include/asm-sh/uaccess_64.h * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003, 2004 Paul Mundt @@ -20,8 +16,10 @@ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle * and i386 version. * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ - #include <linux/errno.h> #include <linux/sched.h> @@ -297,20 +295,8 @@ struct exception_table_entry #define ARCH_HAS_SEARCH_EXTABLE -/* If gcc inlines memset, it will use st.q instructions. Therefore, we need - kmalloc allocations to be 8-byte aligned. Without this, the alignment - becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on - sh64 at the moment). */ -#define ARCH_KMALLOC_MINALIGN 8 - -/* - * We want 8-byte alignment for the slab caches as well, otherwise we have - * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create(). - */ -#define ARCH_SLAB_MINALIGN 8 - /* Returns 0 if exception not found and fixup.unit otherwise. */ extern unsigned long search_exception_table(unsigned long addr); extern const struct exception_table_entry *search_exception_tables (unsigned long addr); -#endif /* __ASM_SH64_UACCESS_H */ +#endif /* __ASM_SH_UACCESS_64_H */ diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h index b182b1cb05fd..4b21f369c28c 100644 --- a/include/asm-sh/unistd.h +++ b/include/asm-sh/unistd.h @@ -1,376 +1,5 @@ -#ifndef __ASM_SH_UNISTD_H -#define __ASM_SH_UNISTD_H - -/* - * Copyright (C) 1999 Niibe Yutaka - */ - -/* - * This file contains the system call numbers. 
- */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl 110 -#define __NR_vhangup 111 -#define __NR_idle 112 -#define __NR_vm86old 113 -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_modify_ldt 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 
-#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_vm86 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_chown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_streams1 188 /* some people actually want it */ -#define __NR_streams2 189 /* some people actually want it */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_lchown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_chown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_mincore 218 -#define __NR_madvise 219 -#define __NR_getdents64 220 -#define __NR_fcntl64 221 -/* 223 is unused */ -#define __NR_gettid 224 -#define __NR_readahead 225 -#define __NR_setxattr 226 -#define __NR_lsetxattr 227 -#define __NR_fsetxattr 228 -#define __NR_getxattr 229 -#define __NR_lgetxattr 230 -#define __NR_fgetxattr 231 -#define __NR_listxattr 232 -#define __NR_llistxattr 233 -#define __NR_flistxattr 234 -#define __NR_removexattr 235 -#define __NR_lremovexattr 236 -#define __NR_fremovexattr 237 -#define __NR_tkill 238 -#define __NR_sendfile64 239 -#define __NR_futex 240 -#define __NR_sched_setaffinity 241 -#define __NR_sched_getaffinity 242 -#define __NR_set_thread_area 243 -#define __NR_get_thread_area 244 -#define __NR_io_setup 245 -#define __NR_io_destroy 246 -#define __NR_io_getevents 247 -#define __NR_io_submit 248 -#define __NR_io_cancel 249 -#define __NR_fadvise64 250 - -#define __NR_exit_group 252 -#define __NR_lookup_dcookie 253 -#define __NR_epoll_create 254 -#define __NR_epoll_ctl 255 -#define __NR_epoll_wait 256 -#define __NR_remap_file_pages 257 -#define __NR_set_tid_address 258 -#define __NR_timer_create 259 -#define __NR_timer_settime 
(__NR_timer_create+1) -#define __NR_timer_gettime (__NR_timer_create+2) -#define __NR_timer_getoverrun (__NR_timer_create+3) -#define __NR_timer_delete (__NR_timer_create+4) -#define __NR_clock_settime (__NR_timer_create+5) -#define __NR_clock_gettime (__NR_timer_create+6) -#define __NR_clock_getres (__NR_timer_create+7) -#define __NR_clock_nanosleep (__NR_timer_create+8) -#define __NR_statfs64 268 -#define __NR_fstatfs64 269 -#define __NR_tgkill 270 -#define __NR_utimes 271 -#define __NR_fadvise64_64 272 -#define __NR_vserver 273 -#define __NR_mbind 274 -#define __NR_get_mempolicy 275 -#define __NR_set_mempolicy 276 -#define __NR_mq_open 277 -#define __NR_mq_unlink (__NR_mq_open+1) -#define __NR_mq_timedsend (__NR_mq_open+2) -#define __NR_mq_timedreceive (__NR_mq_open+3) -#define __NR_mq_notify (__NR_mq_open+4) -#define __NR_mq_getsetattr (__NR_mq_open+5) -#define __NR_kexec_load 283 -#define __NR_waitid 284 -#define __NR_add_key 285 -#define __NR_request_key 286 -#define __NR_keyctl 287 -#define __NR_ioprio_set 288 -#define __NR_ioprio_get 289 -#define __NR_inotify_init 290 -#define __NR_inotify_add_watch 291 -#define __NR_inotify_rm_watch 292 -/* 293 is unused */ -#define __NR_migrate_pages 294 -#define __NR_openat 295 -#define __NR_mkdirat 296 -#define __NR_mknodat 297 -#define __NR_fchownat 298 -#define __NR_futimesat 299 -#define __NR_fstatat64 300 -#define __NR_unlinkat 301 -#define __NR_renameat 302 -#define __NR_linkat 303 -#define __NR_symlinkat 304 -#define __NR_readlinkat 305 -#define __NR_fchmodat 306 -#define __NR_faccessat 307 -#define __NR_pselect6 308 -#define __NR_ppoll 309 -#define __NR_unshare 310 -#define __NR_set_robust_list 311 -#define __NR_get_robust_list 312 -#define __NR_splice 313 -#define __NR_sync_file_range 314 -#define __NR_tee 315 -#define __NR_vmsplice 316 -#define __NR_move_pages 317 -#define __NR_getcpu 318 -#define __NR_epoll_pwait 319 -#define __NR_utimensat 320 -#define __NR_signalfd 321 -#define __NR_timerfd 322 -#define __NR_eventfd 323 -#define __NR_fallocate 324 - -#define NR_syscalls 325 - -#ifdef __KERNEL__ - -#define __ARCH_WANT_IPC_PARSE_VERSION -#define __ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_OLD_STAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK -#define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_WAITPID -#define __ARCH_WANT_SYS_SOCKETCALL -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING -#define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#ifndef cond_syscall -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") +#ifdef CONFIG_SUPERH32 +# include "unistd_32.h" +#else +# include "unistd_64.h" #endif - -#endif /* __KERNEL__ */ -#endif /* __ASM_SH_UNISTD_H */ diff --git a/include/asm-sh/unistd_32.h b/include/asm-sh/unistd_32.h new file mode 100644 index 000000000000..b182b1cb05fd --- /dev/null +++ b/include/asm-sh/unistd_32.h @@ -0,0 +1,376 @@ +#ifndef __ASM_SH_UNISTD_H +#define __ASM_SH_UNISTD_H + +/* + * Copyright 
(C) 1999 Niibe Yutaka + */ + +/* + * This file contains the system call numbers. + */ + +#define __NR_restart_syscall 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl 110 +#define __NR_vhangup 111 +#define __NR_idle 112 +#define __NR_vm86old 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +#define 
__NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define __NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_vm86 166 +#define __NR_query_module 167 +#define __NR_poll 168 +#define __NR_nfsservctl 169 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread64 180 +#define __NR_pwrite64 181 +#define __NR_chown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 +#define __NR_streams1 188 /* some people actually want it */ +#define __NR_streams2 189 /* some people actually want it */ +#define __NR_vfork 190 +#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_lchown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_getgroups32 205 +#define __NR_setgroups32 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_chown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define __NR_setfsgid32 216 +#define __NR_pivot_root 217 +#define __NR_mincore 218 +#define __NR_madvise 219 +#define __NR_getdents64 220 +#define __NR_fcntl64 221 +/* 223 is unused */ +#define __NR_gettid 224 +#define __NR_readahead 225 +#define __NR_setxattr 226 +#define __NR_lsetxattr 227 +#define __NR_fsetxattr 228 +#define __NR_getxattr 229 +#define __NR_lgetxattr 230 +#define __NR_fgetxattr 231 +#define __NR_listxattr 232 +#define __NR_llistxattr 233 +#define __NR_flistxattr 234 +#define __NR_removexattr 235 +#define __NR_lremovexattr 236 +#define __NR_fremovexattr 237 +#define __NR_tkill 238 +#define __NR_sendfile64 239 +#define __NR_futex 240 +#define __NR_sched_setaffinity 241 +#define __NR_sched_getaffinity 242 +#define __NR_set_thread_area 243 +#define __NR_get_thread_area 244 +#define __NR_io_setup 245 +#define __NR_io_destroy 246 +#define __NR_io_getevents 247 +#define __NR_io_submit 248 +#define __NR_io_cancel 249 +#define __NR_fadvise64 250 + +#define __NR_exit_group 252 +#define __NR_lookup_dcookie 253 +#define __NR_epoll_create 254 +#define __NR_epoll_ctl 255 +#define __NR_epoll_wait 256 +#define __NR_remap_file_pages 257 +#define 
__NR_set_tid_address 258 +#define __NR_timer_create 259 +#define __NR_timer_settime (__NR_timer_create+1) +#define __NR_timer_gettime (__NR_timer_create+2) +#define __NR_timer_getoverrun (__NR_timer_create+3) +#define __NR_timer_delete (__NR_timer_create+4) +#define __NR_clock_settime (__NR_timer_create+5) +#define __NR_clock_gettime (__NR_timer_create+6) +#define __NR_clock_getres (__NR_timer_create+7) +#define __NR_clock_nanosleep (__NR_timer_create+8) +#define __NR_statfs64 268 +#define __NR_fstatfs64 269 +#define __NR_tgkill 270 +#define __NR_utimes 271 +#define __NR_fadvise64_64 272 +#define __NR_vserver 273 +#define __NR_mbind 274 +#define __NR_get_mempolicy 275 +#define __NR_set_mempolicy 276 +#define __NR_mq_open 277 +#define __NR_mq_unlink (__NR_mq_open+1) +#define __NR_mq_timedsend (__NR_mq_open+2) +#define __NR_mq_timedreceive (__NR_mq_open+3) +#define __NR_mq_notify (__NR_mq_open+4) +#define __NR_mq_getsetattr (__NR_mq_open+5) +#define __NR_kexec_load 283 +#define __NR_waitid 284 +#define __NR_add_key 285 +#define __NR_request_key 286 +#define __NR_keyctl 287 +#define __NR_ioprio_set 288 +#define __NR_ioprio_get 289 +#define __NR_inotify_init 290 +#define __NR_inotify_add_watch 291 +#define __NR_inotify_rm_watch 292 +/* 293 is unused */ +#define __NR_migrate_pages 294 +#define __NR_openat 295 +#define __NR_mkdirat 296 +#define __NR_mknodat 297 +#define __NR_fchownat 298 +#define __NR_futimesat 299 +#define __NR_fstatat64 300 +#define __NR_unlinkat 301 +#define __NR_renameat 302 +#define __NR_linkat 303 +#define __NR_symlinkat 304 +#define __NR_readlinkat 305 +#define __NR_fchmodat 306 +#define __NR_faccessat 307 +#define __NR_pselect6 308 +#define __NR_ppoll 309 +#define __NR_unshare 310 +#define __NR_set_robust_list 311 +#define __NR_get_robust_list 312 +#define __NR_splice 313 +#define __NR_sync_file_range 314 +#define __NR_tee 315 +#define __NR_vmsplice 316 +#define __NR_move_pages 317 +#define __NR_getcpu 318 +#define __NR_epoll_pwait 319 +#define __NR_utimensat 320 +#define __NR_signalfd 321 +#define __NR_timerfd 322 +#define __NR_eventfd 323 +#define __NR_fallocate 324 + +#define NR_syscalls 325 + +#ifdef __KERNEL__ + +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#ifndef cond_syscall +#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") +#endif + +#endif /* __KERNEL__ */ +#endif /* __ASM_SH_UNISTD_H */ diff --git a/include/asm-sh64/unistd.h b/include/asm-sh/unistd_64.h index 1a5197f369b2..944511882cac 100644 --- a/include/asm-sh64/unistd.h +++ b/include/asm-sh/unistd_64.h @@ -1,21 +1,19 @@ -#ifndef __ASM_SH64_UNISTD_H -#define __ASM_SH64_UNISTD_H +#ifndef 
__ASM_SH_UNISTD_64_H +#define __ASM_SH_UNISTD_64_H /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. + * include/asm-sh/unistd_64.h * - * include/asm-sh64/unistd.h + * This file contains the system call numbers. * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003 - 2007 Paul Mundt * Copyright (C) 2004 Sean McGoogan * - * This file contains the system call numbers. - * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ - #define __NR_restart_syscall 0 #define __NR_exit 1 #define __NR_fork 2 @@ -414,4 +412,4 @@ #endif #endif /* __KERNEL__ */ -#endif /* __ASM_SH64_UNISTD_H */ +#endif /* __ASM_SH_UNISTD_64_H */ diff --git a/include/asm-sh/user.h b/include/asm-sh/user.h index d1b8511d9d9f..1a4f43c75126 100644 --- a/include/asm-sh/user.h +++ b/include/asm-sh/user.h @@ -27,12 +27,19 @@ * to write an integer number of pages. */ +#if defined(__SH5__) || defined(CONFIG_CPU_SH5) +struct user_fpu_struct { + unsigned long fp_regs[32]; + unsigned int fpscr; +}; +#else struct user_fpu_struct { unsigned long fp_regs[16]; unsigned long xfp_regs[16]; unsigned long fpscr; unsigned long fpul; }; +#endif struct user { struct pt_regs regs; /* entire machine state */ diff --git a/include/asm-sh/voyagergx.h b/include/asm-sh/voyagergx.h deleted file mode 100644 index d825596562df..000000000000 --- a/include/asm-sh/voyagergx.h +++ /dev/null @@ -1,341 +0,0 @@ -/* -------------------------------------------------------------------- */ -/* voyagergx.h */ -/* -------------------------------------------------------------------- */ -/* This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - - Copyright 2003 (c) Lineo uSolutions,Inc. 
-*/ -/* -------------------------------------------------------------------- */ - -#ifndef _VOYAGER_GX_REG_H -#define _VOYAGER_GX_REG_H - -#define VOYAGER_BASE 0xb3e00000 -#define VOYAGER_USBH_BASE (0x40000 + VOYAGER_BASE) -#define VOYAGER_UART_BASE (0x30000 + VOYAGER_BASE) -#define VOYAGER_AC97_BASE (0xa0000 + VOYAGER_BASE) - -#define VOYAGER_IRQ_NUM 26 -#define VOYAGER_IRQ_BASE 200 - -#define IRQ_SM501_UP (VOYAGER_IRQ_BASE + 0) -#define IRQ_SM501_G54 (VOYAGER_IRQ_BASE + 1) -#define IRQ_SM501_G53 (VOYAGER_IRQ_BASE + 2) -#define IRQ_SM501_G52 (VOYAGER_IRQ_BASE + 3) -#define IRQ_SM501_G51 (VOYAGER_IRQ_BASE + 4) -#define IRQ_SM501_G50 (VOYAGER_IRQ_BASE + 5) -#define IRQ_SM501_G49 (VOYAGER_IRQ_BASE + 6) -#define IRQ_SM501_G48 (VOYAGER_IRQ_BASE + 7) -#define IRQ_SM501_I2C (VOYAGER_IRQ_BASE + 8) -#define IRQ_SM501_PW (VOYAGER_IRQ_BASE + 9) -#define IRQ_SM501_DMA (VOYAGER_IRQ_BASE + 10) -#define IRQ_SM501_PCI (VOYAGER_IRQ_BASE + 11) -#define IRQ_SM501_I2S (VOYAGER_IRQ_BASE + 12) -#define IRQ_SM501_AC (VOYAGER_IRQ_BASE + 13) -#define IRQ_SM501_US (VOYAGER_IRQ_BASE + 14) -#define IRQ_SM501_U1 (VOYAGER_IRQ_BASE + 15) -#define IRQ_SM501_U0 (VOYAGER_IRQ_BASE + 16) -#define IRQ_SM501_CV (VOYAGER_IRQ_BASE + 17) -#define IRQ_SM501_MC (VOYAGER_IRQ_BASE + 18) -#define IRQ_SM501_S1 (VOYAGER_IRQ_BASE + 19) -#define IRQ_SM501_S0 (VOYAGER_IRQ_BASE + 20) -#define IRQ_SM501_UH (VOYAGER_IRQ_BASE + 21) -#define IRQ_SM501_2D (VOYAGER_IRQ_BASE + 22) -#define IRQ_SM501_ZD (VOYAGER_IRQ_BASE + 23) -#define IRQ_SM501_PV (VOYAGER_IRQ_BASE + 24) -#define IRQ_SM501_CI (VOYAGER_IRQ_BASE + 25) - -/* ----- MISC controle register ------------------------------ */ -#define MISC_CTRL (0x000004 + VOYAGER_BASE) -#define MISC_CTRL_USBCLK_48 (3 << 28) -#define MISC_CTRL_USBCLK_96 (2 << 28) -#define MISC_CTRL_USBCLK_CRYSTAL (1 << 28) - -/* ----- GPIO[31:0] register --------------------------------- */ -#define GPIO_MUX_LOW (0x000008 + VOYAGER_BASE) -#define GPIO_MUX_LOW_AC97 0x1F000000 -#define GPIO_MUX_LOW_8051 0x0000ffff -#define GPIO_MUX_LOW_PWM (1 << 29) - -/* ----- GPIO[63:32] register --------------------------------- */ -#define GPIO_MUX_HIGH (0x00000C + VOYAGER_BASE) - -/* ----- DRAM controle register ------------------------------- */ -#define DRAM_CTRL (0x000010 + VOYAGER_BASE) -#define DRAM_CTRL_EMBEDDED (1 << 31) -#define DRAM_CTRL_CPU_BURST_1 (0 << 28) -#define DRAM_CTRL_CPU_BURST_2 (1 << 28) -#define DRAM_CTRL_CPU_BURST_4 (2 << 28) -#define DRAM_CTRL_CPU_BURST_8 (3 << 28) -#define DRAM_CTRL_CPU_CAS_LATENCY (1 << 27) -#define DRAM_CTRL_CPU_SIZE_2 (0 << 24) -#define DRAM_CTRL_CPU_SIZE_4 (1 << 24) -#define DRAM_CTRL_CPU_SIZE_64 (4 << 24) -#define DRAM_CTRL_CPU_SIZE_32 (5 << 24) -#define DRAM_CTRL_CPU_SIZE_16 (6 << 24) -#define DRAM_CTRL_CPU_SIZE_8 (7 << 24) -#define DRAM_CTRL_CPU_COLUMN_SIZE_1024 (0 << 22) -#define DRAM_CTRL_CPU_COLUMN_SIZE_512 (2 << 22) -#define DRAM_CTRL_CPU_COLUMN_SIZE_256 (3 << 22) -#define DRAM_CTRL_CPU_ACTIVE_PRECHARGE (1 << 21) -#define DRAM_CTRL_CPU_RESET (1 << 20) -#define DRAM_CTRL_CPU_BANKS (1 << 19) -#define DRAM_CTRL_CPU_WRITE_PRECHARGE (1 << 18) -#define DRAM_CTRL_BLOCK_WRITE (1 << 17) -#define DRAM_CTRL_REFRESH_COMMAND (1 << 16) -#define DRAM_CTRL_SIZE_4 (0 << 13) -#define DRAM_CTRL_SIZE_8 (1 << 13) -#define DRAM_CTRL_SIZE_16 (2 << 13) -#define DRAM_CTRL_SIZE_32 (3 << 13) -#define DRAM_CTRL_SIZE_64 (4 << 13) -#define DRAM_CTRL_SIZE_2 (5 << 13) -#define DRAM_CTRL_COLUMN_SIZE_256 (0 << 11) -#define DRAM_CTRL_COLUMN_SIZE_512 (2 << 11) -#define DRAM_CTRL_COLUMN_SIZE_1024 (3 << 11) -#define 
DRAM_CTRL_BLOCK_WRITE_TIME (1 << 10) -#define DRAM_CTRL_BLOCK_WRITE_PRECHARGE (1 << 9) -#define DRAM_CTRL_ACTIVE_PRECHARGE (1 << 8) -#define DRAM_CTRL_RESET (1 << 7) -#define DRAM_CTRL_REMAIN_ACTIVE (1 << 6) -#define DRAM_CTRL_BANKS (1 << 1) -#define DRAM_CTRL_WRITE_PRECHARGE (1 << 0) - -/* ----- Arvitration control register -------------------------- */ -#define ARBITRATION_CTRL (0x000014 + VOYAGER_BASE) -#define ARBITRATION_CTRL_CPUMEM (1 << 29) -#define ARBITRATION_CTRL_INTMEM (1 << 28) -#define ARBITRATION_CTRL_USB_OFF (0 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_1 (1 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_2 (2 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_3 (3 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_4 (4 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_5 (5 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_6 (6 << 24) -#define ARBITRATION_CTRL_USB_PRIORITY_7 (7 << 24) -#define ARBITRATION_CTRL_PANEL_OFF (0 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_1 (1 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_2 (2 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_3 (3 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_4 (4 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_5 (5 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_6 (6 << 20) -#define ARBITRATION_CTRL_PANEL_PRIORITY_7 (7 << 20) -#define ARBITRATION_CTRL_ZVPORT_OFF (0 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_1 (1 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_2 (2 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_3 (3 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_4 (4 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_5 (5 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_6 (6 << 16) -#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_7 (7 << 16) -#define ARBITRATION_CTRL_CMD_INTPR_OFF (0 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_1 (1 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_2 (2 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_3 (3 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_4 (4 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_5 (5 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_6 (6 << 12) -#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_7 (7 << 12) -#define ARBITRATION_CTRL_DMA_OFF (0 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_1 (1 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_2 (2 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_3 (3 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_4 (4 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_5 (5 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_6 (6 << 8) -#define ARBITRATION_CTRL_DMA_PRIORITY_7 (7 << 8) -#define ARBITRATION_CTRL_VIDEO_OFF (0 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_1 (1 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_2 (2 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_3 (3 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_4 (4 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_5 (5 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_6 (6 << 4) -#define ARBITRATION_CTRL_VIDEO_PRIORITY_7 (7 << 4) -#define ARBITRATION_CTRL_CRT_OFF (0 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_1 (1 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_2 (2 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_3 (3 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_4 (4 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_5 (5 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_6 (6 << 0) -#define ARBITRATION_CTRL_CRT_PRIORITY_7 (7 << 0) - -/* ----- Command list status register -------------------------- */ -#define CMD_INTPR_STATUS (0x000024 + VOYAGER_BASE) - -/* ----- Interrupt status register 
----------------------------- */ -#define INT_STATUS (0x00002c + VOYAGER_BASE) -#define INT_STATUS_UH (1 << 6) -#define INT_STATUS_MC (1 << 10) -#define INT_STATUS_U0 (1 << 12) -#define INT_STATUS_U1 (1 << 13) -#define INT_STATUS_AC (1 << 17) - -/* ----- Interrupt mask register ------------------------------ */ -#define VOYAGER_INT_MASK (0x000030 + VOYAGER_BASE) -#define VOYAGER_INT_MASK_AC (1 << 17) - -/* ----- Current Gate register ---------------------------------*/ -#define CURRENT_GATE (0x000038 + VOYAGER_BASE) - -/* ----- Power mode 0 gate register --------------------------- */ -#define POWER_MODE0_GATE (0x000040 + VOYAGER_BASE) -#define POWER_MODE0_GATE_G (1 << 6) -#define POWER_MODE0_GATE_U0 (1 << 7) -#define POWER_MODE0_GATE_U1 (1 << 8) -#define POWER_MODE0_GATE_UH (1 << 11) -#define POWER_MODE0_GATE_AC (1 << 18) - -/* ----- Power mode 1 gate register --------------------------- */ -#define POWER_MODE1_GATE (0x000048 + VOYAGER_BASE) -#define POWER_MODE1_GATE_G (1 << 6) -#define POWER_MODE1_GATE_U0 (1 << 7) -#define POWER_MODE1_GATE_U1 (1 << 8) -#define POWER_MODE1_GATE_UH (1 << 11) -#define POWER_MODE1_GATE_AC (1 << 18) - -/* ----- Power mode 0 clock register -------------------------- */ -#define POWER_MODE0_CLOCK (0x000044 + VOYAGER_BASE) - -/* ----- Power mode 1 clock register -------------------------- */ -#define POWER_MODE1_CLOCK (0x00004C + VOYAGER_BASE) - -/* ----- Power mode controll register ------------------------- */ -#define POWER_MODE_CTRL (0x000054 + VOYAGER_BASE) - -/* ----- Miscellaneous Timing register ------------------------ */ -#define SYSTEM_DRAM_CTRL (0x000068 + VOYAGER_BASE) - -/* ----- PWM register ------------------------------------------*/ -#define PWM_0 (0x010020 + VOYAGER_BASE) -#define PWM_0_HC(x) (((x)&0x0fff)<<20) -#define PWM_0_LC(x) (((x)&0x0fff)<<8 ) -#define PWM_0_CLK_DEV(x) (((x)&0x000f)<<4 ) -#define PWM_0_EN (1<<0) - -/* ----- I2C register ----------------------------------------- */ -#define I2C_BYTECOUNT (0x010040 + VOYAGER_BASE) -#define I2C_CONTROL (0x010041 + VOYAGER_BASE) -#define I2C_STATUS (0x010042 + VOYAGER_BASE) -#define I2C_RESET (0x010042 + VOYAGER_BASE) -#define I2C_SADDRESS (0x010043 + VOYAGER_BASE) -#define I2C_DATA (0x010044 + VOYAGER_BASE) - -/* ----- Controle register bits ----------------------------------------- */ -#define I2C_CONTROL_E (1 << 0) -#define I2C_CONTROL_MODE (1 << 1) -#define I2C_CONTROL_STATUS (1 << 2) -#define I2C_CONTROL_INT (1 << 4) -#define I2C_CONTROL_INTACK (1 << 5) -#define I2C_CONTROL_REPEAT (1 << 6) - -/* ----- Status register bits ----------------------------------------- */ -#define I2C_STATUS_BUSY (1 << 0) -#define I2C_STATUS_ACK (1 << 1) -#define I2C_STATUS_ERROR (1 << 2) -#define I2C_STATUS_COMPLETE (1 << 3) - -/* ----- Reset register ---------------------------------------------- */ -#define I2C_RESET_ERROR (1 << 2) - -/* ----- transmission frequencies ------------------------------------- */ -#define I2C_SADDRESS_SELECT (1 << 0) - -/* ----- Display Controll register ----------------------------------------- */ -#define PANEL_DISPLAY_CTRL (0x080000 + VOYAGER_BASE) -#define PANEL_DISPLAY_CTRL_BIAS (1<<26) -#define PANEL_PAN_CTRL (0x080004 + VOYAGER_BASE) -#define PANEL_COLOR_KEY (0x080008 + VOYAGER_BASE) -#define PANEL_FB_ADDRESS (0x08000C + VOYAGER_BASE) -#define PANEL_FB_WIDTH (0x080010 + VOYAGER_BASE) -#define PANEL_WINDOW_WIDTH (0x080014 + VOYAGER_BASE) -#define PANEL_WINDOW_HEIGHT (0x080018 + VOYAGER_BASE) -#define PANEL_PLANE_TL (0x08001C + VOYAGER_BASE) -#define PANEL_PLANE_BR 
(0x080020 + VOYAGER_BASE) -#define PANEL_HORIZONTAL_TOTAL (0x080024 + VOYAGER_BASE) -#define PANEL_HORIZONTAL_SYNC (0x080028 + VOYAGER_BASE) -#define PANEL_VERTICAL_TOTAL (0x08002C + VOYAGER_BASE) -#define PANEL_VERTICAL_SYNC (0x080030 + VOYAGER_BASE) -#define PANEL_CURRENT_LINE (0x080034 + VOYAGER_BASE) -#define VIDEO_DISPLAY_CTRL (0x080040 + VOYAGER_BASE) -#define VIDEO_FB_0_ADDRESS (0x080044 + VOYAGER_BASE) -#define VIDEO_FB_WIDTH (0x080048 + VOYAGER_BASE) -#define VIDEO_FB_0_LAST_ADDRESS (0x08004C + VOYAGER_BASE) -#define VIDEO_PLANE_TL (0x080050 + VOYAGER_BASE) -#define VIDEO_PLANE_BR (0x080054 + VOYAGER_BASE) -#define VIDEO_SCALE (0x080058 + VOYAGER_BASE) -#define VIDEO_INITIAL_SCALE (0x08005C + VOYAGER_BASE) -#define VIDEO_YUV_CONSTANTS (0x080060 + VOYAGER_BASE) -#define VIDEO_FB_1_ADDRESS (0x080064 + VOYAGER_BASE) -#define VIDEO_FB_1_LAST_ADDRESS (0x080068 + VOYAGER_BASE) -#define VIDEO_ALPHA_DISPLAY_CTRL (0x080080 + VOYAGER_BASE) -#define VIDEO_ALPHA_FB_ADDRESS (0x080084 + VOYAGER_BASE) -#define VIDEO_ALPHA_FB_WIDTH (0x080088 + VOYAGER_BASE) -#define VIDEO_ALPHA_FB_LAST_ADDRESS (0x08008C + VOYAGER_BASE) -#define VIDEO_ALPHA_PLANE_TL (0x080090 + VOYAGER_BASE) -#define VIDEO_ALPHA_PLANE_BR (0x080094 + VOYAGER_BASE) -#define VIDEO_ALPHA_SCALE (0x080098 + VOYAGER_BASE) -#define VIDEO_ALPHA_INITIAL_SCALE (0x08009C + VOYAGER_BASE) -#define VIDEO_ALPHA_CHROMA_KEY (0x0800A0 + VOYAGER_BASE) -#define PANEL_HWC_ADDRESS (0x0800F0 + VOYAGER_BASE) -#define PANEL_HWC_LOCATION (0x0800F4 + VOYAGER_BASE) -#define PANEL_HWC_COLOR_12 (0x0800F8 + VOYAGER_BASE) -#define PANEL_HWC_COLOR_3 (0x0800FC + VOYAGER_BASE) -#define ALPHA_DISPLAY_CTRL (0x080100 + VOYAGER_BASE) -#define ALPHA_FB_ADDRESS (0x080104 + VOYAGER_BASE) -#define ALPHA_FB_WIDTH (0x080108 + VOYAGER_BASE) -#define ALPHA_PLANE_TL (0x08010C + VOYAGER_BASE) -#define ALPHA_PLANE_BR (0x080110 + VOYAGER_BASE) -#define ALPHA_CHROMA_KEY (0x080114 + VOYAGER_BASE) -#define CRT_DISPLAY_CTRL (0x080200 + VOYAGER_BASE) -#define CRT_FB_ADDRESS (0x080204 + VOYAGER_BASE) -#define CRT_FB_WIDTH (0x080208 + VOYAGER_BASE) -#define CRT_HORIZONTAL_TOTAL (0x08020C + VOYAGER_BASE) -#define CRT_HORIZONTAL_SYNC (0x080210 + VOYAGER_BASE) -#define CRT_VERTICAL_TOTAL (0x080214 + VOYAGER_BASE) -#define CRT_VERTICAL_SYNC (0x080218 + VOYAGER_BASE) -#define CRT_SIGNATURE_ANALYZER (0x08021C + VOYAGER_BASE) -#define CRT_CURRENT_LINE (0x080220 + VOYAGER_BASE) -#define CRT_MONITOR_DETECT (0x080224 + VOYAGER_BASE) -#define CRT_HWC_ADDRESS (0x080230 + VOYAGER_BASE) -#define CRT_HWC_LOCATION (0x080234 + VOYAGER_BASE) -#define CRT_HWC_COLOR_12 (0x080238 + VOYAGER_BASE) -#define CRT_HWC_COLOR_3 (0x08023C + VOYAGER_BASE) -#define CRT_PALETTE_RAM (0x080400 + VOYAGER_BASE) -#define PANEL_PALETTE_RAM (0x080800 + VOYAGER_BASE) -#define VIDEO_PALETTE_RAM (0x080C00 + VOYAGER_BASE) - -/* ----- 8051 Controle register ----------------------------------------- */ -#define VOYAGER_8051_BASE (0x000c0000 + VOYAGER_BASE) -#define VOYAGER_8051_RESET (0x000b0000 + VOYAGER_BASE) -#define VOYAGER_8051_SELECT (0x000b0004 + VOYAGER_BASE) -#define VOYAGER_8051_CPU_INT (0x000b000c + VOYAGER_BASE) - -/* ----- AC97 Controle register ----------------------------------------- */ -#define AC97_TX_SLOT0 (0x00000000 + VOYAGER_AC97_BASE) -#define AC97_CONTROL_STATUS (0x00000080 + VOYAGER_AC97_BASE) -#define AC97C_READ (1 << 19) -#define AC97C_WD_BIT (1 << 2) -#define AC97C_INDEX_MASK 0x7f - -/* arch/sh/cchips/voyagergx/consistent.c */ -void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t); 
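The deleted voyagergx.h above encodes every register as a fixed physical offset added to VOYAGER_BASE, plus bit masks for the individual fields. Purely as an illustration of how such macros are consumed (the accessor choice and the demo_panel_bias_enable() helper are assumptions for this sketch, not taken from the patch), a read-modify-write of the panel control register would look roughly like this:

#include <linux/types.h>
#include <asm/io.h>

static void demo_panel_bias_enable(void)
{
	/* VOYAGER_BASE already points into an uncached fixed mapping. */
	void __iomem *reg = (void __iomem *)PANEL_DISPLAY_CTRL;
	u32 val;

	val = readl(reg);			/* current control bits */
	val |= PANEL_DISPLAY_CTRL_BIAS;		/* bit 26: enable LCD bias */
	writel(val, reg);
}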
-int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t); - -/* arch/sh/cchips/voyagergx/irq.c */ -void setup_voyagergx_irq(void); - -#endif /* _VOYAGER_GX_REG_H */ diff --git a/include/asm-sh64/Kbuild b/include/asm-sh64/Kbuild deleted file mode 100644 index c68e1680da01..000000000000 --- a/include/asm-sh64/Kbuild +++ /dev/null @@ -1 +0,0 @@ -include include/asm-generic/Kbuild.asm diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h deleted file mode 100644 index 237ee4e5b72a..000000000000 --- a/include/asm-sh64/a.out.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef __ASM_SH64_A_OUT_H -#define __ASM_SH64_A_OUT_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/a.out.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -struct exec -{ - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ - unsigned a_text; /* length of text, in bytes */ - unsigned a_data; /* length of data, in bytes */ - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ - unsigned a_syms; /* length of symbol table data in file, in bytes */ - unsigned a_entry; /* start address */ - unsigned a_trsize; /* length of relocation info for text, in bytes */ - unsigned a_drsize; /* length of relocation info for data, in bytes */ -}; - -#define N_TRSIZE(a) ((a).a_trsize) -#define N_DRSIZE(a) ((a).a_drsize) -#define N_SYMSIZE(a) ((a).a_syms) - -#ifdef __KERNEL__ - -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP - -#endif - -#endif /* __ASM_SH64_A_OUT_H */ diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h deleted file mode 100644 index 28f2ea9b567b..000000000000 --- a/include/asm-sh64/atomic.h +++ /dev/null @@ -1,158 +0,0 @@ -#ifndef __ASM_SH64_ATOMIC_H -#define __ASM_SH64_ATOMIC_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/atomic.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - */ - -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. - * - */ - -typedef struct { volatile int counter; } atomic_t; - -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) - -#define atomic_read(v) ((v)->counter) -#define atomic_set(v,i) ((v)->counter = (i)) - -#include <asm/system.h> - -/* - * To get proper branch prediction for the main line, we must branch - * forward to code at the end of this object's .text section, then - * branch back to restart the operation. 
- */ - -static __inline__ void atomic_add(int i, atomic_t * v) -{ - unsigned long flags; - - local_irq_save(flags); - *(long *)v += i; - local_irq_restore(flags); -} - -static __inline__ void atomic_sub(int i, atomic_t *v) -{ - unsigned long flags; - - local_irq_save(flags); - *(long *)v -= i; - local_irq_restore(flags); -} - -static __inline__ int atomic_add_return(int i, atomic_t * v) -{ - unsigned long temp, flags; - - local_irq_save(flags); - temp = *(long *)v; - temp += i; - *(long *)v = temp; - local_irq_restore(flags); - - return temp; -} - -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - -static __inline__ int atomic_sub_return(int i, atomic_t * v) -{ - unsigned long temp, flags; - - local_irq_save(flags); - temp = *(long *)v; - temp -= i; - *(long *)v = temp; - local_irq_restore(flags); - - return temp; -} - -#define atomic_dec_return(v) atomic_sub_return(1,(v)) -#define atomic_inc_return(v) atomic_add_return(1,(v)) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) - -#define atomic_inc(v) atomic_add(1,(v)) -#define atomic_dec(v) atomic_sub(1,(v)) - -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (likely(ret == old)) - v->counter = new; - local_irq_restore(flags); - - return ret; -} - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -static inline int atomic_add_unless(atomic_t *v, int a, int u) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (ret != u) - v->counter += a; - local_irq_restore(flags); - - return ret != u; -} -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - local_irq_save(flags); - *(long *)v &= ~mask; - local_irq_restore(flags); -} - -static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - local_irq_save(flags); - *(long *)v |= mask; - local_irq_restore(flags); -} - -/* Atomic operations are already serializing on SH */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - -#include <asm-generic/atomic.h> -#endif /* __ASM_SH64_ATOMIC_H */ diff --git a/include/asm-sh64/auxvec.h b/include/asm-sh64/auxvec.h deleted file mode 100644 index 1ad5a44bdc76..000000000000 --- a/include/asm-sh64/auxvec.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __ASM_SH64_AUXVEC_H -#define __ASM_SH64_AUXVEC_H - -#endif /* __ASM_SH64_AUXVEC_H */ diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h deleted file mode 100644 index 600c59efb4c2..000000000000 --- a/include/asm-sh64/bitops.h +++ /dev/null @@ -1,155 +0,0 @@ -#ifndef __ASM_SH64_BITOPS_H -#define __ASM_SH64_BITOPS_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
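The deleted asm-sh64/atomic.h above implements the atomic primitives by disabling interrupts around a plain memory update. A hedged usage sketch (the demo_obj structure and its helpers are invented for the example) shows the idiom these primitives exist for: taking a reference only while the object is still live.

#include <linux/slab.h>
#include <asm/atomic.h>

struct demo_obj {
	atomic_t refcount;
	/* ... payload ... */
};

static struct demo_obj *demo_obj_tryget(struct demo_obj *obj)
{
	/* Succeeds only if the count was non-zero, i.e. not already dying. */
	if (obj && atomic_inc_not_zero(&obj->refcount))
		return obj;
	return NULL;
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		kfree(obj);		/* last reference gone */
}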
- * - * include/asm-sh64/bitops.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - */ - -#ifdef __KERNEL__ - -#ifndef _LINUX_BITOPS_H -#error only <linux/bitops.h> can be included directly -#endif - -#include <linux/compiler.h> -#include <asm/system.h> -/* For __swab32 */ -#include <asm/byteorder.h> - -static __inline__ void set_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a |= mask; - local_irq_restore(flags); -} - -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() -static inline void clear_bit(int nr, volatile unsigned long *a) -{ - int mask; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a &= ~mask; - local_irq_restore(flags); -} - -static __inline__ void change_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a ^= mask; - local_irq_restore(flags); -} - -static __inline__ int test_and_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a |= mask; - local_irq_restore(flags); - - return retval; -} - -static __inline__ int test_and_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a &= ~mask; - local_irq_restore(flags); - - return retval; -} - -static __inline__ int test_and_change_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a ^= mask; - local_irq_restore(flags); - - return retval; -} - -#include <asm-generic/bitops/non-atomic.h> - -static __inline__ unsigned long ffz(unsigned long word) -{ - unsigned long result, __d2, __d3; - - __asm__("gettr tr0, %2\n\t" - "pta $+32, tr0\n\t" - "andi %1, 1, %3\n\t" - "beq %3, r63, tr0\n\t" - "pta $+4, tr0\n" - "0:\n\t" - "shlri.l %1, 1, %1\n\t" - "addi %0, 1, %0\n\t" - "andi %1, 1, %3\n\t" - "beqi %3, 1, tr0\n" - "1:\n\t" - "ptabs %2, tr0\n\t" - : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3) - : "0" (0L), "1" (word)); - - return result; -} - -#include <asm-generic/bitops/__ffs.h> -#include <asm-generic/bitops/find.h> -#include <asm-generic/bitops/hweight.h> -#include <asm-generic/bitops/lock.h> -#include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/ffs.h> -#include <asm-generic/bitops/ext2-non-atomic.h> -#include <asm-generic/bitops/ext2-atomic.h> -#include <asm-generic/bitops/minix.h> -#include <asm-generic/bitops/fls.h> -#include <asm-generic/bitops/fls64.h> - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_BITOPS_H */ diff --git a/include/asm-sh64/bug.h b/include/asm-sh64/bug.h deleted file mode 100644 index f3a9c9248ef4..000000000000 --- a/include/asm-sh64/bug.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __ASM_SH64_BUG_H -#define __ASM_SH64_BUG_H - -#ifdef CONFIG_BUG -/* - * Tell the user there is some problem, then force a segfault (in process - * context) or a panic 
(interrupt context). - */ -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ - *(volatile int *)0 = 0; \ -} while (0) - -#define HAVE_ARCH_BUG -#endif - -#include <asm-generic/bug.h> - -#endif /* __ASM_SH64_BUG_H */ diff --git a/include/asm-sh64/bugs.h b/include/asm-sh64/bugs.h deleted file mode 100644 index 05554aaea672..000000000000 --- a/include/asm-sh64/bugs.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef __ASM_SH64_BUGS_H -#define __ASM_SH64_BUGS_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/bugs.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -/* - * I don't know of any Super-H bugs yet. - */ - -#include <asm/processor.h> - -static void __init check_bugs(void) -{ - extern char *get_cpu_subtype(void); - extern unsigned long loops_per_jiffy; - - cpu_data->loops_per_jiffy = loops_per_jiffy; - - printk("CPU: %s\n", get_cpu_subtype()); -} -#endif /* __ASM_SH64_BUGS_H */ diff --git a/include/asm-sh64/byteorder.h b/include/asm-sh64/byteorder.h deleted file mode 100644 index 7419d78820ee..000000000000 --- a/include/asm-sh64/byteorder.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef __ASM_SH64_BYTEORDER_H -#define __ASM_SH64_BYTEORDER_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/byteorder.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#include <asm/types.h> - -static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) -{ - __asm__("byterev %0, %0\n\t" - "shari %0, 32, %0" - : "=r" (x) - : "0" (x)); - return x; -} - -static inline __attribute_const__ __u16 ___arch__swab16(__u16 x) -{ - __asm__("byterev %0, %0\n\t" - "shari %0, 48, %0" - : "=r" (x) - : "0" (x)); - return x; -} - -#define __arch__swab32(x) ___arch__swab32(x) -#define __arch__swab16(x) ___arch__swab16(x) - -#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __BYTEORDER_HAS_U64__ -# define __SWAB_64_THRU_32__ -#endif - -#ifdef __LITTLE_ENDIAN__ -#include <linux/byteorder/little_endian.h> -#else -#include <linux/byteorder/big_endian.h> -#endif - -#endif /* __ASM_SH64_BYTEORDER_H */ diff --git a/include/asm-sh64/cayman.h b/include/asm-sh64/cayman.h deleted file mode 100644 index 7b6b96844842..000000000000 --- a/include/asm-sh64/cayman.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
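The byterev/shari pair in the deleted byteorder.h above reverses the byte order of a 32-bit value held in a 64-bit register. Expressed in portable C, purely as an illustration of what the assembly computes (demo_swab32 is not a kernel interface):

#include <linux/types.h>

static inline u32 demo_swab32(u32 x)
{
	return  (x >> 24)               |
		((x >>  8) & 0x0000ff00) |
		((x <<  8) & 0x00ff0000) |
		(x << 24);
}

/* demo_swab32(0x12345678) == 0x78563412 */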
- * - * include/asm-sh64/cayman.h - * - * Cayman definitions - * - * Global defintions for the SH5 Cayman board - * - * Copyright (C) 2002 Stuart Menefy - */ - - -/* Setup for the SMSC FDC37C935 / LAN91C100FD */ -#define SMSC_IRQ IRQ_IRL1 - -/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */ -#define PCI2_IRQ IRQ_IRL3 diff --git a/include/asm-sh64/cpumask.h b/include/asm-sh64/cpumask.h deleted file mode 100644 index b7b105dbedaf..000000000000 --- a/include/asm-sh64/cpumask.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_CPUMASK_H -#define __ASM_SH64_CPUMASK_H - -#include <asm-generic/cpumask.h> - -#endif /* __ASM_SH64_CPUMASK_H */ diff --git a/include/asm-sh64/cputime.h b/include/asm-sh64/cputime.h deleted file mode 100644 index 0fd89da2aa86..000000000000 --- a/include/asm-sh64/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SH64_CPUTIME_H -#define __SH64_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __SH64_CPUTIME_H */ diff --git a/include/asm-sh64/current.h b/include/asm-sh64/current.h deleted file mode 100644 index 261224339d6f..000000000000 --- a/include/asm-sh64/current.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef __ASM_SH64_CURRENT_H -#define __ASM_SH64_CURRENT_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/current.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - */ - -#include <linux/thread_info.h> - -struct task_struct; - -static __inline__ struct task_struct * get_current(void) -{ - return current_thread_info()->task; -} - -#define current get_current() - -#endif /* __ASM_SH64_CURRENT_H */ - diff --git a/include/asm-sh64/delay.h b/include/asm-sh64/delay.h deleted file mode 100644 index 6ae31301a16a..000000000000 --- a/include/asm-sh64/delay.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __ASM_SH64_DELAY_H -#define __ASM_SH64_DELAY_H - -extern void __delay(int loops); -extern void __udelay(unsigned long long usecs, unsigned long lpj); -extern void __ndelay(unsigned long long nsecs, unsigned long lpj); -extern void udelay(unsigned long usecs); -extern void ndelay(unsigned long nsecs); - -#endif /* __ASM_SH64_DELAY_H */ - diff --git a/include/asm-sh64/device.h b/include/asm-sh64/device.h deleted file mode 100644 index d8f9872b0e2d..000000000000 --- a/include/asm-sh64/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include <asm-generic/device.h> - diff --git a/include/asm-sh64/div64.h b/include/asm-sh64/div64.h deleted file mode 100644 index f75869565e2e..000000000000 --- a/include/asm-sh64/div64.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_DIV64_H -#define __ASM_SH64_DIV64_H - -#include <asm-generic/div64.h> - -#endif /* __ASM_SH64_DIV64_H */ diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h deleted file mode 100644 index 18f8dd642ac5..000000000000 --- a/include/asm-sh64/dma-mapping.h +++ /dev/null @@ -1,194 +0,0 @@ -#ifndef __ASM_SH_DMA_MAPPING_H -#define __ASM_SH_DMA_MAPPING_H - -#include <linux/mm.h> -#include <linux/scatterlist.h> -#include <asm/io.h> - -struct pci_dev; -extern void *consistent_alloc(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle); -extern void consistent_free(struct pci_dev *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); - -#define dma_supported(dev, mask) (1) - -static inline int 
dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - - return 0; -} - -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return consistent_alloc(NULL, size, dma_handle); -} - -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - consistent_free(NULL, size, vaddr, dma_handle); -} - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) - -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - unsigned long start = (unsigned long) vaddr; - unsigned long s = start & L1_CACHE_ALIGN_MASK; - unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK; - - for (; s <= e; s += L1_CACHE_BYTES) - asm volatile ("ocbp %0, 0" : : "r" (s)); -} - -static inline dma_addr_t dma_map_single(struct device *dev, - void *ptr, size_t size, - enum dma_data_direction dir) -{ -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return virt_to_phys(ptr); -#endif - dma_cache_sync(dev, ptr, size, dir); - - return virt_to_phys(ptr); -} - -#define dma_unmap_single(dev, addr, size, dir) do { } while (0) - -static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - } - - return nents; -} - -#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0) - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - return dma_map_single(dev, page_address(page) + offset, size, dir); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction dir) -{ - dma_unmap_single(dev, dma_address, size, dir); -} - -static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir); -} - -static inline void dma_sync_single_range(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir); -} - -static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nelems; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - } -} - -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir) -{ - dma_sync_single(dev, dma_handle, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, 
size_t size, - enum dma_data_direction dir) -{ - dma_sync_single(dev, dma_handle, size, dir); -} - -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_device(dev, dma_handle+offset, size, direction); -} - -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - dma_sync_sg(dev, sg, nelems, dir); -} - -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - dma_sync_sg(dev, sg, nelems, dir); -} - -static inline int dma_get_cache_alignment(void) -{ - /* - * Each processor family will define its own L1_CACHE_SHIFT, - * L1_CACHE_BYTES wraps to this, so this is always safe. - */ - return L1_CACHE_BYTES; -} - -static inline int dma_mapping_error(dma_addr_t dma_addr) -{ - return dma_addr == 0; -} - -#endif /* __ASM_SH_DMA_MAPPING_H */ - diff --git a/include/asm-sh64/dma.h b/include/asm-sh64/dma.h deleted file mode 100644 index e701f39470a2..000000000000 --- a/include/asm-sh64/dma.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef __ASM_SH64_DMA_H -#define __ASM_SH64_DMA_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/dma.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - */ - -#include <linux/mm.h> -#include <asm/io.h> -#include <asm/pgtable.h> - -#define MAX_DMA_CHANNELS 4 - -/* - * SH5 can DMA in any memory area. - * - * The static definition is dodgy because it should limit - * the highest DMA-able address based on the actual - * Physical memory available. This is actually performed - * at run time in defining the memory allowed to DMA_ZONE. - */ -#define MAX_DMA_ADDRESS ~(NPHYS_MASK) - -#define DMA_MODE_READ 0 -#define DMA_MODE_WRITE 1 - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - -#endif /* __ASM_SH64_DMA_H */ diff --git a/include/asm-sh64/elf.h b/include/asm-sh64/elf.h deleted file mode 100644 index f994286e1998..000000000000 --- a/include/asm-sh64/elf.h +++ /dev/null @@ -1,107 +0,0 @@ -#ifndef __ASM_SH64_ELF_H -#define __ASM_SH64_ELF_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/elf.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* - * ELF register definitions.. - */ - -#include <asm/ptrace.h> -#include <asm/user.h> -#include <asm/byteorder.h> - -typedef unsigned long elf_greg_t; - -#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; - -typedef struct user_fpu_struct elf_fpregset_t; - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ( (x)->e_machine == EM_SH ) - -/* - * These are used to set parameters in the core dumps. 
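
An aside on the streaming-DMA helper in the include/asm-sh64/dma-mapping.h removal just above: dma_cache_sync() walks the buffer one L1 cache line at a time, issuing one "ocbp" per line from the line holding the first byte through the line holding the byte just past the end, inclusive. The stand-alone sketch below reproduces only that alignment arithmetic; the 32-byte line size is an assumption standing in for the CPU's L1_CACHE_BYTES.

#include <stdio.h>

#define LINE_BYTES 32UL                     /* assumed stand-in for L1_CACHE_BYTES */
#define LINE_MASK  (~(LINE_BYTES - 1))      /* plays the role of L1_CACHE_ALIGN_MASK */

/* Count how many lines the removed dma_cache_sync() loop would touch. */
static unsigned long lines_touched(unsigned long start, unsigned long size)
{
    unsigned long s = start & LINE_MASK;            /* line holding the first byte */
    unsigned long e = (start + size) & LINE_MASK;   /* line holding the byte just past the end */
    unsigned long n = 0;

    for (; s <= e; s += LINE_BYTES)                 /* inclusive, exactly like the kernel loop */
        n++;
    return n;
}

int main(void)
{
    /* A 100-byte buffer starting 8 bytes into a line covers 4 lines. */
    printf("%lu lines\n", lines_touched(0x1008, 100));
    return 0;
}
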
- */ -#define ELF_CLASS ELFCLASS32 -#ifdef __LITTLE_ENDIAN__ -#define ELF_DATA ELFDATA2LSB -#else -#define ELF_DATA ELFDATA2MSB -#endif -#define ELF_ARCH EM_SH - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) - -#define R_SH_DIR32 1 -#define R_SH_REL32 2 -#define R_SH_IMM_LOW16 246 -#define R_SH_IMM_LOW16_PCREL 247 -#define R_SH_IMM_MEDLOW16 248 -#define R_SH_IMM_MEDLOW16_PCREL 249 - -#define ELF_CORE_COPY_REGS(_dest,_regs) \ - memcpy((char *) &_dest, (char *) _regs, \ - sizeof(struct pt_regs)); - -/* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. This could be done in user space, - but it's not easy, and we've already done it here. */ - -#define ELF_HWCAP (0) - -/* This yields a string that ld.so will use to load implementation - specific libraries for optimization. This is more specific in - intent than poking at uname or /proc/cpuinfo. - - For the moment, we have only optimizations for the Intel generations, - but that could change... */ - -#define ELF_PLATFORM (NULL) - -#define ELF_PLAT_INIT(_r, load_addr) \ - do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ - _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ - _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ - _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \ - _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \ - _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \ - _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \ - _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \ - _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \ - _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \ - _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \ - _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \ - _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \ - _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \ - _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \ - _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \ - _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \ - _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \ - _r->sr = SR_FD | SR_MMU; } while (0) - -#ifdef __KERNEL__ -#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) -#endif - -#endif /* __ASM_SH64_ELF_H */ diff --git a/include/asm-sh64/emergency-restart.h b/include/asm-sh64/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/include/asm-sh64/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-sh64/errno.h b/include/asm-sh64/errno.h deleted file mode 100644 index 57b46d4bdd41..000000000000 --- a/include/asm-sh64/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_ERRNO_H -#define __ASM_SH64_ERRNO_H - -#include <asm-generic/errno.h> - -#endif /* __ASM_SH64_ERRNO_H */ diff --git a/include/asm-sh64/fb.h 
b/include/asm-sh64/fb.h deleted file mode 100644 index d92e99cd8c8a..000000000000 --- a/include/asm-sh64/fb.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include <linux/fb.h> -#include <linux/fs.h> -#include <asm/page.h> - -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) -{ - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); -} - -static inline int fb_is_primary_device(struct fb_info *info) -{ - return 0; -} - -#endif /* _ASM_FB_H_ */ diff --git a/include/asm-sh64/fcntl.h b/include/asm-sh64/fcntl.h deleted file mode 100644 index 744dd79b9d5d..000000000000 --- a/include/asm-sh64/fcntl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-sh/fcntl.h> diff --git a/include/asm-sh64/futex.h b/include/asm-sh64/futex.h deleted file mode 100644 index 6a332a9f099c..000000000000 --- a/include/asm-sh64/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include <asm-generic/futex.h> - -#endif diff --git a/include/asm-sh64/gpio.h b/include/asm-sh64/gpio.h deleted file mode 100644 index 6bc5a13d8415..000000000000 --- a/include/asm-sh64/gpio.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_SH64_GPIO_H -#define __ASM_SH64_GPIO_H - -/* - * This is just a stub, so that every arch using sh-sci has a gpio.h - */ - -#endif /* __ASM_SH64_GPIO_H */ diff --git a/include/asm-sh64/hardirq.h b/include/asm-sh64/hardirq.h deleted file mode 100644 index 555fd7a35108..000000000000 --- a/include/asm-sh64/hardirq.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef __ASM_SH64_HARDIRQ_H -#define __ASM_SH64_HARDIRQ_H - -#include <linux/threads.h> -#include <linux/irq.h> - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - -/* arch/sh64/kernel/irq.c */ -extern void ack_bad_irq(unsigned int irq); - -#endif /* __ASM_SH64_HARDIRQ_H */ - diff --git a/include/asm-sh64/hardware.h b/include/asm-sh64/hardware.h deleted file mode 100644 index 931c1ad80847..000000000000 --- a/include/asm-sh64/hardware.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_SH64_HARDWARE_H -#define __ASM_SH64_HARDWARE_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/hardware.h - * - * Copyright (C) 2002 Stuart Menefy - * Copyright (C) 2003 Paul Mundt - * - * Defitions of the locations of registers in the physical address space. - */ - -#define PHYS_PERIPHERAL_BLOCK 0x09000000 -#define PHYS_DMAC_BLOCK 0x0e000000 -#define PHYS_PCI_BLOCK 0x60000000 -#define PHYS_EMI_BLOCK 0xff000000 - -#endif /* __ASM_SH64_HARDWARE_H */ diff --git a/include/asm-sh64/hw_irq.h b/include/asm-sh64/hw_irq.h deleted file mode 100644 index ebb39089b0ac..000000000000 --- a/include/asm-sh64/hw_irq.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASM_SH64_HW_IRQ_H -#define __ASM_SH64_HW_IRQ_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * include/asm-sh64/hw_irq.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#endif /* __ASM_SH64_HW_IRQ_H */ diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h deleted file mode 100644 index b6e31e8b9410..000000000000 --- a/include/asm-sh64/ide.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * linux/include/asm-sh64/ide.h - * - * Copyright (C) 1994-1996 Linus Torvalds & authors - * - * sh64 version by Richard Curnow & Paul Mundt - */ - -/* - * This file contains the sh64 architecture specific IDE code. - */ - -#ifndef __ASM_SH64_IDE_H -#define __ASM_SH64_IDE_H - -#ifdef __KERNEL__ - - -/* Without this, the initialisation of PCI IDE cards end up calling - * ide_init_hwif_ports, which won't work. */ -#ifdef CONFIG_BLK_DEV_IDEPCI -#define ide_default_io_ctl(base) (0) -#endif - -#include <asm-generic/ide_iops.h> - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_IDE_H */ diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h deleted file mode 100644 index 7bd7314d38c2..000000000000 --- a/include/asm-sh64/io.h +++ /dev/null @@ -1,196 +0,0 @@ -#ifndef __ASM_SH64_IO_H -#define __ASM_SH64_IO_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/io.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - */ - -/* - * Convention: - * read{b,w,l}/write{b,w,l} are for PCI, - * while in{b,w,l}/out{b,w,l} are for ISA - * These may (will) be platform specific function. - * - * In addition, we have - * ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O. - * which are processor specific. Address should be the result of - * onchip_remap(); - */ - -#include <linux/compiler.h> -#include <asm/cache.h> -#include <asm/system.h> -#include <asm/page.h> -#include <asm-generic/iomap.h> - -/* - * Nothing overly special here.. instead of doing the same thing - * over and over again, we just define a set of sh64_in/out functions - * with an implicit size. The traditional read{b,w,l}/write{b,w,l} - * mess is wrapped to this, as are the SH-specific ctrl_in/out routines. 
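
Before the accessor definitions that follow, a usage note on the convention the removed include/asm-sh64/io.h describes (read*/write* for PCI-style MMIO, in*/out* for ISA ports, ctrl_in*/ctrl_out* for on-chip registers behind onchip_remap()): a driver would normally map a window and then go through the generic wrappers, which resolve to the sh64_in*/sh64_out* helpers defined just below. Everything named MYDEV here is invented for illustration, and this is a kernel-context sketch rather than code from the patch.

/* Hypothetical device: base address, window size and register offset are made up. */
#define MYDEV_PHYS_BASE   0x09100000UL   /* assumed to sit in the peripheral block */
#define MYDEV_WINDOW_SIZE 0x100UL
#define MYDEV_REG_CTRL    0x10

static void __iomem *mydev_base;

static int mydev_setup(void)
{
    mydev_base = ioremap_nocache(MYDEV_PHYS_BASE, MYDEV_WINDOW_SIZE);
    if (!mydev_base)
        return -ENOMEM;

    /* writel()/readl() expand to sh64_out32()/sh64_in32() from this header. */
    writel(0x1, mydev_base + MYDEV_REG_CTRL);
    return (readl(mydev_base + MYDEV_REG_CTRL) & 0x1) ? 0 : -EIO;
}
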
- */ -static inline unsigned char sh64_in8(const volatile void __iomem *addr) -{ - return *(volatile unsigned char __force *)addr; -} - -static inline unsigned short sh64_in16(const volatile void __iomem *addr) -{ - return *(volatile unsigned short __force *)addr; -} - -static inline unsigned int sh64_in32(const volatile void __iomem *addr) -{ - return *(volatile unsigned int __force *)addr; -} - -static inline unsigned long long sh64_in64(const volatile void __iomem *addr) -{ - return *(volatile unsigned long long __force *)addr; -} - -static inline void sh64_out8(unsigned char b, volatile void __iomem *addr) -{ - *(volatile unsigned char __force *)addr = b; - wmb(); -} - -static inline void sh64_out16(unsigned short b, volatile void __iomem *addr) -{ - *(volatile unsigned short __force *)addr = b; - wmb(); -} - -static inline void sh64_out32(unsigned int b, volatile void __iomem *addr) -{ - *(volatile unsigned int __force *)addr = b; - wmb(); -} - -static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr) -{ - *(volatile unsigned long long __force *)addr = b; - wmb(); -} - -#define readb(addr) sh64_in8(addr) -#define readw(addr) sh64_in16(addr) -#define readl(addr) sh64_in32(addr) -#define readb_relaxed(addr) sh64_in8(addr) -#define readw_relaxed(addr) sh64_in16(addr) -#define readl_relaxed(addr) sh64_in32(addr) - -#define writeb(b, addr) sh64_out8(b, addr) -#define writew(b, addr) sh64_out16(b, addr) -#define writel(b, addr) sh64_out32(b, addr) - -#define ctrl_inb(addr) sh64_in8(ioport_map(addr, 1)) -#define ctrl_inw(addr) sh64_in16(ioport_map(addr, 2)) -#define ctrl_inl(addr) sh64_in32(ioport_map(addr, 4)) - -#define ctrl_outb(b, addr) sh64_out8(b, ioport_map(addr, 1)) -#define ctrl_outw(b, addr) sh64_out16(b, ioport_map(addr, 2)) -#define ctrl_outl(b, addr) sh64_out32(b, ioport_map(addr, 4)) - -#define ioread8(addr) sh64_in8(addr) -#define ioread16(addr) sh64_in16(addr) -#define ioread32(addr) sh64_in32(addr) -#define iowrite8(b, addr) sh64_out8(b, addr) -#define iowrite16(b, addr) sh64_out16(b, addr) -#define iowrite32(b, addr) sh64_out32(b, addr) - -#define inb(addr) ctrl_inb(addr) -#define inw(addr) ctrl_inw(addr) -#define inl(addr) ctrl_inl(addr) -#define outb(b, addr) ctrl_outb(b, addr) -#define outw(b, addr) ctrl_outw(b, addr) -#define outl(b, addr) ctrl_outl(b, addr) - -void outsw(unsigned long port, const void *addr, unsigned long count); -void insw(unsigned long port, void *addr, unsigned long count); -void outsl(unsigned long port, const void *addr, unsigned long count); -void insl(unsigned long port, void *addr, unsigned long count); - -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) -#define outb_p(x,addr) outb(x,addr) -#define outw_p(x,addr) outw(x,addr) -#define outl_p(x,addr) outl(x,addr) - -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel - -void memcpy_toio(void __iomem *to, const void *from, long count); -void memcpy_fromio(void *to, void __iomem *from, long count); - -#define mmiowb() - -#ifdef __KERNEL__ - -#ifdef CONFIG_SH_CAYMAN -extern unsigned long smsc_superio_virt; -#endif -#ifdef CONFIG_PCI -extern unsigned long pciio_virt; -#endif - -#define IO_SPACE_LIMIT 0xffffffff - -/* - * Change virtual addresses to physical addresses and vv. 
- * These are trivial on the 1:1 Linux/SuperH mapping - */ -static inline unsigned long virt_to_phys(volatile void * address) -{ - return __pa(address); -} - -static inline void * phys_to_virt(unsigned long address) -{ - return __va(address); -} - -extern void * __ioremap(unsigned long phys_addr, unsigned long size, - unsigned long flags); - -static inline void * ioremap(unsigned long phys_addr, unsigned long size) -{ - return __ioremap(phys_addr, size, 1); -} - -static inline void * ioremap_nocache (unsigned long phys_addr, unsigned long size) -{ - return __ioremap(phys_addr, size, 0); -} - -extern void iounmap(void *addr); - -unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name); -extern void onchip_unmap(unsigned long vaddr); - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -#endif /* __KERNEL__ */ -#endif /* __ASM_SH64_IO_H */ diff --git a/include/asm-sh64/ioctl.h b/include/asm-sh64/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/include/asm-sh64/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/include/asm-sh64/ioctls.h b/include/asm-sh64/ioctls.h deleted file mode 100644 index 6b0c04f63c57..000000000000 --- a/include/asm-sh64/ioctls.h +++ /dev/null @@ -1,116 +0,0 @@ -#ifndef __ASM_SH64_IOCTLS_H -#define __ASM_SH64_IOCTLS_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/ioctls.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2004 Richard Curnow - * - */ - -#include <asm/ioctl.h> - -#define FIOCLEX 0x6601 /* _IO('f', 1) */ -#define FIONCLEX 0x6602 /* _IO('f', 2) */ -#define FIOASYNC 0x4004667d /* _IOW('f', 125, int) */ -#define FIONBIO 0x4004667e /* _IOW('f', 126, int) */ -#define FIONREAD 0x8004667f /* _IOW('f', 127, int) */ -#define TIOCINQ FIONREAD -#define FIOQSIZE 0x80086680 /* _IOR('f', 128, loff_t) */ - -#define TCGETS 0x5401 -#define TCSETS 0x5402 -#define TCSETSW 0x5403 -#define TCSETSF 0x5404 - -#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ -#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ -#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ -#define TCSETAF 0x4012741c /* _IOW('t', 28, struct termio) */ - -#define TCSBRK 0x741d /* _IO('t', 29) */ -#define TCXONC 0x741e /* _IO('t', 30) */ -#define TCFLSH 0x741f /* _IO('t', 31) */ - -#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ -#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ -#define TIOCSTART 0x746e /* _IO('t', 110) start output, like ^Q */ -#define TIOCSTOP 0x746f /* _IO('t', 111) stop output, like ^S */ -#define TIOCOUTQ 0x80047473 /* _IOR('t', 115, int) output queue size */ - -#define TIOCSPGRP 0x40047476 /* _IOW('t', 118, int) */ -#define TIOCGPGRP 0x80047477 /* _IOR('t', 119, int) */ - -#define TIOCEXCL 0x540c /* _IO('T', 12) */ -#define TIOCNXCL 0x540d /* _IO('T', 13) */ -#define TIOCSCTTY 0x540e /* _IO('T', 14) */ - -#define TIOCSTI 0x40015412 /* _IOW('T', 18, char) 0x5412 */ -#define TIOCMGET 0x80045415 /* _IOR('T', 21, unsigned int) 0x5415 */ -#define TIOCMBIS 0x40045416 /* _IOW('T', 22, unsigned int) 0x5416 */ -#define TIOCMBIC 0x40045417 /* _IOW('T', 23, unsigned int) 0x5417 */ -#define 
TIOCMSET 0x40045418 /* _IOW('T', 24, unsigned int) 0x5418 */ - -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG - -#define TIOCGSOFTCAR 0x80045419 /* _IOR('T', 25, unsigned int) 0x5419 */ -#define TIOCSSOFTCAR 0x4004541a /* _IOW('T', 26, unsigned int) 0x541A */ -#define TIOCLINUX 0x4004541c /* _IOW('T', 28, char) 0x541C */ -#define TIOCCONS 0x541d /* _IO('T', 29) */ -#define TIOCGSERIAL 0x803c541e /* _IOR('T', 30, struct serial_struct) 0x541E */ -#define TIOCSSERIAL 0x403c541f /* _IOW('T', 31, struct serial_struct) 0x541F */ -#define TIOCPKT 0x40045420 /* _IOW('T', 32, int) 0x5420 */ - -#define TIOCPKT_DATA 0 -#define TIOCPKT_FLUSHREAD 1 -#define TIOCPKT_FLUSHWRITE 2 -#define TIOCPKT_STOP 4 -#define TIOCPKT_START 8 -#define TIOCPKT_NOSTOP 16 -#define TIOCPKT_DOSTOP 32 - - -#define TIOCNOTTY 0x5422 /* _IO('T', 34) */ -#define TIOCSETD 0x40045423 /* _IOW('T', 35, int) 0x5423 */ -#define TIOCGETD 0x80045424 /* _IOR('T', 36, int) 0x5424 */ -#define TCSBRKP 0x40045424 /* _IOW('T', 37, int) 0x5425 */ /* Needed for POSIX tcsendbreak() */ -#define TIOCTTYGSTRUCT 0x8c105426 /* _IOR('T', 38, struct tty_struct) 0x5426 */ /* For debugging only */ -#define TIOCSBRK 0x5427 /* _IO('T', 39) */ /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* _IO('T', 40) */ /* BSD compatibility */ -#define TIOCGSID 0x80045429 /* _IOR('T', 41, pid_t) 0x5429 */ /* Return the session ID of FD */ -#define TIOCGPTN 0x80045430 /* _IOR('T',0x30, unsigned int) 0x5430 Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK 0x40045431 /* _IOW('T',0x31, int) Lock/unlock Pty */ - -#define TIOCSERCONFIG 0x5453 /* _IO('T', 83) */ -#define TIOCSERGWILD 0x80045454 /* _IOR('T', 84, int) 0x5454 */ -#define TIOCSERSWILD 0x40045455 /* _IOW('T', 85, int) 0x5455 */ -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 0x80d85458 /* _IOR('T', 88, struct async_struct) 0x5458 */ /* For debugging only */ -#define TIOCSERGETLSR 0x80045459 /* _IOR('T', 89, unsigned int) 0x5459 */ /* Get line status register */ - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - -#define TIOCSERGETMULTI 0x80a8545a /* _IOR('T', 90, struct serial_multiport_struct) 0x545A */ /* Get multiport config */ -#define TIOCSERSETMULTI 0x40a8545b /* _IOW('T', 91, struct serial_multiport_struct) 0x545B */ /* Set multiport config */ - -#define TIOCMIWAIT 0x545c /* _IO('T', 92) wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545d /* read serial port inline interrupt counts */ - -#endif /* __ASM_SH64_IOCTLS_H */ diff --git a/include/asm-sh64/ipcbuf.h b/include/asm-sh64/ipcbuf.h deleted file mode 100644 index c441e35299c0..000000000000 --- a/include/asm-sh64/ipcbuf.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __ASM_SH64_IPCBUF_H__ -#define __ASM_SH64_IPCBUF_H__ - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/ipcbuf.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* - * The ipc64_perm structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
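
The literal ioctl numbers in the include/asm-sh64/ioctls.h removal above can be checked against the encoding they came from: the removed asm-sh64/ioctl.h just pulls in asm-generic/ioctl.h, whose conventional layout is 8 number bits, 8 type bits, 14 size bits and 2 direction bits (write = 1, read = 2). Below is a stand-alone verification of two of the listed values; struct winsize is modelled locally as four unsigned shorts so the example does not depend on kernel headers.

#include <stdio.h>

/* Conventional asm-generic/ioctl.h layout, most significant first: dir:2 | size:14 | type:8 | nr:8 */
#define IOC(dir, type, nr, size) \
    (((unsigned int)(dir) << 30) | ((unsigned int)(size) << 16) | \
     ((unsigned int)(type) << 8) | (unsigned int)(nr))
#define IOC_WRITE 1U
#define IOC_READ  2U

struct winsize_model { unsigned short row, col, xpixel, ypixel; };  /* 8 bytes, like struct winsize */

int main(void)
{
    /* Listed above: FIONBIO 0x4004667e == _IOW('f', 126, int) */
    printf("FIONBIO    = %#010x\n", IOC(IOC_WRITE, 'f', 126, sizeof(int)));
    /* Listed above: TIOCGWINSZ 0x80087468 == _IOR('t', 104, struct winsize) */
    printf("TIOCGWINSZ = %#010x\n",
           IOC(IOC_READ, 't', 104, sizeof(struct winsize_model)));
    return 0;
}
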
- * - * Pad space is left for: - * - 32-bit mode_t and seq - * - 2 miscellaneous 32-bit values - */ - -struct ipc64_perm -{ - __kernel_key_t key; - __kernel_uid32_t uid; - __kernel_gid32_t gid; - __kernel_uid32_t cuid; - __kernel_gid32_t cgid; - __kernel_mode_t mode; - unsigned short __pad1; - unsigned short seq; - unsigned short __pad2; - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* __ASM_SH64_IPCBUF_H__ */ diff --git a/include/asm-sh64/irq_regs.h b/include/asm-sh64/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/include/asm-sh64/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/include/asm-sh64/kdebug.h b/include/asm-sh64/kdebug.h deleted file mode 100644 index 6ece1b037665..000000000000 --- a/include/asm-sh64/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kdebug.h> diff --git a/include/asm-sh64/keyboard.h b/include/asm-sh64/keyboard.h deleted file mode 100644 index 0b01c3beb2f8..000000000000 --- a/include/asm-sh64/keyboard.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * linux/include/asm-shmedia/keyboard.h - * - * Copied from i386 version: - * Created 3 Nov 1996 by Geert Uytterhoeven - */ - -/* - * This file contains the i386 architecture specific keyboard definitions - */ - -#ifndef __ASM_SH64_KEYBOARD_H -#define __ASM_SH64_KEYBOARD_H - -#ifdef __KERNEL__ - -#include <linux/kernel.h> -#include <linux/ioport.h> -#include <asm/io.h> - -#ifdef CONFIG_SH_CAYMAN -#define KEYBOARD_IRQ (START_EXT_IRQS + 2) /* SMSC SuperIO IRQ 1 */ -#endif -#define DISABLE_KBD_DURING_INTERRUPTS 0 - -extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode); -extern int pckbd_getkeycode(unsigned int scancode); -extern int pckbd_translate(unsigned char scancode, unsigned char *keycode, - char raw_mode); -extern char pckbd_unexpected_up(unsigned char keycode); -extern void pckbd_leds(unsigned char leds); -extern void pckbd_init_hw(void); - -#define kbd_setkeycode pckbd_setkeycode -#define kbd_getkeycode pckbd_getkeycode -#define kbd_translate pckbd_translate -#define kbd_unexpected_up pckbd_unexpected_up -#define kbd_leds pckbd_leds -#define kbd_init_hw pckbd_init_hw - -/* resource allocation */ -#define kbd_request_region() -#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \ - "keyboard", NULL) - -/* How to access the keyboard macros on this platform. */ -#define kbd_read_input() inb(KBD_DATA_REG) -#define kbd_read_status() inb(KBD_STATUS_REG) -#define kbd_write_output(val) outb(val, KBD_DATA_REG) -#define kbd_write_command(val) outb(val, KBD_CNTL_REG) - -/* Some stoneage hardware needs delays after some operations. 
*/ -#define kbd_pause() do { } while(0) - -/* - * Machine specific bits for the PS/2 driver - */ - -#ifdef CONFIG_SH_CAYMAN -#define AUX_IRQ (START_EXT_IRQS + 6) /* SMSC SuperIO IRQ12 */ -#endif - -#define aux_request_irq(hand, dev_id) \ - request_irq(AUX_IRQ, hand, IRQF_SHARED, "PS2 Mouse", dev_id) - -#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id) - -#endif /* __KERNEL__ */ -#endif /* __ASM_SH64_KEYBOARD_H */ - diff --git a/include/asm-sh64/kmap_types.h b/include/asm-sh64/kmap_types.h deleted file mode 100644 index 2ae7c7587919..000000000000 --- a/include/asm-sh64/kmap_types.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SH64_KMAP_TYPES_H -#define __ASM_SH64_KMAP_TYPES_H - -#include <asm-sh/kmap_types.h> - -#endif /* __ASM_SH64_KMAP_TYPES_H */ - diff --git a/include/asm-sh64/linkage.h b/include/asm-sh64/linkage.h deleted file mode 100644 index 1dd0e84a228d..000000000000 --- a/include/asm-sh64/linkage.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SH64_LINKAGE_H -#define __ASM_SH64_LINKAGE_H - -#include <asm-sh/linkage.h> - -#endif /* __ASM_SH64_LINKAGE_H */ - diff --git a/include/asm-sh64/local.h b/include/asm-sh64/local.h deleted file mode 100644 index d9bd95dd36e2..000000000000 --- a/include/asm-sh64/local.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SH64_LOCAL_H -#define __ASM_SH64_LOCAL_H - -#include <asm-generic/local.h> - -#endif /* __ASM_SH64_LOCAL_H */ - diff --git a/include/asm-sh64/mc146818rtc.h b/include/asm-sh64/mc146818rtc.h deleted file mode 100644 index 6cd3aec68dbe..000000000000 --- a/include/asm-sh64/mc146818rtc.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * linux/include/asm-sh64/mc146818rtc.h - * -*/ - -/* For now, an empty place-holder to get IDE to compile. */ - diff --git a/include/asm-sh64/mman.h b/include/asm-sh64/mman.h deleted file mode 100644 index a9be6d885c3e..000000000000 --- a/include/asm-sh64/mman.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_MMAN_H -#define __ASM_SH64_MMAN_H - -#include <asm-sh/mman.h> - -#endif /* __ASM_SH64_MMAN_H */ diff --git a/include/asm-sh64/mmu.h b/include/asm-sh64/mmu.h deleted file mode 100644 index ccd36d26615a..000000000000 --- a/include/asm-sh64/mmu.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __MMU_H -#define __MMU_H - -/* Default "unsigned long" context */ -typedef unsigned long mm_context_t; - -#endif diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h deleted file mode 100644 index 507bf72bb8e1..000000000000 --- a/include/asm-sh64/mmu_context.h +++ /dev/null @@ -1,208 +0,0 @@ -#ifndef __ASM_SH64_MMU_CONTEXT_H -#define __ASM_SH64_MMU_CONTEXT_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/mmu_context.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - * ASID handling idea taken from MIPS implementation. - * - */ - -#ifndef __ASSEMBLY__ - -/* - * Cache of MMU context last used. 
- * - * The MMU "context" consists of two things: - * (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache) - * (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache) - */ -extern unsigned long mmu_context_cache; - -#include <asm/page.h> -#include <asm-generic/mm_hooks.h> - -/* Current mm's pgd */ -extern pgd_t *mmu_pdtp_cache; - -#define SR_ASID_MASK 0xffffffffff00ffffULL -#define SR_ASID_SHIFT 16 - -#define MMU_CONTEXT_ASID_MASK 0x000000ff -#define MMU_CONTEXT_VERSION_MASK 0xffffff00 -#define MMU_CONTEXT_FIRST_VERSION 0x00000100 -#define NO_CONTEXT 0 - -/* ASID is 8-bit value, so it can't be 0x100 */ -#define MMU_NO_ASID 0x100 - - -/* - * Virtual Page Number mask - */ -#define MMU_VPN_MASK 0xfffff000 - -static inline void -get_new_mmu_context(struct mm_struct *mm) -{ - extern void flush_tlb_all(void); - extern void flush_cache_all(void); - - unsigned long mc = ++mmu_context_cache; - - if (!(mc & MMU_CONTEXT_ASID_MASK)) { - /* We exhaust ASID of this version. - Flush all TLB and start new cycle. */ - flush_tlb_all(); - /* We have to flush all caches as ASIDs are - used in cache */ - flush_cache_all(); - /* Fix version if needed. - Note that we avoid version #0/asid #0 to distingush NO_CONTEXT. */ - if (!mc) - mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION; - } - mm->context = mc; -} - -/* - * Get MMU context if needed. - */ -static __inline__ void -get_mmu_context(struct mm_struct *mm) -{ - if (mm) { - unsigned long mc = mmu_context_cache; - /* Check if we have old version of context. - If it's old, we need to get new context with new version. */ - if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) - get_new_mmu_context(mm); - } -} - -/* - * Initialize the context related info for a new mm_struct - * instance. - */ -static inline int init_new_context(struct task_struct *tsk, - struct mm_struct *mm) -{ - mm->context = NO_CONTEXT; - - return 0; -} - -/* - * Destroy context related info for an mm_struct that is about - * to be put to rest. - */ -static inline void destroy_context(struct mm_struct *mm) -{ - extern void flush_tlb_mm(struct mm_struct *mm); - - /* Well, at least free TLB entries */ - flush_tlb_mm(mm); -} - -#endif /* __ASSEMBLY__ */ - -/* Common defines */ -#define TLB_STEP 0x00000010 -#define TLB_PTEH 0x00000000 -#define TLB_PTEL 0x00000008 - -/* PTEH defines */ -#define PTEH_ASID_SHIFT 2 -#define PTEH_VALID 0x0000000000000001 -#define PTEH_SHARED 0x0000000000000002 -#define PTEH_MATCH_ASID 0x00000000000003ff - -#ifndef __ASSEMBLY__ -/* This has to be a common function because the next location to fill - * information is shared. */ -extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte); - -/* Profiling counter. */ -#ifdef CONFIG_SH64_PROC_TLB -extern unsigned long long calls_to_do_fast_page_fault; -#endif - -static inline unsigned long get_asid(void) -{ - unsigned long long sr; - - asm volatile ("getcon " __SR ", %0\n\t" - : "=r" (sr)); - - sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK; - return (unsigned long) sr; -} - -/* Set ASID into SR */ -static inline void set_asid(unsigned long asid) -{ - unsigned long long sr, pc; - - asm volatile ("getcon " __SR ", %0" : "=r" (sr)); - - sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT); - - /* - * It is possible that this function may be inlined and so to avoid - * the assembler reporting duplicate symbols we make use of the gas trick - * of generating symbols using numerics and forward reference. 
- */ - asm volatile ("movi 1, %1\n\t" - "shlli %1, 28, %1\n\t" - "or %0, %1, %1\n\t" - "putcon %1, " __SR "\n\t" - "putcon %0, " __SSR "\n\t" - "movi 1f, %1\n\t" - "ori %1, 1 , %1\n\t" - "putcon %1, " __SPC "\n\t" - "rte\n" - "1:\n\t" - : "=r" (sr), "=r" (pc) : "0" (sr)); -} - -/* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. - */ -static __inline__ void activate_context(struct mm_struct *mm) -{ - get_mmu_context(mm); - set_asid(mm->context & MMU_CONTEXT_ASID_MASK); -} - - -static __inline__ void switch_mm(struct mm_struct *prev, - struct mm_struct *next, - struct task_struct *tsk) -{ - if (prev != next) { - mmu_pdtp_cache = next->pgd; - activate_context(next); - } -} - -#define deactivate_mm(tsk,mm) do { } while (0) - -#define activate_mm(prev, next) \ - switch_mm((prev),(next),NULL) - -static inline void -enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_SH64_MMU_CONTEXT_H */ diff --git a/include/asm-sh64/module.h b/include/asm-sh64/module.h deleted file mode 100644 index c313650d3d93..000000000000 --- a/include/asm-sh64/module.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __ASM_SH64_MODULE_H -#define __ASM_SH64_MODULE_H -/* - * This file contains the SH architecture specific module code. - */ - -struct mod_arch_specific { - /* empty */ -}; - -#define Elf_Shdr Elf32_Shdr -#define Elf_Sym Elf32_Sym -#define Elf_Ehdr Elf32_Ehdr - -#define module_map(x) vmalloc(x) -#define module_unmap(x) vfree(x) -#define module_arch_init(x) (0) -#define arch_init_modules(x) do { } while (0) - -#endif /* __ASM_SH64_MODULE_H */ diff --git a/include/asm-sh64/msgbuf.h b/include/asm-sh64/msgbuf.h deleted file mode 100644 index cf0494ce0ba8..000000000000 --- a/include/asm-sh64/msgbuf.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef __ASM_SH64_MSGBUF_H -#define __ASM_SH64_MSGBUF_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/msgbuf.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* - * The msqid64_ds structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - unsigned long __unused1; - __kernel_time_t msg_rtime; /* last msgrcv time */ - unsigned long __unused2; - __kernel_time_t msg_ctime; /* last change time */ - unsigned long __unused3; - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* __ASM_SH64_MSGBUF_H */ diff --git a/include/asm-sh64/mutex.h b/include/asm-sh64/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/include/asm-sh64/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. 
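
Stepping back to the include/asm-sh64/mmu_context.h removal above: mmu_context_cache packs a TLB version in its upper 24 bits and an 8-bit ASID in its lower bits, and get_new_mmu_context() simply increments it, flushing the TLB and caches whenever the ASID field wraps so translations from the previous version cannot linger. The stand-alone model below mirrors that allocation policy; the starting value is an assumption made for illustration, since the patch does not show the arch init code.

#include <stdio.h>

#define ASID_MASK     0x000000ffUL    /* MMU_CONTEXT_ASID_MASK */
#define VERSION_MASK  0xffffff00UL    /* MMU_CONTEXT_VERSION_MASK */
#define FIRST_VERSION 0x00000100UL    /* MMU_CONTEXT_FIRST_VERSION */

static unsigned long context_cache = FIRST_VERSION;   /* assumed initial value */

static void flush_everything(void)    /* stand-in for flush_tlb_all() + flush_cache_all() */
{
    printf("  ASID space exhausted: flush TLB and caches\n");
}

/* Mirrors the removed get_new_mmu_context(): bump the counter, flush on ASID wrap. */
static unsigned long get_new_context(void)
{
    unsigned long mc = ++context_cache;

    if (!(mc & ASID_MASK)) {
        flush_everything();
        if (!mc)                      /* skip 0 so NO_CONTEXT stays distinguishable */
            context_cache = mc = FIRST_VERSION;
    }
    return mc;
}

int main(void)
{
    unsigned long ctx = 0;
    int i;

    for (i = 0; i < 300; i++)         /* enough allocations to wrap the 8-bit ASID once */
        ctx = get_new_context();

    printf("after 300 contexts: version %#lx, asid %#lx\n",
           ctx & VERSION_MASK, ctx & ASID_MASK);
    return 0;
}
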
- * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/include/asm-sh64/namei.h b/include/asm-sh64/namei.h deleted file mode 100644 index 99d759a805ce..000000000000 --- a/include/asm-sh64/namei.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __ASM_SH64_NAMEI_H -#define __ASM_SH64_NAMEI_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/namei.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - * Included from linux/fs/namei.c - * - */ - -/* This dummy routine maybe changed to something useful - * for /usr/gnemul/ emulation stuff. - * Look at asm-sparc/namei.h for details. - */ - -#define __emul_prefix() NULL - -#endif /* __ASM_SH64_NAMEI_H */ diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h deleted file mode 100644 index 472089aefc60..000000000000 --- a/include/asm-sh64/page.h +++ /dev/null @@ -1,119 +0,0 @@ -#ifndef __ASM_SH64_PAGE_H -#define __ASM_SH64_PAGE_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/page.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003, 2004 Paul Mundt - * - * benedict.gaster@superh.com 19th, 24th July 2002. - * - * Modified to take account of enabling for D-CACHE support. - * - */ - - -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#ifdef __ASSEMBLY__ -#define PAGE_SIZE 4096 -#else -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#endif -#define PAGE_MASK (~(PAGE_SIZE-1)) -#define PTE_MASK PAGE_MASK - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define HPAGE_SHIFT 16 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) -#define HPAGE_SHIFT 20 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB) -#define HPAGE_SHIFT 29 -#endif - -#ifdef CONFIG_HUGETLB_PAGE -#define HPAGE_SIZE (1UL << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE-1)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) -#define ARCH_HAS_SETCLEAR_HUGE_PTE -#endif - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -extern struct page *mem_map; -extern void sh64_page_clear(void *page); -extern void sh64_page_copy(void *from, void *to); - -#define clear_page(page) sh64_page_clear(page) -#define copy_page(to,from) sh64_page_copy(from, to) - -#if defined(CONFIG_DCACHE_DISABLED) - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#else - -extern void clear_user_page(void *to, unsigned long address, struct page *pg); -extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg); - -#endif /* defined(CONFIG_DCACHE_DISABLED) */ - -/* - * These are used to make use of C type-checking.. 
- */ -typedef struct { unsigned long long pte; } pte_t; -typedef struct { unsigned long pmd; } pmd_t; -typedef struct { unsigned long pgd; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#endif /* !__ASSEMBLY__ */ - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - -/* - * Kconfig defined. - */ -#define __MEMORY_START (CONFIG_MEMORY_START) -#define PAGE_OFFSET (CONFIG_CACHED_MEMORY_OFFSET) - -#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) -#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#define MAP_NR(addr) ((__pa(addr)-__MEMORY_START) >> PAGE_SHIFT) -#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) - -#define phys_to_page(phys) (mem_map + (((phys) - __MEMORY_START) >> PAGE_SHIFT)) -#define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + __MEMORY_START) - -/* PFN start number, because of __MEMORY_START */ -#define PFN_START (__MEMORY_START >> PAGE_SHIFT) -#define ARCH_PFN_OFFSET (PFN_START) -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) - -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#include <asm-generic/memory_model.h> -#include <asm-generic/page.h> - -#endif /* __KERNEL__ */ -#endif /* __ASM_SH64_PAGE_H */ diff --git a/include/asm-sh64/param.h b/include/asm-sh64/param.h deleted file mode 100644 index f409adb41540..000000000000 --- a/include/asm-sh64/param.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
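
One more stand-alone check, this time of the arithmetic in the include/asm-sh64/page.h removal above: pages are 4 KiB, kernel virtual addresses are offset from physical ones by PAGE_OFFSET, and page frame numbers start at PFN_START because RAM begins at __MEMORY_START rather than at physical address 0. The PAGE_OFFSET and MEMORY_START values below are invented stand-ins for the Kconfig-supplied CONFIG_CACHED_MEMORY_OFFSET and CONFIG_MEMORY_START.

#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

#define PAGE_OFFSET   0x80000000UL    /* assumed CONFIG_CACHED_MEMORY_OFFSET */
#define MEMORY_START  0x08000000UL    /* assumed CONFIG_MEMORY_START */
#define PFN_START     (MEMORY_START >> PAGE_SHIFT)

/* Same arithmetic as the kernel's __pa()/__va(); the kernel's __va() returns void *. */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((unsigned long)(x) + PAGE_OFFSET)

int main(void)
{
    unsigned long kaddr = PAGE_OFFSET + MEMORY_START + 0x1234;

    printf("PAGE_ALIGN(0x1234) = %#lx\n", PAGE_ALIGN(0x1234UL));      /* 0x2000 */
    printf("__pa(kaddr)        = %#lx\n", __pa(kaddr));               /* 0x8001234 */
    printf("round trip holds   = %d\n", __va(__pa(kaddr)) == kaddr);  /* 1 */
    printf("pfn - PFN_START    = %#lx\n",
           (__pa(kaddr) >> PAGE_SHIFT) - PFN_START);                  /* 0x1 */
    return 0;
}
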
- * - * include/asm-sh64/param.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - */ -#ifndef __ASM_SH64_PARAM_H -#define __ASM_SH64_PARAM_H - - -#ifdef __KERNEL__ -# ifdef CONFIG_SH_WDT -# define HZ 1000 /* Needed for high-res WOVF */ -# else -# define HZ 100 -# endif -# define USER_HZ 100 /* User interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ -#endif - -#ifndef HZ -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NGROUPS -#define NGROUPS 32 -#endif - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#endif /* __ASM_SH64_PARAM_H */ diff --git a/include/asm-sh64/pci.h b/include/asm-sh64/pci.h deleted file mode 100644 index 18055dbbb4b5..000000000000 --- a/include/asm-sh64/pci.h +++ /dev/null @@ -1,102 +0,0 @@ -#ifndef __ASM_SH64_PCI_H -#define __ASM_SH64_PCI_H - -#ifdef __KERNEL__ - -#include <linux/dma-mapping.h> - -/* Can be used to override the logic in pci_scan_bus for skipping - already-configured bus numbers - to be used for buggy BIOSes - or architectures with incomplete PCI setup by the loader */ - -#define pcibios_assign_all_busses() 1 - -/* - * These are currently the correct values for the STM overdrive board - * We need some way of setting this on a board specific way, it will - * not be the same on other boards I think - */ -#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) -#define PCIBIOS_MIN_IO 0x2000 -#define PCIBIOS_MIN_MEM 0x40000000 -#endif - -extern void pcibios_set_master(struct pci_dev *dev); - -/* - * Set penalize isa irq function - */ -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - -/* Dynamic DMA mapping stuff. - * SuperH has everything mapped statically like x86. - */ - -/* The PCI address space does equal the physical memory - * address space. The networking and block device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (1) - -#include <linux/types.h> -#include <linux/slab.h> -#include <asm/scatterlist.h> -#include <linux/string.h> -#include <asm/io.h> - -/* pci_unmap_{single,page} being a nop depends upon the - * configuration. - */ -#ifdef CONFIG_SH_PCIDMA_NONCOHERENT -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) -#else -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) -#define pci_unmap_addr(PTR, ADDR_NAME) (0) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) -#define pci_unmap_len(PTR, LEN_NAME) (0) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) -#endif - -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - -/* Board-specific fixup routines. 
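
The DECLARE_PCI_UNMAP_* and pci_unmap_* macros in the include/asm-sh64/pci.h removal above let a driver keep unmap bookkeeping in its own structures while having all of it compile away on configurations where PCI DMA is coherent (the branch without CONFIG_SH_PCIDMA_NONCOHERENT). The sketch below shows the usual pattern from that era; the ring-entry structure, field names and helpers are invented, and it assumes the old pci_map_single()/pci_unmap_single() wrappers that this header pulls in via pci-dma-compat.

/* Per-buffer state in a hypothetical driver's receive ring. */
struct rx_ring_entry {
    struct sk_buff *skb;
    DECLARE_PCI_UNMAP_ADDR(mapping)   /* dma_addr_t mapping;  or nothing at all */
    DECLARE_PCI_UNMAP_LEN(len)        /* __u32 len;           or nothing at all */
};

static void rx_fill_one(struct pci_dev *pdev, struct rx_ring_entry *e,
                        struct sk_buff *skb, unsigned int size)
{
    dma_addr_t dma = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);

    e->skb = skb;
    pci_unmap_addr_set(e, mapping, dma);   /* no-ops in the coherent configuration */
    pci_unmap_len_set(e, len, size);
}

static void rx_unmap_one(struct pci_dev *pdev, struct rx_ring_entry *e)
{
    pci_unmap_single(pdev, pci_unmap_addr(e, mapping),
                     pci_unmap_len(e, len), PCI_DMA_FROMDEVICE);
}
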
*/ -extern void pcibios_fixup(void); -extern void pcibios_fixup_irqs(void); - -#ifdef CONFIG_PCI_AUTO -extern int pciauto_assign_resources(int busno, struct pci_channel *hose); -#endif - -#endif /* __KERNEL__ */ - -/* generic pci stuff */ -#include <asm-generic/pci.h> - -/* generic DMA-mapping stuff */ -#include <asm-generic/pci-dma-compat.h> - -#endif /* __ASM_SH64_PCI_H */ - diff --git a/include/asm-sh64/percpu.h b/include/asm-sh64/percpu.h deleted file mode 100644 index a01d16cd0e8c..000000000000 --- a/include/asm-sh64/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_PERCPU -#define __ASM_SH64_PERCPU - -#include <asm-generic/percpu.h> - -#endif /* __ASM_SH64_PERCPU */ diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h deleted file mode 100644 index 6eccab770a6d..000000000000 --- a/include/asm-sh64/pgalloc.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef __ASM_SH64_PGALLOC_H -#define __ASM_SH64_PGALLOC_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/pgalloc.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003, 2004 Paul Mundt - * Copyright (C) 2003, 2004 Richard Curnow - * - */ - -#include <linux/mm.h> -#include <linux/quicklist.h> -#include <asm/page.h> - -static inline void pgd_init(unsigned long page) -{ - unsigned long *pgd = (unsigned long *)page; - extern pte_t empty_bad_pte_table[PTRS_PER_PTE]; - int i; - - for (i = 0; i < USER_PTRS_PER_PGD; i++) - pgd[i] = (unsigned long)empty_bad_pte_table; -} - -/* - * Allocate and free page tables. The xxx_kernel() versions are - * used to allocate a kernel page table - this turns on ASN bits - * if any. - */ - -static inline pgd_t *get_pgd_slow(void) -{ - unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t)); - pgd_t *ret = kmalloc(pgd_size, GFP_KERNEL); - return ret; -} - -static inline pgd_t *pgd_alloc(struct mm_struct *mm) -{ - return quicklist_alloc(0, GFP_KERNEL, NULL); -} - -static inline void pgd_free(pgd_t *pgd) -{ - quicklist_free(0, NULL, pgd); -} - -static inline struct page *pte_alloc_one(struct mm_struct *mm, - unsigned long address) -{ - void *pg = quicklist_alloc(0, GFP_KERNEL, NULL); - return pg ? virt_to_page(pg) : NULL; -} - -static inline void pte_free_kernel(pte_t *pte) -{ - quicklist_free(0, NULL, pte); -} - -static inline void pte_free(struct page *pte) -{ - quicklist_free_page(0, NULL, pte); -} - -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) -{ - return quicklist_alloc(0, GFP_KERNEL, NULL); -} - -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) - -/* - * allocating and freeing a pmd is trivial: the 1-entry pmd is - * inside the pgd, so has no extra memory associated with it. 
- */ - -#if defined(CONFIG_SH64_PGTABLE_2_LEVEL) - -#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) -#define pmd_free(x) do { } while (0) -#define pgd_populate(mm, pmd, pte) BUG() -#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) -#define __pmd_free_tlb(tlb,pmd) do { } while (0) - -#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL) - -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - return quicklist_alloc(0, GFP_KERNEL, NULL); -} - -static inline void pmd_free(pmd_t *pmd) -{ - quicklist_free(0, NULL, pmd); -} - -#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd) -#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd) - -#else -#error "No defined page table size" -#endif - -#define pmd_populate_kernel(mm, pmd, pte) \ - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte))) - -static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, - struct page *pte) -{ - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte))); -} - -static inline void check_pgt_cache(void) -{ - quicklist_trim(0, NULL, 25, 16); -} - -#endif /* __ASM_SH64_PGALLOC_H */ diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h deleted file mode 100644 index bd0d9c405a80..000000000000 --- a/include/asm-sh64/platform.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef __ASM_SH64_PLATFORM_H -#define __ASM_SH64_PLATFORM_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/platform.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - * benedict.gaster@superh.com: 3rd May 2002 - * Added support for ramdisk, removing statically linked romfs at the same time. - */ - -#include <linux/ioport.h> -#include <asm/irq.h> - - -/* - * Platform definition structure. 
- */ -struct sh64_platform { - unsigned int readonly_rootfs; - unsigned int ramdisk_flags; - unsigned int initial_root_dev; - unsigned int loader_type; - unsigned int initrd_start; - unsigned int initrd_size; - unsigned int fpu_flags; - unsigned int io_res_count; - unsigned int kram_res_count; - unsigned int xram_res_count; - unsigned int rom_res_count; - struct resource *io_res_p; - struct resource *kram_res_p; - struct resource *xram_res_p; - struct resource *rom_res_p; -}; - -extern struct sh64_platform platform_parms; - -extern unsigned long long memory_start, memory_end; - -extern unsigned long long fpu_in_use; - -extern int platform_int_priority[NR_INTC_IRQS]; - -#define FPU_FLAGS (platform_parms.fpu_flags) -#define STANDARD_IO_RESOURCES (platform_parms.io_res_count) -#define STANDARD_KRAM_RESOURCES (platform_parms.kram_res_count) -#define STANDARD_XRAM_RESOURCES (platform_parms.xram_res_count) -#define STANDARD_ROM_RESOURCES (platform_parms.rom_res_count) - -/* - * Kernel Memory description, Respectively: - * code = last but one memory descriptor - * data = last memory descriptor - */ -#define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2]) -#define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1]) - -#endif /* __ASM_SH64_PLATFORM_H */ diff --git a/include/asm-sh64/poll.h b/include/asm-sh64/poll.h deleted file mode 100644 index ca2950267c53..000000000000 --- a/include/asm-sh64/poll.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_SH64_POLL_H -#define __ASM_SH64_POLL_H - -#include <asm-generic/poll.h> - -#undef POLLREMOVE - -#endif /* __ASM_SH64_POLL_H */ diff --git a/include/asm-sh64/ptrace.h b/include/asm-sh64/ptrace.h deleted file mode 100644 index c424f80e3ae0..000000000000 --- a/include/asm-sh64/ptrace.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __ASM_SH64_PTRACE_H -#define __ASM_SH64_PTRACE_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/ptrace.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* - * This struct defines the way the registers are stored on the - * kernel stack during a system call or other kernel entry. - */ -struct pt_regs { - unsigned long long pc; - unsigned long long sr; - unsigned long long syscall_nr; - unsigned long long regs[63]; - unsigned long long tregs[8]; - unsigned long long pad[2]; -}; - -#ifdef __KERNEL__ -#define user_mode(regs) (((regs)->sr & 0x40000000)==0) -#define instruction_pointer(regs) ((regs)->pc) -#define profile_pc(regs) ((unsigned long)instruction_pointer(regs)) -extern void show_regs(struct pt_regs *); -#endif - -#endif /* __ASM_SH64_PTRACE_H */ diff --git a/include/asm-sh64/resource.h b/include/asm-sh64/resource.h deleted file mode 100644 index 8ff93944ae66..000000000000 --- a/include/asm-sh64/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_RESOURCE_H -#define __ASM_SH64_RESOURCE_H - -#include <asm-sh/resource.h> - -#endif /* __ASM_SH64_RESOURCE_H */ diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h deleted file mode 100644 index 7f729bbfce43..000000000000 --- a/include/asm-sh64/scatterlist.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * include/asm-sh64/scatterlist.h - * - * Copyright (C) 2003 Paul Mundt - * - */ -#ifndef __ASM_SH64_SCATTERLIST_H -#define __ASM_SH64_SCATTERLIST_H - -#include <asm/types.h> - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset;/* for highmem, page offset */ - dma_addr_t dma_address; - unsigned int length; -}; - -/* These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) - -#define ISA_DMA_THRESHOLD (0xffffffff) - -#endif /* !__ASM_SH64_SCATTERLIST_H */ diff --git a/include/asm-sh64/sci.h b/include/asm-sh64/sci.h deleted file mode 100644 index 793c568b7820..000000000000 --- a/include/asm-sh64/sci.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-sh/sci.h> diff --git a/include/asm-sh64/sections.h b/include/asm-sh64/sections.h deleted file mode 100644 index 897f36bcdf85..000000000000 --- a/include/asm-sh64/sections.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SH64_SECTIONS_H -#define __ASM_SH64_SECTIONS_H - -#include <asm-sh/sections.h> - -#endif /* __ASM_SH64_SECTIONS_H */ - diff --git a/include/asm-sh64/segment.h b/include/asm-sh64/segment.h deleted file mode 100644 index 92ac001fc483..000000000000 --- a/include/asm-sh64/segment.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_SEGMENT_H -#define _ASM_SEGMENT_H - -/* Only here because we have some old header files that expect it.. */ - -#endif /* _ASM_SEGMENT_H */ diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h deleted file mode 100644 index fcfafe263e86..000000000000 --- a/include/asm-sh64/semaphore-helper.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef __ASM_SH64_SEMAPHORE_HELPER_H -#define __ASM_SH64_SEMAPHORE_HELPER_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/semaphore-helper.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ -#include <asm/errno.h> - -/* - * SMP- and interrupt-safe semaphores helper functions. - * - * (C) Copyright 1996 Linus Torvalds - * (C) Copyright 1999 Andrea Arcangeli - */ - -/* - * These two _must_ execute atomically wrt each other. - * - * This is trivially done with load_locked/store_cond, - * which we have. Let the rest of the losers suck eggs. - */ -static __inline__ void wake_one_more(struct semaphore * sem) -{ - atomic_inc((atomic_t *)&sem->sleepers); -} - -static __inline__ int waking_non_zero(struct semaphore *sem) -{ - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&semaphore_wake_lock, flags); - if (sem->sleepers > 0) { - sem->sleepers--; - ret = 1; - } - spin_unlock_irqrestore(&semaphore_wake_lock, flags); - return ret; -} - -/* - * waking_non_zero_interruptible: - * 1 got the lock - * 0 go to sleep - * -EINTR interrupted - * - * We must undo the sem->count down_interruptible() increment while we are - * protected by the spinlock in order to make atomic this atomic_inc() with the - * atomic_read() in wake_one_more(), otherwise we can race. 
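
Back to the include/asm-sh64/scatterlist.h removal a little further up: its comment spells out the consumer contract, namely to use only the entry count pci_map_sg() returns and to read bus addresses and lengths through sg_dma_address() and sg_dma_len(), stopping at the first zero length. Below is a short kernel-context sketch of that loop; my_hw_write_descriptor() is a hypothetical device-specific helper, not anything from the patch.

static int queue_sg(struct pci_dev *pdev, struct scatterlist *sgl, int nents)
{
    int i, count;

    count = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
    if (!count)
        return -EIO;

    for (i = 0; i < count; i++) {
        dma_addr_t bus = sg_dma_address(&sgl[i]);   /* bus address of this segment */
        unsigned int len = sg_dma_len(&sgl[i]);     /* its mapped length */

        if (!len)                                   /* per the header comment: stop at len 0 */
            break;
        my_hw_write_descriptor(i, bus, len);        /* hypothetical device helper */
    }
    return 0;
}
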
-arca - */ -static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, - struct task_struct *tsk) -{ - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&semaphore_wake_lock, flags); - if (sem->sleepers > 0) { - sem->sleepers--; - ret = 1; - } else if (signal_pending(tsk)) { - atomic_inc(&sem->count); - ret = -EINTR; - } - spin_unlock_irqrestore(&semaphore_wake_lock, flags); - return ret; -} - -/* - * waking_non_zero_trylock: - * 1 failed to lock - * 0 got the lock - * - * We must undo the sem->count down_trylock() increment while we are - * protected by the spinlock in order to make atomic this atomic_inc() with the - * atomic_read() in wake_one_more(), otherwise we can race. -arca - */ -static __inline__ int waking_non_zero_trylock(struct semaphore *sem) -{ - unsigned long flags; - int ret = 1; - - spin_lock_irqsave(&semaphore_wake_lock, flags); - if (sem->sleepers <= 0) - atomic_inc(&sem->count); - else { - sem->sleepers--; - ret = 0; - } - spin_unlock_irqrestore(&semaphore_wake_lock, flags); - return ret; -} - -#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */ diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h deleted file mode 100644 index f027cc14b55b..000000000000 --- a/include/asm-sh64/semaphore.h +++ /dev/null @@ -1,119 +0,0 @@ -#ifndef __ASM_SH64_SEMAPHORE_H -#define __ASM_SH64_SEMAPHORE_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/semaphore.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - * SMP- and interrupt-safe semaphores. - * - * (C) Copyright 1996 Linus Torvalds - * - * SuperH verison by Niibe Yutaka - * (Currently no asm implementation but generic C code...) - * - */ - -#include <linux/linkage.h> -#include <linux/spinlock.h> -#include <linux/wait.h> -#include <linux/rwsem.h> - -#include <asm/system.h> -#include <asm/atomic.h> - -struct semaphore { - atomic_t count; - int sleepers; - wait_queue_head_t wait; -}; - -#define __SEMAPHORE_INITIALIZER(name, n) \ -{ \ - .count = ATOMIC_INIT(n), \ - .sleepers = 0, \ - .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ -} - -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) - -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) - -static inline void sema_init (struct semaphore *sem, int val) -{ -/* - * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); - * - * i'd rather use the more flexible initialization above, but sadly - * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well. 
- */ - atomic_set(&sem->count, val); - sem->sleepers = 0; - init_waitqueue_head(&sem->wait); -} - -static inline void init_MUTEX (struct semaphore *sem) -{ - sema_init(sem, 1); -} - -static inline void init_MUTEX_LOCKED (struct semaphore *sem) -{ - sema_init(sem, 0); -} - -#if 0 -asmlinkage void __down_failed(void /* special register calling convention */); -asmlinkage int __down_failed_interruptible(void /* params in registers */); -asmlinkage int __down_failed_trylock(void /* params in registers */); -asmlinkage void __up_wakeup(void /* special register calling convention */); -#endif - -asmlinkage void __down(struct semaphore * sem); -asmlinkage int __down_interruptible(struct semaphore * sem); -asmlinkage int __down_trylock(struct semaphore * sem); -asmlinkage void __up(struct semaphore * sem); - -extern spinlock_t semaphore_wake_lock; - -static inline void down(struct semaphore * sem) -{ - if (atomic_dec_return(&sem->count) < 0) - __down(sem); -} - -static inline int down_interruptible(struct semaphore * sem) -{ - int ret = 0; - - if (atomic_dec_return(&sem->count) < 0) - ret = __down_interruptible(sem); - return ret; -} - -static inline int down_trylock(struct semaphore * sem) -{ - int ret = 0; - - if (atomic_dec_return(&sem->count) < 0) - ret = __down_trylock(sem); - return ret; -} - -/* - * Note! This is subtle. We jump to wake people up only if - * the semaphore was negative (== somebody was waiting on it). - */ -static inline void up(struct semaphore * sem) -{ - if (atomic_inc_return(&sem->count) <= 0) - __up(sem); -} - -#endif /* __ASM_SH64_SEMAPHORE_H */ diff --git a/include/asm-sh64/sembuf.h b/include/asm-sh64/sembuf.h deleted file mode 100644 index ec4d9f143577..000000000000 --- a/include/asm-sh64/sembuf.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef __ASM_SH64_SEMBUF_H -#define __ASM_SH64_SEMBUF_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/sembuf.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* - * The semid64_ds structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - unsigned long __unused1; - __kernel_time_t sem_ctime; /* last change time */ - unsigned long __unused2; - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* __ASM_SH64_SEMBUF_H */ diff --git a/include/asm-sh64/serial.h b/include/asm-sh64/serial.h deleted file mode 100644 index e8d7b3f2da57..000000000000 --- a/include/asm-sh64/serial.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * include/asm-sh64/serial.h - * - * Configuration details for 8250, 16450, 16550, etc. serial ports - */ - -#ifndef _ASM_SERIAL_H -#define _ASM_SERIAL_H - -/* - * This assumes you have a 1.8432 MHz clock for your UART. - * - * It'd be nice if someone built a serial card with a 24.576 MHz - * clock, since the 16550A is capable of handling a top speed of 1.5 - * megabits/second; but this requires the faster clock. 
- */ -#define BASE_BAUD ( 1843200 / 16 ) - -#define RS_TABLE_SIZE 2 - -#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) - -#define SERIAL_PORT_DFNS \ - /* UART CLK PORT IRQ FLAGS */ \ - { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ - { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS } /* ttyS1 */ - -/* XXX: This should be moved ino irq.h */ -#define irq_cannonicalize(x) (x) - -#endif /* _ASM_SERIAL_H */ diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h deleted file mode 100644 index 5b07b14c2927..000000000000 --- a/include/asm-sh64/setup.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_SH64_SETUP_H -#define __ASM_SH64_SETUP_H - -#define COMMAND_LINE_SIZE 256 - -#ifdef __KERNEL__ - -#define PARAM ((unsigned char *)empty_zero_page) -#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000)) -#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004)) -#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008)) -#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c)) -#define INITRD_START (*(unsigned long *) (PARAM+0x010)) -#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014)) - -#define COMMAND_LINE ((char *) (PARAM+256)) -#define COMMAND_LINE_SIZE 256 - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_SETUP_H */ - diff --git a/include/asm-sh64/shmbuf.h b/include/asm-sh64/shmbuf.h deleted file mode 100644 index 022f3494dd64..000000000000 --- a/include/asm-sh64/shmbuf.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef __ASM_SH64_SHMBUF_H -#define __ASM_SH64_SHMBUF_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/shmbuf.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* - * The shmid64_ds structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - unsigned long __unused1; - __kernel_time_t shm_dtime; /* last detach time */ - unsigned long __unused2; - __kernel_time_t shm_ctime; /* last change time */ - unsigned long __unused3; - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* __ASM_SH64_SHMBUF_H */ diff --git a/include/asm-sh64/shmparam.h b/include/asm-sh64/shmparam.h deleted file mode 100644 index 1bb820c833ee..000000000000 --- a/include/asm-sh64/shmparam.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_SH64_SHMPARAM_H -#define __ASM_SH64_SHMPARAM_H - -/* - * Set this to a sensible safe default, we'll work out the specifics for the - * align mask from the cache descriptor at run-time. 
- */ -#define SHMLBA 0x4000 - -#define __ARCH_FORCE_SHMLBA - -#endif /* __ASM_SH64_SHMPARAM_H */ diff --git a/include/asm-sh64/sigcontext.h b/include/asm-sh64/sigcontext.h deleted file mode 100644 index 6293509d8cc1..000000000000 --- a/include/asm-sh64/sigcontext.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __ASM_SH64_SIGCONTEXT_H -#define __ASM_SH64_SIGCONTEXT_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/sigcontext.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -struct sigcontext { - unsigned long oldmask; - - /* CPU registers */ - unsigned long long sc_regs[63]; - unsigned long long sc_tregs[8]; - unsigned long long sc_pc; - unsigned long long sc_sr; - - /* FPU registers */ - unsigned long long sc_fpregs[32]; - unsigned int sc_fpscr; - unsigned int sc_fpvalid; -}; - -#endif /* __ASM_SH64_SIGCONTEXT_H */ diff --git a/include/asm-sh64/siginfo.h b/include/asm-sh64/siginfo.h deleted file mode 100644 index 56ef1da534d7..000000000000 --- a/include/asm-sh64/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_SIGINFO_H -#define __ASM_SH64_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif /* __ASM_SH64_SIGINFO_H */ diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h deleted file mode 100644 index 244e134730d9..000000000000 --- a/include/asm-sh64/signal.h +++ /dev/null @@ -1,159 +0,0 @@ -#ifndef __ASM_SH64_SIGNAL_H -#define __ASM_SH64_SIGNAL_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/signal.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#include <linux/types.h> - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#define _NSIG 64 -#define _NSIG_BPW 32 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX (_NSIG-1) - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. 
- */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - -#define SA_RESTORER 0x04000000 - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ THREAD_SIZE - -#include <asm-generic/signal.h> - -#ifdef __KERNEL__ -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - void (*sa_restorer)(void); - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -#ifdef __KERNEL__ -#include <asm/sigcontext.h> - -#define sigmask(sig) (1UL << ((sig) - 1)) -#define ptrace_signal_deliver(regs, cookie) do { } while (0) - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_SIGNAL_H */ diff --git a/include/asm-sh64/smp.h b/include/asm-sh64/smp.h deleted file mode 100644 index 4a4d0da39a84..000000000000 --- a/include/asm-sh64/smp.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASM_SH64_SMP_H -#define __ASM_SH64_SMP_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/smp.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#endif /* __ASM_SH64_SMP_H */ diff --git a/include/asm-sh64/socket.h b/include/asm-sh64/socket.h deleted file mode 100644 index 1853f7246ab0..000000000000 --- a/include/asm-sh64/socket.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_SOCKET_H -#define __ASM_SH64_SOCKET_H - -#include <asm-sh/socket.h> - -#endif /* __ASM_SH64_SOCKET_H */ diff --git a/include/asm-sh64/sockios.h b/include/asm-sh64/sockios.h deleted file mode 100644 index 419e76f12f41..000000000000 --- a/include/asm-sh64/sockios.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_SH64_SOCKIOS_H -#define __ASM_SH64_SOCKIOS_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/sockios.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -/* Socket-level I/O control calls. 
*/ -#define FIOGETOWN _IOR('f', 123, int) -#define FIOSETOWN _IOW('f', 124, int) - -#define SIOCATMARK _IOR('s', 7, int) -#define SIOCSPGRP _IOW('s', 8, pid_t) -#define SIOCGPGRP _IOR('s', 9, pid_t) - -#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp (timeval) */ -#define SIOCGSTAMPNS _IOR('s', 101, struct timespec) /* Get stamp (timespec) */ -#endif /* __ASM_SH64_SOCKIOS_H */ diff --git a/include/asm-sh64/spinlock.h b/include/asm-sh64/spinlock.h deleted file mode 100644 index 296b0c9b24a2..000000000000 --- a/include/asm-sh64/spinlock.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __ASM_SH64_SPINLOCK_H -#define __ASM_SH64_SPINLOCK_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/spinlock.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#error "No SMP on SH64" - -#endif /* __ASM_SH64_SPINLOCK_H */ diff --git a/include/asm-sh64/stat.h b/include/asm-sh64/stat.h deleted file mode 100644 index 86f551b1987e..000000000000 --- a/include/asm-sh64/stat.h +++ /dev/null @@ -1,88 +0,0 @@ -#ifndef __ASM_SH64_STAT_H -#define __ASM_SH64_STAT_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/stat.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -struct __old_kernel_stat { - unsigned short st_dev; - unsigned short st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; -}; - -struct stat { - unsigned short st_dev; - unsigned short __pad1; - unsigned long st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned short __pad2; - unsigned long st_size; - unsigned long st_blksize; - unsigned long st_blocks; - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - unsigned long __unused4; - unsigned long __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - */ -struct stat64 { - unsigned short st_dev; - unsigned char __pad0[10]; - - unsigned long st_ino; - unsigned int st_mode; - unsigned int st_nlink; - - unsigned long st_uid; - unsigned long st_gid; - - unsigned short st_rdev; - unsigned char __pad3[10]; - - long long st_size; - unsigned long st_blksize; - - unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ - unsigned long __pad4; /* future possible st_blocks high bits */ - - unsigned long st_atime; - unsigned long st_atime_nsec; - - unsigned long st_mtime; - unsigned long st_mtime_nsec; - - unsigned long st_ctime; - unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */ - - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* __ASM_SH64_STAT_H */ diff --git a/include/asm-sh64/statfs.h b/include/asm-sh64/statfs.h deleted file mode 100644 index 083fd79b2417..000000000000 --- a/include/asm-sh64/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_STATFS_H -#define __ASM_SH64_STATFS_H - -#include <asm-generic/statfs.h> - -#endif /* __ASM_SH64_STATFS_H */ diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h deleted file mode 100644 index be2a15ffcc55..000000000000 --- a/include/asm-sh64/system.h +++ /dev/null @@ -1,190 +0,0 @@ -#ifndef __ASM_SH64_SYSTEM_H -#define __ASM_SH64_SYSTEM_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/system.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * Copyright (C) 2004 Richard Curnow - * - */ - -#include <asm/registers.h> -#include <asm/processor.h> - -/* - * switch_to() should switch tasks to task nr n, first - */ - -typedef struct { - unsigned long seg; -} mm_segment_t; - -extern struct task_struct *sh64_switch_to(struct task_struct *prev, - struct thread_struct *prev_thread, - struct task_struct *next, - struct thread_struct *next_thread); - -#define switch_to(prev,next,last) \ - do {\ - if (last_task_used_math != next) {\ - struct pt_regs *regs = next->thread.uregs;\ - if (regs) regs->sr |= SR_FD;\ - }\ - last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\ - } while(0) - -#define nop() __asm__ __volatile__ ("nop") - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -extern void __xchg_called_with_bad_pointer(void); - -#define mb() __asm__ __volatile__ ("synco": : :"memory") -#define rmb() mb() -#define wmb() __asm__ __volatile__ ("synco": : :"memory") -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#endif /* CONFIG_SMP */ - -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) - -/* Interrupt Control */ -#ifndef HARD_CLI -#define SR_MASK_L 0x000000f0L -#define SR_MASK_LL 0x00000000000000f0LL -#else -#define SR_MASK_L 0x10000000L -#define SR_MASK_LL 0x0000000010000000LL -#endif - -static __inline__ void local_irq_enable(void) -{ - /* cli/sti based on SR.BL */ - unsigned long long __dummy0, __dummy1=~SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static __inline__ void local_irq_disable(void) -{ - /* cli/sti based on SR.BL */ - unsigned long long __dummy0, __dummy1=SR_MASK_LL; - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -#define local_save_flags(x) \ -(__extension__ ({ unsigned long long __dummy=SR_MASK_LL; \ - __asm__ 
__volatile__( \ - "getcon " __SR ", %0\n\t" \ - "and %0, %1, %0" \ - : "=&r" (x) \ - : "r" (__dummy));})) - -#define local_irq_save(x) \ -(__extension__ ({ unsigned long long __d2=SR_MASK_LL, __d1; \ - __asm__ __volatile__( \ - "getcon " __SR ", %1\n\t" \ - "or %1, r63, %0\n\t" \ - "or %1, %2, %1\n\t" \ - "putcon %1, " __SR "\n\t" \ - "and %0, %2, %0" \ - : "=&r" (x), "=&r" (__d1) \ - : "r" (__d2));})); - -#define local_irq_restore(x) do { \ - if ( ((x) & SR_MASK_L) == 0 ) /* dropping to 0 ? */ \ - local_irq_enable(); /* yes...re-enable */ \ -} while (0) - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (flags != 0); \ -}) - -static inline unsigned long xchg_u32(volatile int * m, unsigned long val) -{ - unsigned long flags, retval; - - local_irq_save(flags); - retval = *m; - *m = val; - local_irq_restore(flags); - return retval; -} - -static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val) -{ - unsigned long flags, retval; - - local_irq_save(flags); - retval = *m; - *m = val & 0xff; - local_irq_restore(flags); - return retval; -} - -static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size) -{ - switch (size) { - case 4: - return xchg_u32(ptr, x); - break; - case 1: - return xchg_u8(ptr, x); - break; - } - __xchg_called_with_bad_pointer(); - return x; -} - -/* XXX - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -void disable_hlt(void); -void enable_hlt(void); - - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#ifdef CONFIG_SH_ALPHANUMERIC -/* This is only used for debugging. */ -extern void print_seg(char *file,int line); -#define PLS() print_seg(__FILE__,__LINE__) -#else /* CONFIG_SH_ALPHANUMERIC */ -#define PLS() -#endif /* CONFIG_SH_ALPHANUMERIC */ - -#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__) - -#define arch_align_stack(x) (x) - -#endif /* __ASM_SH64_SYSTEM_H */ diff --git a/include/asm-sh64/termbits.h b/include/asm-sh64/termbits.h deleted file mode 100644 index 86bde5ec1414..000000000000 --- a/include/asm-sh64/termbits.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_TERMBITS_H -#define __ASM_SH64_TERMBITS_H - -#include <asm-sh/termbits.h> - -#endif /* __ASM_SH64_TERMBITS_H */ diff --git a/include/asm-sh64/termios.h b/include/asm-sh64/termios.h deleted file mode 100644 index dc44e6ed3a7c..000000000000 --- a/include/asm-sh64/termios.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef __ASM_SH64_TERMIOS_H -#define __ASM_SH64_TERMIOS_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * include/asm-sh64/termios.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#include <asm/termbits.h> -#include <asm/ioctls.h> - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -/* intr=^C quit=^\ erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" - -/* - * Translate a "termio" structure into a "termios". Ugh. - */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - *(unsigned short *) &(termios)->x = __tmp; \ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_TERMIOS_H */ diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h deleted file mode 100644 index f6d5117c53af..000000000000 --- a/include/asm-sh64/thread_info.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef __ASM_SH64_THREAD_INFO_H -#define __ASM_SH64_THREAD_INFO_H - -/* - * SuperH 5 version - * Copyright (C) 2003 Paul Mundt - */ - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ -#include <asm/registers.h> - -/* - * low level task data that entry.S needs immediate access to - * - this struct should fit entirely inside of one cache line - * - this struct shares the supervisor stack pages - * - if the contents of this structure are changed, the assembly constants must also be changed - */ -struct thread_info { - struct task_struct *task; /* main task structure */ - struct exec_domain *exec_domain; /* execution domain */ - unsigned long flags; /* low level flags */ - /* Put the 4 32-bit fields together to make asm offsetting easier. 
*/ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - __u16 cpu; - - mm_segment_t addr_limit; - struct restart_block restart_block; - - __u8 supervisor_stack[0]; -}; - -/* - * macros/functions for gaining access to the thread information structure - */ -#define INIT_THREAD_INFO(tsk) \ -{ \ - .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .flags = 0, \ - .cpu = 0, \ - .preempt_count = 1, \ - .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ -} - -#define init_thread_info (init_thread_union.thread_info) -#define init_stack (init_thread_union.stack) - -/* how to get the thread information struct from C */ -static inline struct thread_info *current_thread_info(void) -{ - struct thread_info *ti; - - __asm__ __volatile__ ("getcon " __KCR0 ", %0\n\t" : "=r" (ti)); - - return ti; -} - -/* thread information allocation */ - - - -#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) -#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) - -#endif /* __ASSEMBLY__ */ - -#define THREAD_SIZE 8192 - -#define PREEMPT_ACTIVE 0x10000000 - -/* thread information flags */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_SIGPENDING 2 /* signal pending */ -#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ -#define TIF_MEMDIE 4 -#define TIF_RESTORE_SIGMASK 5 /* Restore signal mask in do_signal */ - -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) -#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_MEMDIE (1 << TIF_MEMDIE) -#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_THREAD_INFO_H */ diff --git a/include/asm-sh64/timex.h b/include/asm-sh64/timex.h deleted file mode 100644 index 163e2b62fe27..000000000000 --- a/include/asm-sh64/timex.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __ASM_SH64_TIMEX_H -#define __ASM_SH64_TIMEX_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * include/asm-sh64/timex.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * - * sh-5 architecture timex specifications - * - */ - -#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ -#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */ - -typedef unsigned long cycles_t; - -static __inline__ cycles_t get_cycles (void) -{ - return 0; -} - -#define vxtime_lock() do {} while (0) -#define vxtime_unlock() do {} while (0) - -#endif /* __ASM_SH64_TIMEX_H */ diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h deleted file mode 100644 index 16a164a23754..000000000000 --- a/include/asm-sh64/tlbflush.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __ASM_SH64_TLBFLUSH_H -#define __ASM_SH64_TLBFLUSH_H - -#include <asm/pgalloc.h> - -/* - * TLB flushing: - * - * - flush_tlb() flushes the current mm struct TLBs - * - flush_tlb_all() flushes all processes TLBs - * - flush_tlb_mm(mm) flushes the specified mm context TLB's - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(mm, start, end) flushes a range of pages - * - */ - -extern void flush_tlb(void); -extern void flush_tlb_all(void); -extern void flush_tlb_mm(struct mm_struct *mm); -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end); -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); - -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); - -#endif /* __ASM_SH64_TLBFLUSH_H */ - diff --git a/include/asm-sh64/topology.h b/include/asm-sh64/topology.h deleted file mode 100644 index 34211787345f..000000000000 --- a/include/asm-sh64/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH64_TOPOLOGY_H -#define __ASM_SH64_TOPOLOGY_H - -#include <asm-generic/topology.h> - -#endif /* __ASM_SH64_TOPOLOGY_H */ diff --git a/include/asm-sh64/types.h b/include/asm-sh64/types.h deleted file mode 100644 index 2c7ad73b3883..000000000000 --- a/include/asm-sh64/types.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef __ASM_SH64_TYPES_H -#define __ASM_SH64_TYPES_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/types.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -/* - * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the - * header files exported to user space - */ - -typedef __signed__ char __s8; -typedef unsigned char __u8; - -typedef __signed__ short __s16; -typedef unsigned short __u16; - -typedef __signed__ int __s32; -typedef unsigned int __u32; - -#if defined(__GNUC__) -__extension__ typedef __signed__ long long __s64; -__extension__ typedef unsigned long long __u64; -#endif - -#endif /* __ASSEMBLY__ */ - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ - -typedef __signed__ char s8; -typedef unsigned char u8; - -typedef __signed__ short s16; -typedef unsigned short u16; - -typedef __signed__ int s32; -typedef unsigned int u32; - -typedef __signed__ long long s64; -typedef unsigned long long u64; - -/* DMA addresses come in generic and 64-bit flavours. 
*/ - -#ifdef CONFIG_HIGHMEM64G -typedef u64 dma_addr_t; -#else -typedef u32 dma_addr_t; -#endif -typedef u64 dma64_addr_t; - -#endif /* __ASSEMBLY__ */ - -#define BITS_PER_LONG 32 - -#endif /* __KERNEL__ */ - -#endif /* __ASM_SH64_TYPES_H */ diff --git a/include/asm-sh64/ucontext.h b/include/asm-sh64/ucontext.h deleted file mode 100644 index cf77a08551ca..000000000000 --- a/include/asm-sh64/ucontext.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_SH64_UCONTEXT_H -#define __ASM_SH64_UCONTEXT_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/ucontext.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif /* __ASM_SH64_UCONTEXT_H */ diff --git a/include/asm-sh64/unaligned.h b/include/asm-sh64/unaligned.h deleted file mode 100644 index 74481b186ae8..000000000000 --- a/include/asm-sh64/unaligned.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __ASM_SH64_UNALIGNED_H -#define __ASM_SH64_UNALIGNED_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/unaligned.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#include <asm-generic/unaligned.h> - -#endif /* __ASM_SH64_UNALIGNED_H */ diff --git a/include/asm-sh64/user.h b/include/asm-sh64/user.h deleted file mode 100644 index eb3b33edd73e..000000000000 --- a/include/asm-sh64/user.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef __ASM_SH64_USER_H -#define __ASM_SH64_USER_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * include/asm-sh64/user.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - */ - -#include <linux/types.h> -#include <asm/ptrace.h> -#include <asm/page.h> - -/* - * Core file format: The core file is written in such a way that gdb - * can understand it and provide useful information to the user (under - * linux we use the `trad-core' bfd). The file contents are as follows: - * - * upage: 1 page consisting of a user struct that tells gdb - * what is present in the file. Directly after this is a - * copy of the task_struct, which is currently not used by gdb, - * but it may come in handy at some point. All of the registers - * are stored as part of the upage. The upage should always be - * only one page long. - * data: The data segment follows next. We use current->end_text to - * current->brk to pick up all of the user variables, plus any memory - * that may have been sbrk'ed. No attempt is made to determine if a - * page is demand-zero or if a page is totally unused, we just cover - * the entire range. All of the addresses are rounded in such a way - * that an integral number of pages is written. - * stack: We need the stack information in order to get a meaningful - * backtrace. We need to write the data from usp to - * current->start_stack, so we round each of these in order to be able - * to write an integer number of pages. 
- */ - -struct user_fpu_struct { - unsigned long long fp_regs[32]; - unsigned int fpscr; -}; - -struct user { - struct pt_regs regs; /* entire machine state */ - struct user_fpu_struct fpu; /* Math Co-processor registers */ - int u_fpvalid; /* True if math co-processor being used */ - size_t u_tsize; /* text size (pages) */ - size_t u_dsize; /* data size (pages) */ - size_t u_ssize; /* stack size (pages) */ - unsigned long start_code; /* text starting address */ - unsigned long start_data; /* data starting address */ - unsigned long start_stack; /* stack starting address */ - long int signal; /* signal causing core dump */ - struct regs * u_ar0; /* help gdb find registers */ - struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer */ - unsigned long magic; /* identifies a core file */ - char u_comm[32]; /* user command name */ -}; - -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif /* __ASM_SH64_USER_H */ diff --git a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h index 58f8cb6ae767..e9fcf0e781ea 100644 --- a/include/asm-sparc64/agp.h +++ b/include/asm-sparc64/agp.h @@ -5,7 +5,6 @@ #define map_page_into_agp(page) #define unmap_page_from_agp(page) -#define flush_agp_mappings() #define flush_agp_cache() mb() /* Convert a physical address to an address suitable for the GART. */ diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index a1f53a4da405..c7e52decba98 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -16,15 +16,6 @@ extern unsigned long __per_cpu_shift; (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) #define per_cpu_offset(x) (__per_cpu_offset(x)) -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset)) @@ -41,10 +32,6 @@ do { \ #else /* ! 
SMP */ #define real_setup_per_cpu_areas() do { } while (0) -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var @@ -54,7 +41,4 @@ do { \ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - #endif /* __ARCH_SPARC64_PERCPU__ */ diff --git a/include/asm-um/asm.h b/include/asm-um/asm.h new file mode 100644 index 000000000000..af1269a1e9eb --- /dev/null +++ b/include/asm-um/asm.h @@ -0,0 +1,6 @@ +#ifndef __UM_ASM_H +#define __UM_ASM_H + +#include "asm/arch/asm.h" + +#endif diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h index 78b862472b36..cdb3024a699a 100644 --- a/include/asm-um/linkage.h +++ b/include/asm-um/linkage.h @@ -6,7 +6,6 @@ /* <linux/linkage.h> will pick sane defaults */ #ifdef CONFIG_GPROF -#undef FASTCALL #undef fastcall #endif diff --git a/include/asm-um/nops.h b/include/asm-um/nops.h new file mode 100644 index 000000000000..814e9bf5dea6 --- /dev/null +++ b/include/asm-um/nops.h @@ -0,0 +1,6 @@ +#ifndef __UM_NOPS_H +#define __UM_NOPS_H + +#include "asm/arch/nops.h" + +#endif diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild index 12db5a1cdd74..3c6f0f80e827 100644 --- a/include/asm-x86/Kbuild +++ b/include/asm-x86/Kbuild @@ -3,21 +3,20 @@ include include/asm-generic/Kbuild.asm header-y += boot.h header-y += bootparam.h header-y += debugreg.h +header-y += kvm.h header-y += ldt.h header-y += msr-index.h header-y += prctl.h header-y += ptrace-abi.h header-y += sigcontext32.h header-y += ucontext.h -header-y += vsyscall32.h unifdef-y += e820.h unifdef-y += ist.h unifdef-y += mce.h unifdef-y += msr.h unifdef-y += mtrr.h -unifdef-y += page_32.h -unifdef-y += page_64.h +unifdef-y += page.h unifdef-y += posix_types_32.h unifdef-y += posix_types_64.h unifdef-y += ptrace.h diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index f8a89793ac8c..98a9ca266531 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h @@ -1,13 +1,123 @@ #ifndef _ASM_X86_ACPI_H #define _ASM_X86_ACPI_H -#ifdef CONFIG_X86_32 -# include "acpi_32.h" -#else -# include "acpi_64.h" -#endif +/* + * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <acpi/pdc_intel.h> +#include <asm/numa.h> #include <asm/processor.h> +#include <asm/mmu.h> + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() local_irq_disable() +#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_FLUSH_CPU_CACHE() wbinvd() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + asm("divl %2;" \ + :"=a"(q32), "=d"(r32) \ + :"r"(d32), \ + "0"(n_lo), "1"(n_hi)) + + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + asm("shrl $1,%2 ;" \ + "rcrl $1,%3;" \ + :"=r"(n_hi), "=r"(n_lo) \ + :"0"(n_hi), "1"(n_lo)) + +#ifdef CONFIG_ACPI +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_ht; +extern int acpi_pci_disabled; +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_ht = 0; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ +#define FIX_ACPI_PAGES 4 + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); + +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} +extern int acpi_irq_balance_set(char *str); + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern void acpi_restore_state_mem(void); + +extern unsigned long acpi_wakeup_address; + +/* early initialization routine */ +extern void acpi_reserve_bootmem(void); /* * Check if the CPU can handle C2 and deeper @@ -29,4 +139,35 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) return max_cstate; } +#else /* !CONFIG_ACPI */ + +#define acpi_lapic 0 +#define acpi_ioapic 0 +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define ARCH_HAS_POWER_INIT 1 + +struct bootnode; + +#ifdef CONFIG_ACPI_NUMA +extern int acpi_numa; +extern int acpi_scan_nodes(unsigned long start, unsigned long end); +#ifdef CONFIG_X86_64 +# define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +#endif +extern void acpi_fake_nodes(const struct bootnode *fake_nodes, + int num_nodes); +#else +static inline void acpi_fake_nodes(const 
struct bootnode *fake_nodes, + int num_nodes) +{ +} #endif + +#define acpi_unlazy_tlb(x) leave_mm(x) + +#endif /*__X86_ASM_ACPI_H*/ diff --git a/include/asm-x86/acpi_32.h b/include/asm-x86/acpi_32.h deleted file mode 100644 index 723493e6c851..000000000000 --- a/include/asm-x86/acpi_32.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * asm-i386/acpi.h - * - * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#ifndef _ASM_ACPI_H -#define _ASM_ACPI_H - -#ifdef __KERNEL__ - -#include <acpi/pdc_intel.h> - -#include <asm/system.h> /* defines cmpxchg */ - -#define COMPILER_DEPENDENT_INT64 long long -#define COMPILER_DEPENDENT_UINT64 unsigned long long - -/* - * Calling conventions: - * - * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) - * ACPI_EXTERNAL_XFACE - External ACPI interfaces - * ACPI_INTERNAL_XFACE - Internal ACPI interfaces - * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces - */ -#define ACPI_SYSTEM_XFACE -#define ACPI_EXTERNAL_XFACE -#define ACPI_INTERNAL_XFACE -#define ACPI_INTERNAL_VAR_XFACE - -/* Asm macros */ - -#define ACPI_ASM_MACROS -#define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() -#define ACPI_FLUSH_CPU_CACHE() wbinvd() - -int __acpi_acquire_global_lock(unsigned int *lock); -int __acpi_release_global_lock(unsigned int *lock); - -#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) - -#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_release_global_lock(&facs->global_lock)) - -/* - * Math helper asm macros - */ -#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ - asm("divl %2;" \ - :"=a"(q32), "=d"(r32) \ - :"r"(d32), \ - "0"(n_lo), "1"(n_hi)) - - -#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ - asm("shrl $1,%2;" \ - "rcrl $1,%3;" \ - :"=r"(n_hi), "=r"(n_lo) \ - :"0"(n_hi), "1"(n_lo)) - -extern void early_quirks(void); - -#ifdef CONFIG_ACPI -extern int acpi_lapic; -extern int acpi_ioapic; -extern int acpi_noirq; -extern int acpi_strict; -extern int acpi_disabled; -extern int acpi_ht; -extern int acpi_pci_disabled; -static inline void disable_acpi(void) -{ - acpi_disabled = 1; - acpi_ht = 0; - acpi_pci_disabled = 1; - acpi_noirq = 1; -} - -/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ -#define FIX_ACPI_PAGES 4 - -extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); - -#ifdef CONFIG_X86_IO_APIC -extern int acpi_skip_timer_override; -extern int acpi_use_timer_override; -#endif - -static inline void acpi_noirq_set(void) { acpi_noirq = 
1; } -static inline void acpi_disable_pci(void) -{ - acpi_pci_disabled = 1; - acpi_noirq_set(); -} -extern int acpi_irq_balance_set(char *str); - -/* routines for saving/restoring kernel state */ -extern int acpi_save_state_mem(void); -extern void acpi_restore_state_mem(void); - -extern unsigned long acpi_wakeup_address; - -/* early initialization routine */ -extern void acpi_reserve_bootmem(void); - -#else /* !CONFIG_ACPI */ - -#define acpi_lapic 0 -#define acpi_ioapic 0 -static inline void acpi_noirq_set(void) { } -static inline void acpi_disable_pci(void) { } -static inline void disable_acpi(void) { } - -#endif /* !CONFIG_ACPI */ - -#define ARCH_HAS_POWER_INIT 1 - -#endif /*__KERNEL__*/ - -#endif /*_ASM_ACPI_H*/ diff --git a/include/asm-x86/acpi_64.h b/include/asm-x86/acpi_64.h deleted file mode 100644 index 98173357dd89..000000000000 --- a/include/asm-x86/acpi_64.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - * asm-x86_64/acpi.h - * - * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#ifndef _ASM_ACPI_H -#define _ASM_ACPI_H - -#ifdef __KERNEL__ - -#include <acpi/pdc_intel.h> -#include <asm/numa.h> - -#define COMPILER_DEPENDENT_INT64 long long -#define COMPILER_DEPENDENT_UINT64 unsigned long long - -/* - * Calling conventions: - * - * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) - * ACPI_EXTERNAL_XFACE - External ACPI interfaces - * ACPI_INTERNAL_XFACE - Internal ACPI interfaces - * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces - */ -#define ACPI_SYSTEM_XFACE -#define ACPI_EXTERNAL_XFACE -#define ACPI_INTERNAL_XFACE -#define ACPI_INTERNAL_VAR_XFACE - -/* Asm macros */ - -#define ACPI_ASM_MACROS -#define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() -#define ACPI_FLUSH_CPU_CACHE() wbinvd() - -int __acpi_acquire_global_lock(unsigned int *lock); -int __acpi_release_global_lock(unsigned int *lock); - -#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) - -#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_release_global_lock(&facs->global_lock)) - -/* - * Math helper asm macros - */ -#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ - asm("divl %2;" \ - :"=a"(q32), "=d"(r32) \ - :"r"(d32), \ - "0"(n_lo), "1"(n_hi)) - - -#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ - asm("shrl $1,%2;" \ - "rcrl $1,%3;" \ - :"=r"(n_hi), "=r"(n_lo) \ - :"0"(n_hi), "1"(n_lo)) - -#ifdef CONFIG_ACPI -extern int acpi_lapic; -extern int acpi_ioapic; -extern int acpi_noirq; -extern int acpi_strict; 
-extern int acpi_disabled; -extern int acpi_pci_disabled; -extern int acpi_ht; -static inline void disable_acpi(void) -{ - acpi_disabled = 1; - acpi_ht = 0; - acpi_pci_disabled = 1; - acpi_noirq = 1; -} - -/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ -#define FIX_ACPI_PAGES 4 - -extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); -static inline void acpi_noirq_set(void) { acpi_noirq = 1; } -static inline void acpi_disable_pci(void) -{ - acpi_pci_disabled = 1; - acpi_noirq_set(); -} -extern int acpi_irq_balance_set(char *str); - -/* routines for saving/restoring kernel state */ -extern int acpi_save_state_mem(void); -extern void acpi_restore_state_mem(void); - -extern unsigned long acpi_wakeup_address; - -/* early initialization routine */ -extern void acpi_reserve_bootmem(void); - -#else /* !CONFIG_ACPI */ - -#define acpi_lapic 0 -#define acpi_ioapic 0 -static inline void acpi_noirq_set(void) { } -static inline void acpi_disable_pci(void) { } - -#endif /* !CONFIG_ACPI */ - -extern int acpi_numa; -extern int acpi_scan_nodes(unsigned long start, unsigned long end); -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) - -extern int acpi_disabled; -extern int acpi_pci_disabled; - -#define ARCH_HAS_POWER_INIT 1 - -extern int acpi_skip_timer_override; -extern int acpi_use_timer_override; - -#ifdef CONFIG_ACPI_NUMA -extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes, - int num_nodes); -#else -static inline void acpi_fake_nodes(const struct bootnode *fake_nodes, - int num_nodes) -{ -} -#endif - -#endif /*__KERNEL__*/ - -#endif /*_ASM_ACPI_H*/ diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h index 62df2a9e7130..e4004a9f6a9a 100644 --- a/include/asm-x86/agp.h +++ b/include/asm-x86/agp.h @@ -12,13 +12,8 @@ * page. This avoids data corruption on some CPUs. */ -/* - * Caller's responsibility to call global_flush_tlb() for performance - * reasons - */ -#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) -#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) -#define flush_agp_mappings() global_flush_tlb() +#define map_page_into_agp(page) set_pages_uc(page, 1) +#define unmap_page_from_agp(page) set_pages_wb(page, 1) /* * Could use CLFLUSH here if the cpu supports it. But then it would diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index 9eef6a32a130..d8bacf3c4b08 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h @@ -1,5 +1,161 @@ -#ifdef CONFIG_X86_32 -# include "alternative_32.h" +#ifndef _ASM_X86_ALTERNATIVE_H +#define _ASM_X86_ALTERNATIVE_H + +#include <linux/types.h> +#include <linux/stddef.h> +#include <asm/asm.h> + +/* + * Alternative inline assembly for SMP. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. 
That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! CONFIG_SMP */ +#define LOCK_PREFIX "" +#endif + +/* This must be included *after* the definition of LOCK_PREFIX */ +#include <asm/cpufeature.h> + +struct alt_instr { + u8 *instr; /* original instruction */ + u8 *replacement; + u8 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ + u8 pad1; +#ifdef CONFIG_X86_64 + u32 pad2; +#endif +}; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; + +#ifdef CONFIG_SMP +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); +#else +static inline void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end) {} +static inline void alternatives_smp_module_del(struct module *mod) {} +static inline void alternatives_smp_switch(int smp) {} +#endif /* CONFIG_SMP */ + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. + */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661b\n" /* label */ \ + _ASM_PTR "663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement make sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661b\n" /* label */ \ + _ASM_PTR "663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature), ##input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, newinstr, feature, output, input...) 
\ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661b\n" /* label */ \ + _ASM_PTR "663f\n" /* new instruction */ \ + " .byte %c[feat]\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" : output : [feat] "i" (feature), ##input) + +/* + * use this macro(s) if you need more than one output parameter + * in alternative_io + */ +#define ASM_OUTPUT2(a, b) a, b + +struct paravirt_patch_site; +#ifdef CONFIG_PARAVIRT +void apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end); #else -# include "alternative_64.h" +static inline void +apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end) +{} +#define __parainstructions NULL +#define __parainstructions_end NULL #endif + +extern void text_poke(void *addr, unsigned char *opcode, int len); + +#endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/include/asm-x86/alternative_32.h b/include/asm-x86/alternative_32.h deleted file mode 100644 index bda6c810c0f4..000000000000 --- a/include/asm-x86/alternative_32.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef _I386_ALTERNATIVE_H -#define _I386_ALTERNATIVE_H - -#include <asm/types.h> -#include <linux/stddef.h> -#include <linux/types.h> - -struct alt_instr { - u8 *instr; /* original instruction */ - u8 *replacement; - u8 cpuid; /* cpuid bit set for replacement */ - u8 instrlen; /* length of original instruction */ - u8 replacementlen; /* length of new instruction, <= instrlen */ - u8 pad; -}; - -extern void alternative_instructions(void); -extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); - -struct module; -#ifdef CONFIG_SMP -extern void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end); -extern void alternatives_smp_module_del(struct module *mod); -extern void alternatives_smp_switch(int smp); -#else -static inline void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) {} -static inline void alternatives_smp_module_del(struct module *mod) {} -static inline void alternatives_smp_switch(int smp) {} -#endif /* CONFIG_SMP */ - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. - */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */\ - ".previous" :: "i" (feature) : "memory") - -/* - * Alternative inline assembly with input. - * - * Pecularities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... 
"r") - * If you use variable sized constraints like "m" or "g" in the - * replacement maake sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */\ - ".previous" :: "i" (feature), ##input) - -/* Like alternative_input, but with a single output argument */ -#define alternative_io(oldinstr, newinstr, feature, output, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c[feat]\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" : output : [feat] "i" (feature), ##input) - -/* - * use this macro(s) if you need more than one output parameter - * in alternative_io - */ -#define ASM_OUTPUT2(a, b) a, b - -/* - * Alternative inline assembly for SMP. - * - * The LOCK_PREFIX macro defined here replaces the LOCK and - * LOCK_PREFIX macros used everywhere in the source tree. - * - * SMP alternatives use the same data structures as the other - * alternatives and the X86_FEATURE_UP flag to indicate the case of a - * UP system running a SMP kernel. The existing apply_alternatives() - * works fine for patching a SMP kernel for UP. - * - * The SMP alternative tables can be kept after boot and contain both - * UP and SMP versions of the instructions to allow switching back to - * SMP at runtime, when hotplugging in a new CPU, which is especially - * useful in virtualized environments. - * - * The very common lock prefix is handled as special case in a - * separate table which is a pure address list without replacement ptr - * and size information. That keeps the table sizes small. - */ - -#ifdef CONFIG_SMP -#define LOCK_PREFIX \ - ".section .smp_locks,\"a\"\n" \ - " .align 4\n" \ - " .long 661f\n" /* address */ \ - ".previous\n" \ - "661:\n\tlock; " - -#else /* ! CONFIG_SMP */ -#define LOCK_PREFIX "" -#endif - -struct paravirt_patch_site; -#ifdef CONFIG_PARAVIRT -void apply_paravirt(struct paravirt_patch_site *start, - struct paravirt_patch_site *end); -#else -static inline void -apply_paravirt(struct paravirt_patch_site *start, - struct paravirt_patch_site *end) -{} -#define __parainstructions NULL -#define __parainstructions_end NULL -#endif - -extern void text_poke(void *addr, unsigned char *opcode, int len); - -#endif /* _I386_ALTERNATIVE_H */ diff --git a/include/asm-x86/alternative_64.h b/include/asm-x86/alternative_64.h deleted file mode 100644 index ab161e810151..000000000000 --- a/include/asm-x86/alternative_64.h +++ /dev/null @@ -1,159 +0,0 @@ -#ifndef _X86_64_ALTERNATIVE_H -#define _X86_64_ALTERNATIVE_H - -#ifdef __KERNEL__ - -#include <linux/types.h> -#include <linux/stddef.h> - -/* - * Alternative inline assembly for SMP. - * - * The LOCK_PREFIX macro defined here replaces the LOCK and - * LOCK_PREFIX macros used everywhere in the source tree. 
- * - * SMP alternatives use the same data structures as the other - * alternatives and the X86_FEATURE_UP flag to indicate the case of a - * UP system running a SMP kernel. The existing apply_alternatives() - * works fine for patching a SMP kernel for UP. - * - * The SMP alternative tables can be kept after boot and contain both - * UP and SMP versions of the instructions to allow switching back to - * SMP at runtime, when hotplugging in a new CPU, which is especially - * useful in virtualized environments. - * - * The very common lock prefix is handled as special case in a - * separate table which is a pure address list without replacement ptr - * and size information. That keeps the table sizes small. - */ - -#ifdef CONFIG_SMP -#define LOCK_PREFIX \ - ".section .smp_locks,\"a\"\n" \ - " .align 8\n" \ - " .quad 661f\n" /* address */ \ - ".previous\n" \ - "661:\n\tlock; " - -#else /* ! CONFIG_SMP */ -#define LOCK_PREFIX "" -#endif - -/* This must be included *after* the definition of LOCK_PREFIX */ -#include <asm/cpufeature.h> - -struct alt_instr { - u8 *instr; /* original instruction */ - u8 *replacement; - u8 cpuid; /* cpuid bit set for replacement */ - u8 instrlen; /* length of original instruction */ - u8 replacementlen; /* length of new instruction, <= instrlen */ - u8 pad[5]; -}; - -extern void alternative_instructions(void); -extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); - -struct module; - -#ifdef CONFIG_SMP -extern void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end); -extern void alternatives_smp_module_del(struct module *mod); -extern void alternatives_smp_switch(int smp); -#else -static inline void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) {} -static inline void alternatives_smp_module_del(struct module *mod) {} -static inline void alternatives_smp_switch(int smp) {} -#endif - -#endif - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. - */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature) : "memory") - -/* - * Alternative inline assembly with input. - * - * Pecularities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement make sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) 
\ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature), ##input) - -/* Like alternative_input, but with a single output argument */ -#define alternative_io(oldinstr, newinstr, feature, output, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c[feat]\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" : output : [feat] "i" (feature), ##input) - -/* - * use this macro(s) if you need more than one output parameter - * in alternative_io - */ -#define ASM_OUTPUT2(a, b) a, b - -struct paravirt_patch; -#ifdef CONFIG_PARAVIRT -void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); -#else -static inline void -apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) -{} -#define __parainstructions NULL -#define __parainstructions_end NULL -#endif - -extern void text_poke(void *addr, unsigned char *opcode, int len); - -#endif /* _X86_64_ALTERNATIVE_H */ diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index 9fbcc0bd2ac4..bcfc07fd3661 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -1,5 +1,140 @@ -#ifdef CONFIG_X86_32 -# include "apic_32.h" +#ifndef _ASM_X86_APIC_H +#define _ASM_X86_APIC_H + +#include <linux/pm.h> +#include <linux/delay.h> +#include <asm/fixmap.h> +#include <asm/apicdef.h> +#include <asm/processor.h> +#include <asm/system.h> + +#define ARCH_APICTIMER_STOPS_ON_C3 1 + +#define Dprintk(x...) + +/* + * Debugging macros + */ +#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + + +extern void generic_apic_probe(void); + +#ifdef CONFIG_X86_LOCAL_APIC + +extern int apic_verbosity; +extern int timer_over_8254; +extern int local_apic_timer_c2_ok; +extern int local_apic_timer_disabled; + +extern int apic_runs_main_timer; +extern int ioapic_force; +extern int disable_apic; +extern int disable_apic_timer; +extern unsigned boot_cpu_id; + +/* + * Basic functions accessing APICs. 
+ */ +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> #else -# include "apic_64.h" +#define apic_write native_apic_write +#define apic_write_atomic native_apic_write_atomic +#define apic_read native_apic_read +#define setup_boot_clock setup_boot_APIC_clock +#define setup_secondary_clock setup_secondary_APIC_clock #endif + +static inline void native_apic_write(unsigned long reg, u32 v) +{ + *((volatile u32 *)(APIC_BASE + reg)) = v; +} + +static inline void native_apic_write_atomic(unsigned long reg, u32 v) +{ + (void) xchg((u32*)(APIC_BASE + reg), v); +} + +static inline u32 native_apic_read(unsigned long reg) +{ + return *((volatile u32 *)(APIC_BASE + reg)); +} + +extern void apic_wait_icr_idle(void); +extern u32 safe_apic_wait_icr_idle(void); +extern int get_physical_broadcast(void); + +#ifdef CONFIG_X86_GOOD_APIC +# define FORCE_READ_AROUND_WRITE 0 +# define apic_read_around(x) +# define apic_write_around(x, y) apic_write((x), (y)) +#else +# define FORCE_READ_AROUND_WRITE 1 +# define apic_read_around(x) apic_read(x) +# define apic_write_around(x, y) apic_write_atomic((x), (y)) +#endif + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction: + * - a single rmw on Pentium/82489DX + * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) + * ... yummie. + */ + + /* Docs say use 0 for future compatibility */ + apic_write_around(APIC_EOI, 0); +} + +extern int lapic_get_maxlvt(void); +extern void clear_local_APIC(void); +extern void connect_bsp_APIC(void); +extern void disconnect_bsp_APIC(int virt_wire_setup); +extern void disable_local_APIC(void); +extern void lapic_shutdown(void); +extern int verify_local_APIC(void); +extern void cache_APIC_registers(void); +extern void sync_Arb_IDs(void); +extern void init_bsp_APIC(void); +extern void setup_local_APIC(void); +extern void end_local_APIC_setup(void); +extern void init_apic_mappings(void); +extern void setup_boot_APIC_clock(void); +extern void setup_secondary_APIC_clock(void); +extern int APIC_init_uniprocessor(void); +extern void enable_NMI_through_LVT0(void); + +/* + * On 32bit this is mach-xxx local + */ +#ifdef CONFIG_X86_64 +extern void setup_apic_routing(void); +#endif + +extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); +extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); + +extern int apic_is_clustered_box(void); + +#else /* !CONFIG_X86_LOCAL_APIC */ +static inline void lapic_shutdown(void) { } +#define local_apic_timer_c2_ok 1 + +#endif /* !CONFIG_X86_LOCAL_APIC */ + +#endif /* __ASM_APIC_H */ diff --git a/include/asm-x86/apic_32.h b/include/asm-x86/apic_32.h deleted file mode 100644 index be158b27d54b..000000000000 --- a/include/asm-x86/apic_32.h +++ /dev/null @@ -1,127 +0,0 @@ -#ifndef __ASM_APIC_H -#define __ASM_APIC_H - -#include <linux/pm.h> -#include <linux/delay.h> -#include <asm/fixmap.h> -#include <asm/apicdef.h> -#include <asm/processor.h> -#include <asm/system.h> - -#define Dprintk(x...) - -/* - * Debugging macros - */ -#define APIC_QUIET 0 -#define APIC_VERBOSE 1 -#define APIC_DEBUG 2 - -extern int apic_verbosity; - -/* - * Define the default level of output to be very little - * This can be turned up by using apic=verbose for more - * information and apic=debug for _lots_ of information. - * apic_verbosity is defined in apic.c - */ -#define apic_printk(v, s, a...) 
do { \ - if ((v) <= apic_verbosity) \ - printk(s, ##a); \ - } while (0) - - -extern void generic_apic_probe(void); - -#ifdef CONFIG_X86_LOCAL_APIC - -/* - * Basic functions accessing APICs. - */ -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define apic_write native_apic_write -#define apic_write_atomic native_apic_write_atomic -#define apic_read native_apic_read -#define setup_boot_clock setup_boot_APIC_clock -#define setup_secondary_clock setup_secondary_APIC_clock -#endif - -static __inline fastcall void native_apic_write(unsigned long reg, - unsigned long v) -{ - *((volatile unsigned long *)(APIC_BASE+reg)) = v; -} - -static __inline fastcall void native_apic_write_atomic(unsigned long reg, - unsigned long v) -{ - xchg((volatile unsigned long *)(APIC_BASE+reg), v); -} - -static __inline fastcall unsigned long native_apic_read(unsigned long reg) -{ - return *((volatile unsigned long *)(APIC_BASE+reg)); -} - -void apic_wait_icr_idle(void); -unsigned long safe_apic_wait_icr_idle(void); -int get_physical_broadcast(void); - -#ifdef CONFIG_X86_GOOD_APIC -# define FORCE_READ_AROUND_WRITE 0 -# define apic_read_around(x) -# define apic_write_around(x,y) apic_write((x),(y)) -#else -# define FORCE_READ_AROUND_WRITE 1 -# define apic_read_around(x) apic_read(x) -# define apic_write_around(x,y) apic_write_atomic((x),(y)) -#endif - -static inline void ack_APIC_irq(void) -{ - /* - * ack_APIC_irq() actually gets compiled as a single instruction: - * - a single rmw on Pentium/82489DX - * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) - * ... yummie. - */ - - /* Docs say use 0 for future compatibility */ - apic_write_around(APIC_EOI, 0); -} - -extern int lapic_get_maxlvt(void); -extern void clear_local_APIC(void); -extern void connect_bsp_APIC (void); -extern void disconnect_bsp_APIC (int virt_wire_setup); -extern void disable_local_APIC (void); -extern void lapic_shutdown (void); -extern int verify_local_APIC (void); -extern void cache_APIC_registers (void); -extern void sync_Arb_IDs (void); -extern void init_bsp_APIC (void); -extern void setup_local_APIC (void); -extern void init_apic_mappings (void); -extern void smp_local_timer_interrupt (void); -extern void setup_boot_APIC_clock (void); -extern void setup_secondary_APIC_clock (void); -extern int APIC_init_uniprocessor (void); - -extern void enable_NMI_through_LVT0 (void * dummy); - -#define ARCH_APICTIMER_STOPS_ON_C3 1 - -extern int timer_over_8254; -extern int local_apic_timer_c2_ok; - -extern int local_apic_timer_disabled; - -#else /* !CONFIG_X86_LOCAL_APIC */ -static inline void lapic_shutdown(void) { } -#define local_apic_timer_c2_ok 1 - -#endif /* !CONFIG_X86_LOCAL_APIC */ - -#endif /* __ASM_APIC_H */ diff --git a/include/asm-x86/apic_64.h b/include/asm-x86/apic_64.h deleted file mode 100644 index 2747a11a2b19..000000000000 --- a/include/asm-x86/apic_64.h +++ /dev/null @@ -1,102 +0,0 @@ -#ifndef __ASM_APIC_H -#define __ASM_APIC_H - -#include <linux/pm.h> -#include <linux/delay.h> -#include <asm/fixmap.h> -#include <asm/apicdef.h> -#include <asm/system.h> - -#define Dprintk(x...) - -/* - * Debugging macros - */ -#define APIC_QUIET 0 -#define APIC_VERBOSE 1 -#define APIC_DEBUG 2 - -extern int apic_verbosity; -extern int apic_runs_main_timer; -extern int ioapic_force; -extern int disable_apic_timer; - -/* - * Define the default level of output to be very little - * This can be turned up by using apic=verbose for more - * information and apic=debug for _lots_ of information. 
- * apic_verbosity is defined in apic.c - */ -#define apic_printk(v, s, a...) do { \ - if ((v) <= apic_verbosity) \ - printk(s, ##a); \ - } while (0) - -struct pt_regs; - -/* - * Basic functions accessing APICs. - */ - -static __inline void apic_write(unsigned long reg, unsigned int v) -{ - *((volatile unsigned int *)(APIC_BASE+reg)) = v; -} - -static __inline unsigned int apic_read(unsigned long reg) -{ - return *((volatile unsigned int *)(APIC_BASE+reg)); -} - -extern void apic_wait_icr_idle(void); -extern unsigned int safe_apic_wait_icr_idle(void); - -static inline void ack_APIC_irq(void) -{ - /* - * ack_APIC_irq() actually gets compiled as a single instruction: - * - a single rmw on Pentium/82489DX - * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) - * ... yummie. - */ - - /* Docs say use 0 for future compatibility */ - apic_write(APIC_EOI, 0); -} - -extern int get_maxlvt (void); -extern void clear_local_APIC (void); -extern void connect_bsp_APIC (void); -extern void disconnect_bsp_APIC (int virt_wire_setup); -extern void disable_local_APIC (void); -extern void lapic_shutdown (void); -extern int verify_local_APIC (void); -extern void cache_APIC_registers (void); -extern void sync_Arb_IDs (void); -extern void init_bsp_APIC (void); -extern void setup_local_APIC (void); -extern void init_apic_mappings (void); -extern void smp_local_timer_interrupt (void); -extern void setup_boot_APIC_clock (void); -extern void setup_secondary_APIC_clock (void); -extern int APIC_init_uniprocessor (void); -extern void setup_apic_routing(void); - -extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, - unsigned char msg_type, unsigned char mask); - -extern int apic_is_clustered_box(void); - -#define K8_APIC_EXT_LVT_BASE 0x500 -#define K8_APIC_EXT_INT_MSG_FIX 0x0 -#define K8_APIC_EXT_INT_MSG_SMI 0x2 -#define K8_APIC_EXT_INT_MSG_NMI 0x4 -#define K8_APIC_EXT_INT_MSG_EXT 0x7 -#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 - -#define ARCH_APICTIMER_STOPS_ON_C3 1 - -extern unsigned boot_cpu_id; -extern int local_apic_timer_c2_ok; - -#endif /* __ASM_APIC_H */ diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 4542c220bf4d..550af7a6f88e 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -1,5 +1,413 @@ +#ifndef _ASM_X86_APICDEF_H +#define _ASM_X86_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox <Alan.Cox@linux.org>, 1995. 
+ * Ingo Molnar <mingo@redhat.com>, 1999, 2000 + */ + +#define APIC_DEFAULT_PHYS_BASE 0xfee00000 + +#define APIC_ID 0x20 + +#ifdef CONFIG_X86_64 +# define APIC_ID_MASK (0xFFu<<24) +# define GET_APIC_ID(x) (((x)>>24)&0xFFu) +# define SET_APIC_ID(x) (((x)<<24)) +#endif + +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define GET_APIC_VERSION(x) ((x)&0xFFu) +#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) +#define APIC_INTEGRATED(x) ((x)&0xF0u) +#define APIC_XAPIC(x) ((x) >= 0x14) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFFu +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFFu +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EIO_ACK 0x0 +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFFu<<24) +#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) +#define APIC_ALL_CPUS 0xFFu +#define APIC_DFR 0xE0 +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_FOCUS_DISABLED (1<<9) +#define APIC_SPIV_APIC_ENABLED (1<<8) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_ICR 0x300 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) +#define SET_APIC_DEST_FIELD(x) ((x)<<24) +#define APIC_LVTT 0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) +#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) +#define SET_APIC_TIMER_BASE(x) (((x)<<18)) +#define APIC_TIMER_BASE_CLKIN 0x0 +#define APIC_TIMER_BASE_TMBASE 0x1 +#define APIC_TIMER_BASE_DIV 0x2 +#define APIC_LVT_TIMER_PERIODIC (1<<17) +#define APIC_LVT_MASKED (1<<16) +#define APIC_LVT_LEVEL_TRIGGER (1<<15) +#define APIC_LVT_REMOTE_IRR (1<<14) +#define APIC_INPUT_POLARITY (1<<13) +#define APIC_SEND_PENDING (1<<12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) +#define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_TDR_DIV_TMBASE (1<<2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA +#define APIC_EILVT0 0x500 +#define 
APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */ +#define APIC_EILVT_NR_AMD_10H 4 +#define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF) +#define APIC_EILVT_MSG_FIX 0x0 +#define APIC_EILVT_MSG_SMI 0x2 +#define APIC_EILVT_MSG_NMI 0x4 +#define APIC_EILVT_MSG_EXT 0x7 +#define APIC_EILVT_MASKED (1<<16) +#define APIC_EILVT1 0x510 +#define APIC_EILVT2 0x520 +#define APIC_EILVT3 0x530 + +#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) + #ifdef CONFIG_X86_32 -# include "apicdef_32.h" +# define MAX_IO_APICS 64 #else -# include "apicdef_64.h" +# define MAX_IO_APICS 128 +# define MAX_LOCAL_APIC 256 +#endif + +/* + * All x86-64 systems are xAPIC compatible. + * In the following, "apicid" is a physical APIC ID. + */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) +#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) +#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) +#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) +#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) + +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... + */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved[4]; } __reserved_02; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, 
+ send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { /* LVT - Thermal Sensor */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_thermal; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; + +} __attribute__ ((packed)); + +#undef u32 + +#define BAD_APICID 0xFFu + #endif diff --git a/include/asm-x86/apicdef_32.h b/include/asm-x86/apicdef_32.h deleted file mode 100644 index 9f6995341fdc..000000000000 --- a/include/asm-x86/apicdef_32.h +++ /dev/null @@ -1,375 +0,0 @@ -#ifndef __ASM_APICDEF_H -#define __ASM_APICDEF_H - -/* - * Constants for various Intel APICs. 
(local APIC, IOAPIC, etc.) - * - * Alan Cox <Alan.Cox@linux.org>, 1995. - * Ingo Molnar <mingo@redhat.com>, 1999, 2000 - */ - -#define APIC_DEFAULT_PHYS_BASE 0xfee00000 - -#define APIC_ID 0x20 -#define APIC_LVR 0x30 -#define APIC_LVR_MASK 0xFF00FF -#define GET_APIC_VERSION(x) ((x)&0xFF) -#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) -#define APIC_INTEGRATED(x) ((x)&0xF0) -#define APIC_XAPIC(x) ((x) >= 0x14) -#define APIC_TASKPRI 0x80 -#define APIC_TPRI_MASK 0xFF -#define APIC_ARBPRI 0x90 -#define APIC_ARBPRI_MASK 0xFF -#define APIC_PROCPRI 0xA0 -#define APIC_EOI 0xB0 -#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ -#define APIC_RRR 0xC0 -#define APIC_LDR 0xD0 -#define APIC_LDR_MASK (0xFF<<24) -#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) -#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) -#define APIC_ALL_CPUS 0xFF -#define APIC_DFR 0xE0 -#define APIC_DFR_CLUSTER 0x0FFFFFFFul -#define APIC_DFR_FLAT 0xFFFFFFFFul -#define APIC_SPIV 0xF0 -#define APIC_SPIV_FOCUS_DISABLED (1<<9) -#define APIC_SPIV_APIC_ENABLED (1<<8) -#define APIC_ISR 0x100 -#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ -#define APIC_TMR 0x180 -#define APIC_IRR 0x200 -#define APIC_ESR 0x280 -#define APIC_ESR_SEND_CS 0x00001 -#define APIC_ESR_RECV_CS 0x00002 -#define APIC_ESR_SEND_ACC 0x00004 -#define APIC_ESR_RECV_ACC 0x00008 -#define APIC_ESR_SENDILL 0x00020 -#define APIC_ESR_RECVILL 0x00040 -#define APIC_ESR_ILLREGA 0x00080 -#define APIC_ICR 0x300 -#define APIC_DEST_SELF 0x40000 -#define APIC_DEST_ALLINC 0x80000 -#define APIC_DEST_ALLBUT 0xC0000 -#define APIC_ICR_RR_MASK 0x30000 -#define APIC_ICR_RR_INVALID 0x00000 -#define APIC_ICR_RR_INPROG 0x10000 -#define APIC_ICR_RR_VALID 0x20000 -#define APIC_INT_LEVELTRIG 0x08000 -#define APIC_INT_ASSERT 0x04000 -#define APIC_ICR_BUSY 0x01000 -#define APIC_DEST_LOGICAL 0x00800 -#define APIC_DM_FIXED 0x00000 -#define APIC_DM_LOWEST 0x00100 -#define APIC_DM_SMI 0x00200 -#define APIC_DM_REMRD 0x00300 -#define APIC_DM_NMI 0x00400 -#define APIC_DM_INIT 0x00500 -#define APIC_DM_STARTUP 0x00600 -#define APIC_DM_EXTINT 0x00700 -#define APIC_VECTOR_MASK 0x000FF -#define APIC_ICR2 0x310 -#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) -#define SET_APIC_DEST_FIELD(x) ((x)<<24) -#define APIC_LVTT 0x320 -#define APIC_LVTTHMR 0x330 -#define APIC_LVTPC 0x340 -#define APIC_LVT0 0x350 -#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) -#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) -#define SET_APIC_TIMER_BASE(x) (((x)<<18)) -#define APIC_TIMER_BASE_CLKIN 0x0 -#define APIC_TIMER_BASE_TMBASE 0x1 -#define APIC_TIMER_BASE_DIV 0x2 -#define APIC_LVT_TIMER_PERIODIC (1<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) -#define APIC_MODE_MASK 0x700 -#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) -#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) -#define APIC_MODE_FIXED 0x0 -#define APIC_MODE_NMI 0x4 -#define APIC_MODE_EXTINT 0x7 -#define APIC_LVT1 0x360 -#define APIC_LVTERR 0x370 -#define APIC_TMICT 0x380 -#define APIC_TMCCT 0x390 -#define APIC_TDCR 0x3E0 -#define APIC_TDR_DIV_TMBASE (1<<2) -#define APIC_TDR_DIV_1 0xB -#define APIC_TDR_DIV_2 0x0 -#define APIC_TDR_DIV_4 0x1 -#define APIC_TDR_DIV_8 0x2 -#define APIC_TDR_DIV_16 0x3 -#define APIC_TDR_DIV_32 0x8 -#define APIC_TDR_DIV_64 0x9 -#define APIC_TDR_DIV_128 0xA - -#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) - -#define MAX_IO_APICS 64 - -/* - * the local APIC register structure, memory 
mapped. Not terribly well - * tested, but we might eventually use this one in the future - the - * problem why we cannot use it right now is the P5 APIC, it has an - * errata which cannot take 8-bit reads and writes, only 32-bit ones ... - */ -#define u32 unsigned int - - -struct local_apic { - -/*000*/ struct { u32 __reserved[4]; } __reserved_01; - -/*010*/ struct { u32 __reserved[4]; } __reserved_02; - -/*020*/ struct { /* APIC ID Register */ - u32 __reserved_1 : 24, - phys_apic_id : 4, - __reserved_2 : 4; - u32 __reserved[3]; - } id; - -/*030*/ const - struct { /* APIC Version Register */ - u32 version : 8, - __reserved_1 : 8, - max_lvt : 8, - __reserved_2 : 8; - u32 __reserved[3]; - } version; - -/*040*/ struct { u32 __reserved[4]; } __reserved_03; - -/*050*/ struct { u32 __reserved[4]; } __reserved_04; - -/*060*/ struct { u32 __reserved[4]; } __reserved_05; - -/*070*/ struct { u32 __reserved[4]; } __reserved_06; - -/*080*/ struct { /* Task Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } tpr; - -/*090*/ const - struct { /* Arbitration Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } apr; - -/*0A0*/ const - struct { /* Processor Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } ppr; - -/*0B0*/ struct { /* End Of Interrupt Register */ - u32 eoi; - u32 __reserved[3]; - } eoi; - -/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; - -/*0D0*/ struct { /* Logical Destination Register */ - u32 __reserved_1 : 24, - logical_dest : 8; - u32 __reserved_2[3]; - } ldr; - -/*0E0*/ struct { /* Destination Format Register */ - u32 __reserved_1 : 28, - model : 4; - u32 __reserved_2[3]; - } dfr; - -/*0F0*/ struct { /* Spurious Interrupt Vector Register */ - u32 spurious_vector : 8, - apic_enabled : 1, - focus_cpu : 1, - __reserved_2 : 22; - u32 __reserved_3[3]; - } svr; - -/*100*/ struct { /* In Service Register */ -/*170*/ u32 bitfield; - u32 __reserved[3]; - } isr [8]; - -/*180*/ struct { /* Trigger Mode Register */ -/*1F0*/ u32 bitfield; - u32 __reserved[3]; - } tmr [8]; - -/*200*/ struct { /* Interrupt Request Register */ -/*270*/ u32 bitfield; - u32 __reserved[3]; - } irr [8]; - -/*280*/ union { /* Error Status Register */ - struct { - u32 send_cs_error : 1, - receive_cs_error : 1, - send_accept_error : 1, - receive_accept_error : 1, - __reserved_1 : 1, - send_illegal_vector : 1, - receive_illegal_vector : 1, - illegal_register_address : 1, - __reserved_2 : 24; - u32 __reserved_3[3]; - } error_bits; - struct { - u32 errors; - u32 __reserved_3[3]; - } all_errors; - } esr; - -/*290*/ struct { u32 __reserved[4]; } __reserved_08; - -/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; - -/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; - -/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; - -/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; - -/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; - -/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; - -/*300*/ struct { /* Interrupt Command Register 1 */ - u32 vector : 8, - delivery_mode : 3, - destination_mode : 1, - delivery_status : 1, - __reserved_1 : 1, - level : 1, - trigger : 1, - __reserved_2 : 2, - shorthand : 2, - __reserved_3 : 12; - u32 __reserved_4[3]; - } icr1; - -/*310*/ struct { /* Interrupt Command Register 2 */ - union { - u32 __reserved_1 : 24, - phys_dest : 4, - __reserved_2 : 4; - u32 __reserved_3 : 24, - logical_dest : 8; - } dest; - u32 __reserved_4[3]; - } icr2; - -/*320*/ struct { /* LVT - Timer */ - 
u32 vector : 8, - __reserved_1 : 4, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - timer_mode : 1, - __reserved_3 : 14; - u32 __reserved_4[3]; - } lvt_timer; - -/*330*/ struct { /* LVT - Thermal Sensor */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_thermal; - -/*340*/ struct { /* LVT - Performance Counter */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_pc; - -/*350*/ struct { /* LVT - LINT0 */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - polarity : 1, - remote_irr : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15; - u32 __reserved_3[3]; - } lvt_lint0; - -/*360*/ struct { /* LVT - LINT1 */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - polarity : 1, - remote_irr : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15; - u32 __reserved_3[3]; - } lvt_lint1; - -/*370*/ struct { /* LVT - Error */ - u32 vector : 8, - __reserved_1 : 4, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_error; - -/*380*/ struct { /* Timer Initial Count Register */ - u32 initial_count; - u32 __reserved_2[3]; - } timer_icr; - -/*390*/ const - struct { /* Timer Current Count Register */ - u32 curr_count; - u32 __reserved_2[3]; - } timer_ccr; - -/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; - -/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; - -/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; - -/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; - -/*3E0*/ struct { /* Timer Divide Configuration Register */ - u32 divisor : 4, - __reserved_1 : 28; - u32 __reserved_2[3]; - } timer_dcr; - -/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; - -} __attribute__ ((packed)); - -#undef u32 - -#endif diff --git a/include/asm-x86/apicdef_64.h b/include/asm-x86/apicdef_64.h deleted file mode 100644 index 1dd40067c67c..000000000000 --- a/include/asm-x86/apicdef_64.h +++ /dev/null @@ -1,392 +0,0 @@ -#ifndef __ASM_APICDEF_H -#define __ASM_APICDEF_H - -/* - * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) - * - * Alan Cox <Alan.Cox@linux.org>, 1995. - * Ingo Molnar <mingo@redhat.com>, 1999, 2000 - */ - -#define APIC_DEFAULT_PHYS_BASE 0xfee00000 - -#define APIC_ID 0x20 -#define APIC_ID_MASK (0xFFu<<24) -#define GET_APIC_ID(x) (((x)>>24)&0xFFu) -#define SET_APIC_ID(x) (((x)<<24)) -#define APIC_LVR 0x30 -#define APIC_LVR_MASK 0xFF00FF -#define GET_APIC_VERSION(x) ((x)&0xFFu) -#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) -#define APIC_INTEGRATED(x) ((x)&0xF0u) -#define APIC_TASKPRI 0x80 -#define APIC_TPRI_MASK 0xFFu -#define APIC_ARBPRI 0x90 -#define APIC_ARBPRI_MASK 0xFFu -#define APIC_PROCPRI 0xA0 -#define APIC_EOI 0xB0 -#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ -#define APIC_RRR 0xC0 -#define APIC_LDR 0xD0 -#define APIC_LDR_MASK (0xFFu<<24) -#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) -#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) -#define APIC_ALL_CPUS 0xFFu -#define APIC_DFR 0xE0 -#define APIC_DFR_CLUSTER 0x0FFFFFFFul -#define APIC_DFR_FLAT 0xFFFFFFFFul -#define APIC_SPIV 0xF0 -#define APIC_SPIV_FOCUS_DISABLED (1<<9) -#define APIC_SPIV_APIC_ENABLED (1<<8) -#define APIC_ISR 0x100 -#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. 
*/ -#define APIC_TMR 0x180 -#define APIC_IRR 0x200 -#define APIC_ESR 0x280 -#define APIC_ESR_SEND_CS 0x00001 -#define APIC_ESR_RECV_CS 0x00002 -#define APIC_ESR_SEND_ACC 0x00004 -#define APIC_ESR_RECV_ACC 0x00008 -#define APIC_ESR_SENDILL 0x00020 -#define APIC_ESR_RECVILL 0x00040 -#define APIC_ESR_ILLREGA 0x00080 -#define APIC_ICR 0x300 -#define APIC_DEST_SELF 0x40000 -#define APIC_DEST_ALLINC 0x80000 -#define APIC_DEST_ALLBUT 0xC0000 -#define APIC_ICR_RR_MASK 0x30000 -#define APIC_ICR_RR_INVALID 0x00000 -#define APIC_ICR_RR_INPROG 0x10000 -#define APIC_ICR_RR_VALID 0x20000 -#define APIC_INT_LEVELTRIG 0x08000 -#define APIC_INT_ASSERT 0x04000 -#define APIC_ICR_BUSY 0x01000 -#define APIC_DEST_LOGICAL 0x00800 -#define APIC_DEST_PHYSICAL 0x00000 -#define APIC_DM_FIXED 0x00000 -#define APIC_DM_LOWEST 0x00100 -#define APIC_DM_SMI 0x00200 -#define APIC_DM_REMRD 0x00300 -#define APIC_DM_NMI 0x00400 -#define APIC_DM_INIT 0x00500 -#define APIC_DM_STARTUP 0x00600 -#define APIC_DM_EXTINT 0x00700 -#define APIC_VECTOR_MASK 0x000FF -#define APIC_ICR2 0x310 -#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) -#define SET_APIC_DEST_FIELD(x) ((x)<<24) -#define APIC_LVTT 0x320 -#define APIC_LVTTHMR 0x330 -#define APIC_LVTPC 0x340 -#define APIC_LVT0 0x350 -#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) -#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) -#define SET_APIC_TIMER_BASE(x) (((x)<<18)) -#define APIC_TIMER_BASE_CLKIN 0x0 -#define APIC_TIMER_BASE_TMBASE 0x1 -#define APIC_TIMER_BASE_DIV 0x2 -#define APIC_LVT_TIMER_PERIODIC (1<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) -#define APIC_MODE_MASK 0x700 -#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) -#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) -#define APIC_MODE_FIXED 0x0 -#define APIC_MODE_NMI 0x4 -#define APIC_MODE_EXTINT 0x7 -#define APIC_LVT1 0x360 -#define APIC_LVTERR 0x370 -#define APIC_TMICT 0x380 -#define APIC_TMCCT 0x390 -#define APIC_TDCR 0x3E0 -#define APIC_TDR_DIV_TMBASE (1<<2) -#define APIC_TDR_DIV_1 0xB -#define APIC_TDR_DIV_2 0x0 -#define APIC_TDR_DIV_4 0x1 -#define APIC_TDR_DIV_8 0x2 -#define APIC_TDR_DIV_16 0x3 -#define APIC_TDR_DIV_32 0x8 -#define APIC_TDR_DIV_64 0x9 -#define APIC_TDR_DIV_128 0xA - -#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) - -#define MAX_IO_APICS 128 -#define MAX_LOCAL_APIC 256 - -/* - * All x86-64 systems are xAPIC compatible. - * In the following, "apicid" is a physical APIC ID. - */ -#define XAPIC_DEST_CPUS_SHIFT 4 -#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) -#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) -#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) -#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) -#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) -#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) - -/* - * the local APIC register structure, memory mapped. Not terribly well - * tested, but we might eventually use this one in the future - the - * problem why we cannot use it right now is the P5 APIC, it has an - * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
- */ -#define u32 unsigned int - -struct local_apic { - -/*000*/ struct { u32 __reserved[4]; } __reserved_01; - -/*010*/ struct { u32 __reserved[4]; } __reserved_02; - -/*020*/ struct { /* APIC ID Register */ - u32 __reserved_1 : 24, - phys_apic_id : 4, - __reserved_2 : 4; - u32 __reserved[3]; - } id; - -/*030*/ const - struct { /* APIC Version Register */ - u32 version : 8, - __reserved_1 : 8, - max_lvt : 8, - __reserved_2 : 8; - u32 __reserved[3]; - } version; - -/*040*/ struct { u32 __reserved[4]; } __reserved_03; - -/*050*/ struct { u32 __reserved[4]; } __reserved_04; - -/*060*/ struct { u32 __reserved[4]; } __reserved_05; - -/*070*/ struct { u32 __reserved[4]; } __reserved_06; - -/*080*/ struct { /* Task Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } tpr; - -/*090*/ const - struct { /* Arbitration Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } apr; - -/*0A0*/ const - struct { /* Processor Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } ppr; - -/*0B0*/ struct { /* End Of Interrupt Register */ - u32 eoi; - u32 __reserved[3]; - } eoi; - -/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; - -/*0D0*/ struct { /* Logical Destination Register */ - u32 __reserved_1 : 24, - logical_dest : 8; - u32 __reserved_2[3]; - } ldr; - -/*0E0*/ struct { /* Destination Format Register */ - u32 __reserved_1 : 28, - model : 4; - u32 __reserved_2[3]; - } dfr; - -/*0F0*/ struct { /* Spurious Interrupt Vector Register */ - u32 spurious_vector : 8, - apic_enabled : 1, - focus_cpu : 1, - __reserved_2 : 22; - u32 __reserved_3[3]; - } svr; - -/*100*/ struct { /* In Service Register */ -/*170*/ u32 bitfield; - u32 __reserved[3]; - } isr [8]; - -/*180*/ struct { /* Trigger Mode Register */ -/*1F0*/ u32 bitfield; - u32 __reserved[3]; - } tmr [8]; - -/*200*/ struct { /* Interrupt Request Register */ -/*270*/ u32 bitfield; - u32 __reserved[3]; - } irr [8]; - -/*280*/ union { /* Error Status Register */ - struct { - u32 send_cs_error : 1, - receive_cs_error : 1, - send_accept_error : 1, - receive_accept_error : 1, - __reserved_1 : 1, - send_illegal_vector : 1, - receive_illegal_vector : 1, - illegal_register_address : 1, - __reserved_2 : 24; - u32 __reserved_3[3]; - } error_bits; - struct { - u32 errors; - u32 __reserved_3[3]; - } all_errors; - } esr; - -/*290*/ struct { u32 __reserved[4]; } __reserved_08; - -/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; - -/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; - -/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; - -/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; - -/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; - -/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; - -/*300*/ struct { /* Interrupt Command Register 1 */ - u32 vector : 8, - delivery_mode : 3, - destination_mode : 1, - delivery_status : 1, - __reserved_1 : 1, - level : 1, - trigger : 1, - __reserved_2 : 2, - shorthand : 2, - __reserved_3 : 12; - u32 __reserved_4[3]; - } icr1; - -/*310*/ struct { /* Interrupt Command Register 2 */ - union { - u32 __reserved_1 : 24, - phys_dest : 4, - __reserved_2 : 4; - u32 __reserved_3 : 24, - logical_dest : 8; - } dest; - u32 __reserved_4[3]; - } icr2; - -/*320*/ struct { /* LVT - Timer */ - u32 vector : 8, - __reserved_1 : 4, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - timer_mode : 1, - __reserved_3 : 14; - u32 __reserved_4[3]; - } lvt_timer; - -/*330*/ struct { /* LVT - Thermal Sensor */ - u32 vector : 8, - 
delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_thermal; - -/*340*/ struct { /* LVT - Performance Counter */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_pc; - -/*350*/ struct { /* LVT - LINT0 */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - polarity : 1, - remote_irr : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15; - u32 __reserved_3[3]; - } lvt_lint0; - -/*360*/ struct { /* LVT - LINT1 */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - polarity : 1, - remote_irr : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15; - u32 __reserved_3[3]; - } lvt_lint1; - -/*370*/ struct { /* LVT - Error */ - u32 vector : 8, - __reserved_1 : 4, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_error; - -/*380*/ struct { /* Timer Initial Count Register */ - u32 initial_count; - u32 __reserved_2[3]; - } timer_icr; - -/*390*/ const - struct { /* Timer Current Count Register */ - u32 curr_count; - u32 __reserved_2[3]; - } timer_ccr; - -/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; - -/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; - -/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; - -/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; - -/*3E0*/ struct { /* Timer Divide Configuration Register */ - u32 divisor : 4, - __reserved_1 : 28; - u32 __reserved_2[3]; - } timer_dcr; - -/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; - -} __attribute__ ((packed)); - -#undef u32 - -#define BAD_APICID 0xFFu - -#endif diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h index a8c1fca9726d..768aee8a04ef 100644 --- a/include/asm-x86/arch_hooks.h +++ b/include/asm-x86/arch_hooks.h @@ -6,7 +6,7 @@ /* * linux/include/asm/arch_hooks.h * - * define the architecture specific hooks + * define the architecture specific hooks */ /* these aren't arch hooks, they are generic routines @@ -24,7 +24,4 @@ extern void trap_init_hook(void); extern void time_init_hook(void); extern void mca_nmi_hook(void); -extern int setup_early_printk(char *); -extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); - #endif diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h new file mode 100644 index 000000000000..1a6980a60fc6 --- /dev/null +++ b/include/asm-x86/asm.h @@ -0,0 +1,32 @@ +#ifndef _ASM_X86_ASM_H +#define _ASM_X86_ASM_H + +#ifdef CONFIG_X86_32 +/* 32 bits */ + +# define _ASM_PTR " .long " +# define _ASM_ALIGN " .balign 4 " +# define _ASM_MOV_UL " movl " + +# define _ASM_INC " incl " +# define _ASM_DEC " decl " +# define _ASM_ADD " addl " +# define _ASM_SUB " subl " +# define _ASM_XADD " xaddl " + +#else +/* 64 bits */ + +# define _ASM_PTR " .quad " +# define _ASM_ALIGN " .balign 8 " +# define _ASM_MOV_UL " movq " + +# define _ASM_INC " incq " +# define _ASM_DEC " decq " +# define _ASM_ADD " addq " +# define _ASM_SUB " subq " +# define _ASM_XADD " xaddq " + +#endif /* CONFIG_X86_32 */ + +#endif /* _ASM_X86_ASM_H */ diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 07e3f6d4fe47..1a23ce1a5697 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -1,5 +1,321 @@ +#ifndef _ASM_X86_BITOPS_H +#define _ASM_X86_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. 
+ */ + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <linux/compiler.h> +#include <asm/alternative.h> + +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + */ + +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) +/* Technically wrong, but this avoids compilation errors on some gcc + versions. */ +#define ADDR "=m" (*(volatile long *) addr) +#else +#define ADDR "+m" (*(volatile long *) addr) +#endif + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(int nr, volatile void *addr) +{ + asm volatile(LOCK_PREFIX "bts %1,%0" + : ADDR + : "Ir" (nr) : "memory"); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __set_bit(int nr, volatile void *addr) +{ + asm volatile("bts %1,%0" + : ADDR + : "Ir" (nr) : "memory"); +} + + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static inline void clear_bit(int nr, volatile void *addr) +{ + asm volatile(LOCK_PREFIX "btr %1,%0" + : ADDR + : "Ir" (nr)); +} + +/* + * clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and implies release semantics before the memory + * operation. It can be used for an unlock. + */ +static inline void clear_bit_unlock(unsigned nr, volatile void *addr) +{ + barrier(); + clear_bit(nr, addr); +} + +static inline void __clear_bit(int nr, volatile void *addr) +{ + asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); +} + +/* + * __clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * __clear_bit() is non-atomic and implies release semantics before the memory + * operation. It can be used for an unlock if no other CPUs can concurrently + * modify other bits in the word. + * + * No memory barrier is required here, because x86 cannot reorder stores past + * older loads. Same principle as spin_unlock. 
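To illustrate the acquire/release pairing documented for these helpers, a minimal bit-lock sketch follows. It is illustrative only and not part of this patch; EXAMPLE_BUSY_BIT and the helper name are invented, and it assumes the usual <linux/bitops.h> and <asm/processor.h> includes (the latter for cpu_relax()).

/*
 * Illustrative sketch only -- not part of this patch.  Pairs
 * test_and_set_bit_lock() and clear_bit_unlock() from this header to
 * build a simple bit lock; EXAMPLE_BUSY_BIT is an invented flag bit.
 */
#define EXAMPLE_BUSY_BIT 0

static inline void example_with_bit_lock(unsigned long *flags)
{
	/* acquire: spin until the bit was observed clear and is now set */
	while (test_and_set_bit_lock(EXAMPLE_BUSY_BIT, flags))
		cpu_relax();

	/* ... critical section guarded by EXAMPLE_BUSY_BIT ... */

	/* release: clear the bit with release semantics */
	clear_bit_unlock(EXAMPLE_BUSY_BIT, flags);
}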
+ */ +static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) +{ + barrier(); + __clear_bit(nr, addr); +} + +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile void *addr) +{ + asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(int nr, volatile void *addr) +{ + asm volatile(LOCK_PREFIX "btc %1,%0" + : ADDR : "Ir" (nr)); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_set_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm volatile(LOCK_PREFIX "bts %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr) : "memory"); + + return oldbit; +} + +/** + * test_and_set_bit_lock - Set a bit and return its old value for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This is the same as test_and_set_bit on x86. + */ +static inline int test_and_set_bit_lock(int nr, volatile void *addr) +{ + return test_and_set_bit(nr, addr); +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_set_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm("bts %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr)); + return oldbit; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_clear_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm volatile(LOCK_PREFIX "btr %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr) : "memory"); + + return oldbit; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm volatile("btr %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr)); + return oldbit; +} + +/* WARNING: non atomic and it can be reordered! 
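The sbb %0,%0 that follows each lock bts/btr/btc above turns the carry flag (the old bit value) into an all-zeroes or all-ones register, which is why these helpers promise "0 / != 0" rather than 0/1. A userspace sketch of that idiom, assuming GCC inline asm on x86 and a bit index below 32 so the single-word memory operand is sufficient; demo_test_and_set_bit is an illustrative name:

#include <stdio.h>

/* returns 0 if the bit was clear, -1 (all ones) if it was already set */
static int demo_test_and_set_bit(int nr, volatile unsigned long *word)
{
    int oldbit;

    __asm__ __volatile__("lock; bts %2,%1\n\t"
                         "sbb %0,%0"
                         : "=r" (oldbit), "+m" (*word)
                         : "Ir" (nr)
                         : "memory");
    return oldbit;
}

int main(void)
{
    volatile unsigned long word = 0;

    printf("first:  %d\n", demo_test_and_set_bit(5, &word));  /* 0  */
    printf("second: %d\n", demo_test_and_set_bit(5, &word));  /* -1 */
    printf("word = %#lx\n", (unsigned long)word);             /* 0x20 */
    return 0;
}
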
*/ +static inline int __test_and_change_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm volatile("btc %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr) : "memory"); + + return oldbit; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_change_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm volatile(LOCK_PREFIX "btc %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit), ADDR + : "Ir" (nr) : "memory"); + + return oldbit; +} + +static inline int constant_test_bit(int nr, const volatile void *addr) +{ + return ((1UL << (nr % BITS_PER_LONG)) & + (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; +} + +static inline int variable_test_bit(int nr, volatile const void *addr) +{ + int oldbit; + + asm volatile("bt %2,%1\n\t" + "sbb %0,%0" + : "=r" (oldbit) + : "m" (*(unsigned long *)addr), "Ir" (nr)); + + return oldbit; +} + +#if 0 /* Fool kernel-doc since it doesn't do macros yet */ +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static int test_bit(int nr, const volatile unsigned long *addr); +#endif + +#define test_bit(nr,addr) \ + (__builtin_constant_p(nr) ? \ + constant_test_bit((nr),(addr)) : \ + variable_test_bit((nr),(addr))) + +#undef ADDR + #ifdef CONFIG_X86_32 # include "bitops_32.h" #else # include "bitops_64.h" #endif + +#endif /* _ASM_X86_BITOPS_H */ diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index 0b40f6d20bea..e4d75fcf9c03 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -5,320 +5,12 @@ * Copyright 1992, Linus Torvalds. */ -#ifndef _LINUX_BITOPS_H -#error only <linux/bitops.h> can be included directly -#endif - -#include <linux/compiler.h> -#include <asm/alternative.h> - -/* - * These have to be done with inline assembly: that way the bit-setting - * is guaranteed to be atomic. All bit operations return 0 if the bit - * was cleared before the operation and != 0 if it was not. - * - * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). - */ - -#define ADDR (*(volatile long *) addr) - -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * - * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writing portable code, - * make sure not to rely on its reordering guarantees. - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void set_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. 
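The test_bit() wrapper above is a constant-folding trick: __builtin_constant_p routes compile-time-constant bit numbers through plain C (which the compiler can evaluate at build time), while run-time bit numbers go through the bt instruction. The shape of that dispatch, as a hedged userspace sketch with illustrative names and a plain-C stand-in for the asm path:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_ULONG (CHAR_BIT * sizeof(unsigned long))

static inline int demo_constant_test_bit(int nr, const unsigned long *addr)
{
    /* plain C: foldable at build time when nr is a constant */
    return (addr[nr / BITS_PER_ULONG] >> (nr % BITS_PER_ULONG)) & 1;
}

static int demo_variable_test_bit(int nr, const unsigned long *addr)
{
    /* stand-in for the "bt; sbb" asm path */
    return (addr[nr / BITS_PER_ULONG] >> (nr % BITS_PER_ULONG)) & 1;
}

#define demo_test_bit(nr, addr)                         \
    (__builtin_constant_p(nr)                           \
         ? demo_constant_test_bit((nr), (addr))         \
         : demo_variable_test_bit((nr), (addr)))

int main(void)
{
    unsigned long map[2] = { 0x10, 0x1 };
    int nr = (int)BITS_PER_ULONG;        /* not a literal constant */

    printf("%d %d\n", demo_test_bit(4, map), demo_test_bit(nr, map));
    return 0;
}
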
- */ -static inline void __set_bit(int nr, volatile unsigned long * addr) -{ - __asm__( - "btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and may not be reordered. However, it does - * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() - * in order to ensure changes are visible on other processors. - */ -static inline void clear_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/* - * clear_bit_unlock - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and implies release semantics before the memory - * operation. It can be used for an unlock. - */ -static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) -{ - barrier(); - clear_bit(nr, addr); -} - -static inline void __clear_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( - "btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/* - * __clear_bit_unlock - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * __clear_bit() is non-atomic and implies release semantics before the memory - * operation. It can be used for an unlock if no other CPUs can concurrently - * modify other bits in the word. - * - * No memory barrier is required here, because x86 cannot reorder stores past - * older loads. Same principle as spin_unlock. - */ -static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) -{ - barrier(); - __clear_bit(nr, addr); -} - -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( - "btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. It may be - * reordered on other architectures than x86. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void change_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It may be reordered on other architectures than x86. - * It also implies a memory barrier. 
- */ -static inline int test_and_set_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * test_and_set_bit_lock - Set a bit and return its old value for lock - * @nr: Bit to set - * @addr: Address to count from - * - * This is the same as test_and_set_bit on x86. - */ -static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr) -{ - return test_and_set_bit(nr, addr); -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__( - "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr)); - return oldbit; -} - -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It can be reorderdered on other architectures other than x86. - * It also implies a memory barrier. - */ -static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - int oldbit; - - __asm__( - "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr)); - return oldbit; -} - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) -{ - int oldbit; - - __asm__ __volatile__( - "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. 
- */ -static inline int test_and_change_bit(int nr, volatile unsigned long* addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -#if 0 /* Fool kernel-doc since it doesn't do macros yet */ -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static int test_bit(int nr, const volatile void * addr); -#endif - -static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) -{ - return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; -} - -static inline int variable_test_bit(int nr, const volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__( - "btl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit) - :"m" (ADDR),"Ir" (nr)); - return oldbit; -} - -#define test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? \ - constant_test_bit((nr),(addr)) : \ - variable_test_bit((nr),(addr))) - -#undef ADDR - /** * find_first_zero_bit - find the first zero bit in a memory region * @addr: The address to start the search at * @size: The maximum size to search * - * Returns the bit-number of the first zero bit, not the number of the byte + * Returns the bit number of the first zero bit, not the number of the byte * containing a bit. */ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) @@ -348,7 +40,7 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) /** * find_next_zero_bit - find the first zero bit in a memory region * @addr: The address to base the search on - * @offset: The bitnumber to start searching at + * @offset: The bit number to start searching at * @size: The maximum size to search */ int find_next_zero_bit(const unsigned long *addr, int size, int offset); @@ -372,7 +64,7 @@ static inline unsigned long __ffs(unsigned long word) * @addr: The address to start the search at * @size: The maximum size to search * - * Returns the bit-number of the first set bit, not the number of the byte + * Returns the bit number of the first set bit, not the number of the byte * containing a bit. */ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) @@ -391,7 +83,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) /** * find_next_bit - find the first set bit in a memory region * @addr: The address to base the search on - * @offset: The bitnumber to start searching at + * @offset: The bit number to start searching at * @size: The maximum size to search */ int find_next_bit(const unsigned long *addr, int size, int offset); @@ -460,10 +152,10 @@ static inline int fls(int x) #include <asm-generic/bitops/ext2-non-atomic.h> -#define ext2_set_bit_atomic(lock,nr,addr) \ - test_and_set_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit_atomic(lock,nr, addr) \ - test_and_clear_bit((nr),(unsigned long*)addr) +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit((nr), (unsigned long *)addr) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit((nr), (unsigned long *)addr) #include <asm-generic/bitops/minix.h> diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index 766bcc0470a6..48adbf56ca60 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -5,303 +5,6 @@ * Copyright 1992, Linus Torvalds. 
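find_first_zero_bit() and find_first_bit() above return a bit number, and return size when nothing is found. That contract is easy to mimic in portable C with __builtin_ctzl; a rough sketch assuming GCC builtins (this is not the kernel's scan-based implementation, just the semantics):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_ULONG (CHAR_BIT * sizeof(unsigned long))

/* returns the index of the first zero bit, or size if every bit is set */
static unsigned demo_find_first_zero_bit(const unsigned long *addr, unsigned size)
{
    unsigned i;

    for (i = 0; i < size; i += BITS_PER_ULONG) {
        unsigned long inverted = ~addr[i / BITS_PER_ULONG];

        if (inverted) {
            unsigned bit = i + __builtin_ctzl(inverted);
            return bit < size ? bit : size;
        }
    }
    return size;
}

int main(void)
{
    /* first zero bit sits in word 1, bit 3 */
    unsigned long map[2] = { ~0UL, ~0UL ^ (1UL << 3) };

    printf("%u\n", demo_find_first_zero_bit(map, 2 * BITS_PER_ULONG));
    return 0;
}
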
*/ -#ifndef _LINUX_BITOPS_H -#error only <linux/bitops.h> can be included directly -#endif - -#include <asm/alternative.h> - -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) -/* Technically wrong, but this avoids compilation errors on some gcc - versions. */ -#define ADDR "=m" (*(volatile long *) addr) -#else -#define ADDR "+m" (*(volatile long *) addr) -#endif - -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void set_bit(int nr, volatile void *addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btsl %1,%0" - :ADDR - :"dIr" (nr) : "memory"); -} - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(int nr, volatile void *addr) -{ - __asm__ volatile( - "btsl %1,%0" - :ADDR - :"dIr" (nr) : "memory"); -} - -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and may not be reordered. However, it does - * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() - * in order to ensure changes are visible on other processors. - */ -static inline void clear_bit(int nr, volatile void *addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btrl %1,%0" - :ADDR - :"dIr" (nr)); -} - -/* - * clear_bit_unlock - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and implies release semantics before the memory - * operation. It can be used for an unlock. - */ -static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) -{ - barrier(); - clear_bit(nr, addr); -} - -static inline void __clear_bit(int nr, volatile void *addr) -{ - __asm__ __volatile__( - "btrl %1,%0" - :ADDR - :"dIr" (nr)); -} - -/* - * __clear_bit_unlock - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * __clear_bit() is non-atomic and implies release semantics before the memory - * operation. It can be used for an unlock if no other CPUs can concurrently - * modify other bits in the word. - * - * No memory barrier is required here, because x86 cannot reorder stores past - * older loads. Same principle as spin_unlock. - */ -static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) -{ - barrier(); - __clear_bit(nr, addr); -} - -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. 
- */ -static inline void __change_bit(int nr, volatile void *addr) -{ - __asm__ __volatile__( - "btcl %1,%0" - :ADDR - :"dIr" (nr)); -} - -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void change_bit(int nr, volatile void *addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btcl %1,%0" - :ADDR - :"dIr" (nr)); -} - -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int test_and_set_bit(int nr, volatile void *addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),ADDR - :"dIr" (nr) : "memory"); - return oldbit; -} - -/** - * test_and_set_bit_lock - Set a bit and return its old value for lock - * @nr: Bit to set - * @addr: Address to count from - * - * This is the same as test_and_set_bit on x86. - */ -static inline int test_and_set_bit_lock(int nr, volatile void *addr) -{ - return test_and_set_bit(nr, addr); -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile void *addr) -{ - int oldbit; - - __asm__( - "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),ADDR - :"dIr" (nr)); - return oldbit; -} - -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int test_and_clear_bit(int nr, volatile void *addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),ADDR - :"dIr" (nr) : "memory"); - return oldbit; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile void *addr) -{ - int oldbit; - - __asm__( - "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),ADDR - :"dIr" (nr)); - return oldbit; -} - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, volatile void *addr) -{ - int oldbit; - - __asm__ __volatile__( - "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),ADDR - :"dIr" (nr) : "memory"); - return oldbit; -} - -/** - * test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. 
- */ -static inline int test_and_change_bit(int nr, volatile void *addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),ADDR - :"dIr" (nr) : "memory"); - return oldbit; -} - -#if 0 /* Fool kernel-doc since it doesn't do macros yet */ -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static int test_bit(int nr, const volatile void *addr); -#endif - -static inline int constant_test_bit(int nr, const volatile void *addr) -{ - return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; -} - -static inline int variable_test_bit(int nr, volatile const void *addr) -{ - int oldbit; - - __asm__ __volatile__( - "btl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit) - :"m" (*(volatile long *)addr),"dIr" (nr)); - return oldbit; -} - -#define test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? \ - constant_test_bit((nr),(addr)) : \ - variable_test_bit((nr),(addr))) - -#undef ADDR - extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); extern long find_next_zero_bit(const unsigned long *addr, long size, long offset); extern long find_first_bit(const unsigned long *addr, unsigned long size); diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h index 19f3ddf2df4b..51151356840f 100644 --- a/include/asm-x86/bootparam.h +++ b/include/asm-x86/bootparam.h @@ -54,13 +54,14 @@ struct sys_desc_table { }; struct efi_info { - __u32 _pad1; + __u32 efi_loader_signature; __u32 efi_systab; __u32 efi_memdesc_size; __u32 efi_memdesc_version; __u32 efi_memmap; __u32 efi_memmap_size; - __u32 _pad2[2]; + __u32 efi_systab_hi; + __u32 efi_memmap_hi; }; /* The so-called "zeropage" */ diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index fd8bdc639c48..8d477a201392 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h @@ -33,9 +33,6 @@ } while(0) #endif -void out_of_line_bug(void); -#else /* CONFIG_BUG */ -static inline void out_of_line_bug(void) { } #endif /* !CONFIG_BUG */ #include <asm-generic/bug.h> diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h index aac8317420af..3fcc30dc0731 100644 --- a/include/asm-x86/bugs.h +++ b/include/asm-x86/bugs.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_BUGS_H #define _ASM_X86_BUGS_H -void check_bugs(void); +extern void check_bugs(void); +extern int ppro_with_ram_bug(void); #endif /* _ASM_X86_BUGS_H */ diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 9411a2d3f19c..8dd8c5e3cc7f 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -24,18 +24,35 @@ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) -void global_flush_tlb(void); -int change_page_attr(struct page *page, int numpages, pgprot_t prot); -int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); -void clflush_cache_range(void *addr, int size); - -#ifdef CONFIG_DEBUG_PAGEALLOC -/* internal debugging function */ -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif +int __deprecated_for_modules change_page_attr(struct page *page, int numpages, + pgprot_t prot); + +int set_pages_uc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); +int set_pages_x(struct page *page, int numpages); +int set_pages_nx(struct page *page, int numpages); +int set_pages_ro(struct page *page, int numpages); +int set_pages_rw(struct page *page, int numpages); + +int set_memory_uc(unsigned long 
addr, int numpages); +int set_memory_wb(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_np(unsigned long addr, int numpages); + +void clflush_cache_range(void *addr, unsigned int size); #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif +#ifdef CONFIG_DEBUG_RODATA_TEST +void rodata_test(void); +#else +static inline void rodata_test(void) +{ +} +#endif #endif diff --git a/include/asm-x86/calling.h b/include/asm-x86/calling.h index 6f4f63af96e1..f13e62e2cb3e 100644 --- a/include/asm-x86/calling.h +++ b/include/asm-x86/calling.h @@ -1,162 +1,168 @@ -/* +/* * Some macros to handle stack frames in assembly. - */ + */ +#define R15 0 +#define R14 8 +#define R13 16 +#define R12 24 +#define RBP 32 +#define RBX 40 -#define R15 0 -#define R14 8 -#define R13 16 -#define R12 24 -#define RBP 32 -#define RBX 40 /* arguments: interrupts/non tracing syscalls only save upto here*/ -#define R11 48 -#define R10 56 -#define R9 64 -#define R8 72 -#define RAX 80 -#define RCX 88 -#define RDX 96 -#define RSI 104 -#define RDI 112 -#define ORIG_RAX 120 /* + error_code */ -/* end of arguments */ +#define R11 48 +#define R10 56 +#define R9 64 +#define R8 72 +#define RAX 80 +#define RCX 88 +#define RDX 96 +#define RSI 104 +#define RDI 112 +#define ORIG_RAX 120 /* + error_code */ +/* end of arguments */ + /* cpu exception frame or undefined in case of fast syscall. */ -#define RIP 128 -#define CS 136 -#define EFLAGS 144 -#define RSP 152 -#define SS 160 -#define ARGOFFSET R11 -#define SWFRAME ORIG_RAX +#define RIP 128 +#define CS 136 +#define EFLAGS 144 +#define RSP 152 +#define SS 160 + +#define ARGOFFSET R11 +#define SWFRAME ORIG_RAX - .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0 - subq $9*8+\addskip,%rsp + .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 + subq $9*8+\addskip, %rsp CFI_ADJUST_CFA_OFFSET 9*8+\addskip - movq %rdi,8*8(%rsp) - CFI_REL_OFFSET rdi,8*8 - movq %rsi,7*8(%rsp) - CFI_REL_OFFSET rsi,7*8 - movq %rdx,6*8(%rsp) - CFI_REL_OFFSET rdx,6*8 + movq %rdi, 8*8(%rsp) + CFI_REL_OFFSET rdi, 8*8 + movq %rsi, 7*8(%rsp) + CFI_REL_OFFSET rsi, 7*8 + movq %rdx, 6*8(%rsp) + CFI_REL_OFFSET rdx, 6*8 .if \norcx .else - movq %rcx,5*8(%rsp) - CFI_REL_OFFSET rcx,5*8 + movq %rcx, 5*8(%rsp) + CFI_REL_OFFSET rcx, 5*8 .endif - movq %rax,4*8(%rsp) - CFI_REL_OFFSET rax,4*8 + movq %rax, 4*8(%rsp) + CFI_REL_OFFSET rax, 4*8 .if \nor891011 .else - movq %r8,3*8(%rsp) - CFI_REL_OFFSET r8,3*8 - movq %r9,2*8(%rsp) - CFI_REL_OFFSET r9,2*8 - movq %r10,1*8(%rsp) - CFI_REL_OFFSET r10,1*8 - movq %r11,(%rsp) - CFI_REL_OFFSET r11,0*8 + movq %r8, 3*8(%rsp) + CFI_REL_OFFSET r8, 3*8 + movq %r9, 2*8(%rsp) + CFI_REL_OFFSET r9, 2*8 + movq %r10, 1*8(%rsp) + CFI_REL_OFFSET r10, 1*8 + movq %r11, (%rsp) + CFI_REL_OFFSET r11, 0*8 .endif .endm -#define ARG_SKIP 9*8 - .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0 +#define ARG_SKIP 9*8 + + .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ + skipr8910=0, skiprdx=0 .if \skipr11 .else - movq (%rsp),%r11 + movq (%rsp), %r11 CFI_RESTORE r11 .endif .if \skipr8910 .else - movq 1*8(%rsp),%r10 + movq 1*8(%rsp), %r10 CFI_RESTORE r10 - movq 2*8(%rsp),%r9 + movq 2*8(%rsp), %r9 CFI_RESTORE r9 - movq 3*8(%rsp),%r8 + movq 3*8(%rsp), %r8 CFI_RESTORE r8 .endif .if \skiprax .else - movq 4*8(%rsp),%rax + movq 4*8(%rsp), %rax CFI_RESTORE rax .endif 
.if \skiprcx .else - movq 5*8(%rsp),%rcx + movq 5*8(%rsp), %rcx CFI_RESTORE rcx .endif .if \skiprdx .else - movq 6*8(%rsp),%rdx + movq 6*8(%rsp), %rdx CFI_RESTORE rdx .endif - movq 7*8(%rsp),%rsi + movq 7*8(%rsp), %rsi CFI_RESTORE rsi - movq 8*8(%rsp),%rdi + movq 8*8(%rsp), %rdi CFI_RESTORE rdi .if ARG_SKIP+\addskip > 0 - addq $ARG_SKIP+\addskip,%rsp + addq $ARG_SKIP+\addskip, %rsp CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) .endif - .endm + .endm .macro LOAD_ARGS offset - movq \offset(%rsp),%r11 - movq \offset+8(%rsp),%r10 - movq \offset+16(%rsp),%r9 - movq \offset+24(%rsp),%r8 - movq \offset+40(%rsp),%rcx - movq \offset+48(%rsp),%rdx - movq \offset+56(%rsp),%rsi - movq \offset+64(%rsp),%rdi - movq \offset+72(%rsp),%rax + movq \offset(%rsp), %r11 + movq \offset+8(%rsp), %r10 + movq \offset+16(%rsp), %r9 + movq \offset+24(%rsp), %r8 + movq \offset+40(%rsp), %rcx + movq \offset+48(%rsp), %rdx + movq \offset+56(%rsp), %rsi + movq \offset+64(%rsp), %rdi + movq \offset+72(%rsp), %rax .endm - -#define REST_SKIP 6*8 + +#define REST_SKIP 6*8 + .macro SAVE_REST - subq $REST_SKIP,%rsp + subq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET REST_SKIP - movq %rbx,5*8(%rsp) - CFI_REL_OFFSET rbx,5*8 - movq %rbp,4*8(%rsp) - CFI_REL_OFFSET rbp,4*8 - movq %r12,3*8(%rsp) - CFI_REL_OFFSET r12,3*8 - movq %r13,2*8(%rsp) - CFI_REL_OFFSET r13,2*8 - movq %r14,1*8(%rsp) - CFI_REL_OFFSET r14,1*8 - movq %r15,(%rsp) - CFI_REL_OFFSET r15,0*8 - .endm + movq %rbx, 5*8(%rsp) + CFI_REL_OFFSET rbx, 5*8 + movq %rbp, 4*8(%rsp) + CFI_REL_OFFSET rbp, 4*8 + movq %r12, 3*8(%rsp) + CFI_REL_OFFSET r12, 3*8 + movq %r13, 2*8(%rsp) + CFI_REL_OFFSET r13, 2*8 + movq %r14, 1*8(%rsp) + CFI_REL_OFFSET r14, 1*8 + movq %r15, (%rsp) + CFI_REL_OFFSET r15, 0*8 + .endm .macro RESTORE_REST - movq (%rsp),%r15 + movq (%rsp), %r15 CFI_RESTORE r15 - movq 1*8(%rsp),%r14 + movq 1*8(%rsp), %r14 CFI_RESTORE r14 - movq 2*8(%rsp),%r13 + movq 2*8(%rsp), %r13 CFI_RESTORE r13 - movq 3*8(%rsp),%r12 + movq 3*8(%rsp), %r12 CFI_RESTORE r12 - movq 4*8(%rsp),%rbp + movq 4*8(%rsp), %rbp CFI_RESTORE rbp - movq 5*8(%rsp),%rbx + movq 5*8(%rsp), %rbx CFI_RESTORE rbx - addq $REST_SKIP,%rsp + addq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET -(REST_SKIP) .endm - + .macro SAVE_ALL SAVE_ARGS SAVE_REST .endm - + .macro RESTORE_ALL addskip=0 RESTORE_REST - RESTORE_ARGS 0,\addskip + RESTORE_ARGS 0, \addskip .endm .macro icebp .byte 0xf1 .endm + diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index 419fe88a0342..e5f79997decc 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h @@ -4,7 +4,7 @@ /* * Checksums for x86-64 * Copyright 2002 by Andi Kleen, SuSE Labs - * with some code from asm-i386/checksum.h + * with some code from asm-x86/checksum.h */ #include <linux/compiler.h> diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index f86ede28f6dc..cea1dae288a7 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h @@ -105,15 +105,24 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 -#define cmpxchg(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) -#define sync_cmpxchg(ptr,o,n)\ - ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) -#define cmpxchg_local(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) +#define cmpxchg(ptr, o, n) \ + 
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) +#define sync_cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) +#endif + +#ifdef CONFIG_X86_CMPXCHG64 +#define cmpxchg64(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) +#define cmpxchg64_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\ + (unsigned long long)(n))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, @@ -203,6 +212,34 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, return old; } +static inline unsigned long long __cmpxchg64(volatile void *ptr, + unsigned long long old, unsigned long long new) +{ + unsigned long long prev; + __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" + : "=A"(prev) + : "b"((unsigned long)new), + "c"((unsigned long)(new >> 32)), + "m"(*__xg(ptr)), + "0"(old) + : "memory"); + return prev; +} + +static inline unsigned long long __cmpxchg64_local(volatile void *ptr, + unsigned long long old, unsigned long long new) +{ + unsigned long long prev; + __asm__ __volatile__("cmpxchg8b %3" + : "=A"(prev) + : "b"((unsigned long)new), + "c"((unsigned long)(new >> 32)), + "m"(*__xg(ptr)), + "0"(old) + : "memory"); + return prev; +} + #ifndef CONFIG_X86_CMPXCHG /* * Building a kernel capable running on 80386. It may be necessary to @@ -228,7 +265,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, return old; } -#define cmpxchg(ptr,o,n) \ +#define cmpxchg(ptr, o, n) \ ({ \ __typeof__(*(ptr)) __ret; \ if (likely(boot_cpu_data.x86 > 3)) \ @@ -239,7 +276,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, (unsigned long)(n), sizeof(*(ptr))); \ __ret; \ }) -#define cmpxchg_local(ptr,o,n) \ +#define cmpxchg_local(ptr, o, n) \ ({ \ __typeof__(*(ptr)) __ret; \ if (likely(boot_cpu_data.x86 > 3)) \ @@ -252,38 +289,37 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, }) #endif -static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, - unsigned long long new) -{ - unsigned long long prev; - __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); - return prev; -} +#ifndef CONFIG_X86_CMPXCHG64 +/* + * Building a kernel capable running on 80386 and 80486. It may be necessary + * to simulate the cmpxchg8b on the 80386 and 80486 CPU. 
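cmpxchg64() above keeps the usual cmpxchg contract at 64 bits: it returns the value that was actually in memory, so success means the return equals the old value passed in. Outside the kernel the same contract can be expressed with the GCC __sync builtins; a small sketch, assuming a CPU with cmpxchg8b-class support (the 486 fallback above exists precisely to avoid requiring it), with demo_cmpxchg64 as an illustrative name:

#include <stdio.h>
#include <stdint.h>

/* same convention as cmpxchg64(): returns the previous memory contents */
static uint64_t demo_cmpxchg64(volatile uint64_t *ptr, uint64_t old, uint64_t new_)
{
    return __sync_val_compare_and_swap(ptr, old, new_);
}

int main(void)
{
    volatile uint64_t v = 0x100000001ULL;

    uint64_t prev = demo_cmpxchg64(&v, 0x100000001ULL, 42);   /* succeeds */
    printf("prev=%#llx now=%#llx\n",
           (unsigned long long)prev, (unsigned long long)v);

    prev = demo_cmpxchg64(&v, 7, 99);                         /* fails: 42 != 7 */
    printf("prev=%#llx now=%#llx\n",
           (unsigned long long)prev, (unsigned long long)v);
    return 0;
}
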
+ */ -static inline unsigned long long __cmpxchg64_local(volatile void *ptr, - unsigned long long old, unsigned long long new) -{ - unsigned long long prev; - __asm__ __volatile__("cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); - return prev; -} +extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); + +#define cmpxchg64(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + if (likely(boot_cpu_data.x86 > 4)) \ + __ret = __cmpxchg64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n)); \ + else \ + __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n)); \ + __ret; \ +}) +#define cmpxchg64_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + if (likely(boot_cpu_data.x86 > 4)) \ + __ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \ + (unsigned long long)(n)); \ + else \ + __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n)); \ + __ret; \ +}) + +#endif -#define cmpxchg64(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ - (unsigned long long)(n))) -#define cmpxchg64_local(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\ - (unsigned long long)(n))) #endif diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index 66ba7987184a..b270ee04959e 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h @@ -207,7 +207,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) static __inline__ void __user *compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); - return (void __user *)regs->rsp - len; + return (void __user *)regs->sp - len; } static inline int is_compat_task(void) diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h index b1bc7b1b64b0..85ece5f10e9e 100644 --- a/include/asm-x86/cpu.h +++ b/include/asm-x86/cpu.h @@ -7,7 +7,7 @@ #include <linux/nodemask.h> #include <linux/percpu.h> -struct i386_cpu { +struct x86_cpu { struct cpu cpu; }; extern int arch_register_cpu(int num); diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index b7160a4598d7..3fb7dfa7fc91 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -1,5 +1,207 @@ -#ifdef CONFIG_X86_32 -# include "cpufeature_32.h" +/* + * Defines x86 CPU feature bits + */ +#ifndef _ASM_X86_CPUFEATURE_H +#define _ASM_X86_CPUFEATURE_H + +#ifndef __ASSEMBLY__ +#include <linux/bitops.h> +#endif +#include <asm/required-features.h> + +#define NCAPINTS 8 /* N 32-bit words worth of info */ + +/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ +#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ +#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ +#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ +#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA 
(0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ +#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ +#define X86_FEATURE_DS (0*32+21) /* Debug Store */ +#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ + /* of FPU context), and CR4.OSFXSR available */ +#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ +#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ +#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ +#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ +#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ + +/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ +/* Don't duplicate feature flags which are redundant with Intel! */ +#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ +#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ +#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ +#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ + +/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ +#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ + +/* Other features, Linux-defined mapping, word 3 */ +/* This range is used for feature bits which conflict or are synthesized */ +#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ +#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ +/* cpu types for specific tunings: */ +#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ +#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ +#define X86_FEATURE_P3 (3*32+ 6) /* P3 */ +#define X86_FEATURE_P4 (3*32+ 7) /* P4 */ +#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ +#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ +/* 14 free */ +/* 15 free */ +#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ +#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ + +/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ +#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ +#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ +#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified 
Debug Store */ +#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_CID (4*32+10) /* Context ID */ +#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ +#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ + +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ +#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ +#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ +#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ +#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ +#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ + +/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ +#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ + +/* + * Auxiliary flags: Linux defined - For features scattered in various + * CPUID levels like 0x6, 0xA etc + */ +#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ + +#define cpu_has(c, bit) \ + (__builtin_constant_p(bit) && \ + ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ + (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ + (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ + (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ + (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ + (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ + (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ + (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ + ? 
1 : \ + test_bit(bit, (unsigned long *)((c)->x86_capability))) +#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) + +#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) +#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) +#define setup_clear_cpu_cap(bit) do { \ + clear_cpu_cap(&boot_cpu_data, bit); \ + set_bit(bit, cleared_cpu_caps); \ +} while (0) +#define setup_force_cpu_cap(bit) do { \ + set_cpu_cap(&boot_cpu_data, bit); \ + clear_bit(bit, cleared_cpu_caps); \ +} while (0) + +#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) +#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) +#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) +#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) +#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) +#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) +#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) +#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) +#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) +#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) +#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) +#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) +#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) +#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) +#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) +#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) +#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) +#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) +#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) +#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) +#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) +#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) +#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) +#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) +#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) +#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) +#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) +#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) +#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) +#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) +#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) +#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) +#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) +#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) +#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) + +#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) +# define cpu_has_invlpg 1 #else -# include "cpufeature_64.h" +# define cpu_has_invlpg (boot_cpu_data.x86 > 3) #endif + +#ifdef CONFIG_X86_64 + +#undef cpu_has_vme +#define cpu_has_vme 0 + +#undef cpu_has_pae +#define cpu_has_pae ___BUG___ + +#undef cpu_has_mp +#define cpu_has_mp 1 + +#undef cpu_has_k6_mtrr +#define cpu_has_k6_mtrr 0 + +#undef cpu_has_cyrix_arr +#define cpu_has_cyrix_arr 0 + +#undef cpu_has_centaur_mcr +#define cpu_has_centaur_mcr 0 + +#endif /* CONFIG_X86_64 */ + +#endif /* _ASM_X86_CPUFEATURE_H */ diff --git a/include/asm-x86/cpufeature_32.h b/include/asm-x86/cpufeature_32.h deleted file mode 100644 index f17e688dfb05..000000000000 --- a/include/asm-x86/cpufeature_32.h +++ /dev/null @@ -1,176 +0,0 @@ -/* - * cpufeature.h - * - * Defines x86 CPU feature bits - */ - -#ifndef __ASM_I386_CPUFEATURE_H -#define __ASM_I386_CPUFEATURE_H - -#ifndef __ASSEMBLY__ -#include <linux/bitops.h> -#endif -#include <asm/required-features.h> - -#define NCAPINTS 8 /* N 32-bit words worth of info */ - -/* 
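The cpu_has() macro above short-circuits to 1 when the feature bit is a compile-time constant already guaranteed by the build (one of the REQUIRED_MASK* words), and only falls back to test_bit() on x86_capability otherwise, so required features cost nothing at run time. The shape of that dispatch, as a hedged sketch with made-up DEMO_ mask and capability values:

#include <stdio.h>

/* word 0 features we pretend the build guarantees (illustrative values) */
#define DEMO_REQUIRED_MASK0  ((1U << 0) | (1U << 4))   /* "FPU", "TSC" */

struct demo_cpuinfo {
    unsigned caps[8];            /* runtime copy of the CPUID feature words */
};

static int demo_runtime_test(int bit, const struct demo_cpuinfo *c)
{
    return (c->caps[bit >> 5] >> (bit & 31)) & 1;
}

/* constant bits covered by the required mask fold to 1 at compile time */
#define demo_cpu_has(c, bit)                                          \
    ((__builtin_constant_p(bit) && ((bit) >> 5) == 0 &&               \
      (DEMO_REQUIRED_MASK0 & (1U << ((bit) & 31))))                   \
         ? 1                                                          \
         : demo_runtime_test((bit), (c)))

int main(void)
{
    struct demo_cpuinfo cpu = { .caps = { (1U << 0) | (1U << 25) } };

    printf("bit 0  (required):     %d\n", demo_cpu_has(&cpu, 0));   /* folds to 1 */
    printf("bit 25 (runtime set):  %d\n", demo_cpu_has(&cpu, 25));  /* tested, 1 */
    printf("bit 9  (runtime clear):%d\n", demo_cpu_has(&cpu, 9));   /* tested, 0 */
    return 0;
}
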
Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ -#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ -#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ -#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ -#define X86_FEATURE_DS (0*32+21) /* Debug Store */ -#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ - /* of FPU context), and CR4.OSFXSR available */ -#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ -#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ -#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ -#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ -#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ - -/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ -/* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! 
*/ - -/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ - -/* Other features, Linux-defined mapping, word 3 */ -/* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ -#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ -#define X86_FEATURE_P3 (3*32+ 6) /* P3 */ -#define X86_FEATURE_P4 (3*32+ 7) /* P4 */ -#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ -/* 14 free */ -#define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */ -#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ -#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ -#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ -#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_CID (4*32+10) /* Context ID */ -#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ - -/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ -#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ -#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ -#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ -#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ - -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ - -/* - * Auxiliary flags: Linux defined - For features scattered in various - * CPUID levels like 0x6, 0xA etc - */ -#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ - -#define cpu_has(c, bit) \ - (__builtin_constant_p(bit) && \ - ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ - (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ - (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ - 
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ - (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ - (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ - (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ - (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ - ? 1 : \ - test_bit(bit, (c)->x86_capability)) -#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) - -#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) -#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) -#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) -#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) -#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) -#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) -#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) -#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) -#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) -#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) -#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) -#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) -#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) -#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) -#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) -#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) -#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) -#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) -#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) -#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) -#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) -#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) -#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) -#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) -#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) -#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) -#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) -#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) -#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) -#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) -#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) -#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) -#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) -#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) -#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) - -#endif /* __ASM_I386_CPUFEATURE_H */ - -/* - * Local Variables: - * mode:c - * comment-column:42 - * End: - */ diff --git a/include/asm-x86/cpufeature_64.h b/include/asm-x86/cpufeature_64.h deleted file mode 100644 index e18496b7b850..000000000000 --- a/include/asm-x86/cpufeature_64.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * cpufeature_32.h - * - * Defines x86 CPU feature bits - */ - -#ifndef __ASM_X8664_CPUFEATURE_H -#define __ASM_X8664_CPUFEATURE_H - -#include "cpufeature_32.h" - -#undef cpu_has_vme -#define cpu_has_vme 0 - -#undef cpu_has_pae -#define cpu_has_pae ___BUG___ - -#undef cpu_has_mp -#define cpu_has_mp 1 /* XXX */ - -#undef cpu_has_k6_mtrr -#define cpu_has_k6_mtrr 0 - -#undef cpu_has_cyrix_arr -#define cpu_has_cyrix_arr 0 - -#undef cpu_has_centaur_mcr -#define cpu_has_centaur_mcr 0 - -#endif /* __ASM_X8664_CPUFEATURE_H */ diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index 6065c5092265..5b6a05d3a771 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h @@ -1,5 +1,381 @@ +#ifndef _ASM_DESC_H_ +#define _ASM_DESC_H_ + +#ifndef __ASSEMBLY__ +#include <asm/desc_defs.h> +#include <asm/ldt.h> +#include <asm/mmu.h> +#include <linux/smp.h> + +static inline void fill_ldt(struct 
desc_struct *desc, + const struct user_desc *info) +{ + desc->limit0 = info->limit & 0x0ffff; + desc->base0 = info->base_addr & 0x0000ffff; + + desc->base1 = (info->base_addr & 0x00ff0000) >> 16; + desc->type = (info->read_exec_only ^ 1) << 1; + desc->type |= info->contents << 2; + desc->s = 1; + desc->dpl = 0x3; + desc->p = info->seg_not_present ^ 1; + desc->limit = (info->limit & 0xf0000) >> 16; + desc->avl = info->useable; + desc->d = info->seg_32bit; + desc->g = info->limit_in_pages; + desc->base2 = (info->base_addr & 0xff000000) >> 24; +} + +extern struct desc_ptr idt_descr; +extern gate_desc idt_table[]; + +#ifdef CONFIG_X86_64 +extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; +extern struct desc_ptr cpu_gdt_descr[]; +/* the cpu gdt accessor */ +#define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address) + +static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, + unsigned dpl, unsigned ist, unsigned seg) +{ + gate->offset_low = PTR_LOW(func); + gate->segment = __KERNEL_CS; + gate->ist = ist; + gate->p = 1; + gate->dpl = dpl; + gate->zero0 = 0; + gate->zero1 = 0; + gate->type = type; + gate->offset_middle = PTR_MIDDLE(func); + gate->offset_high = PTR_HIGH(func); +} + +#else +struct gdt_page { + struct desc_struct gdt[GDT_ENTRIES]; +} __attribute__((aligned(PAGE_SIZE))); +DECLARE_PER_CPU(struct gdt_page, gdt_page); + +static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) +{ + return per_cpu(gdt_page, cpu).gdt; +} + +static inline void pack_gate(gate_desc *gate, unsigned char type, + unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) + +{ + gate->a = (seg << 16) | (base & 0xffff); + gate->b = (base & 0xffff0000) | + (((0x80 | type | (dpl << 5)) & 0xff) << 8); +} + +#endif + +static inline int desc_empty(const void *ptr) +{ + const u32 *desc = ptr; + return !(desc[0] | desc[1]); +} + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#define load_TR_desc() native_load_tr_desc() +#define load_gdt(dtr) native_load_gdt(dtr) +#define load_idt(dtr) native_load_idt(dtr) +#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) +#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) + +#define store_gdt(dtr) native_store_gdt(dtr) +#define store_idt(dtr) native_store_idt(dtr) +#define store_tr(tr) (tr = native_store_tr()) +#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) + +#define load_TLS(t, cpu) native_load_tls(t, cpu) +#define set_ldt native_set_ldt + +#define write_ldt_entry(dt, entry, desc) \ + native_write_ldt_entry(dt, entry, desc) +#define write_gdt_entry(dt, entry, desc, type) \ + native_write_gdt_entry(dt, entry, desc, type) +#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) +#endif + +static inline void native_write_idt_entry(gate_desc *idt, int entry, + const gate_desc *gate) +{ + memcpy(&idt[entry], gate, sizeof(*gate)); +} + +static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, + const void *desc) +{ + memcpy(&ldt[entry], desc, 8); +} + +static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, + const void *desc, int type) +{ + unsigned int size; + switch (type) { + case DESC_TSS: + size = sizeof(tss_desc); + break; + case DESC_LDT: + size = sizeof(ldt_desc); + break; + default: + size = sizeof(struct desc_struct); + break; + } + memcpy(&gdt[entry], desc, size); +} + +static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, + unsigned long limit, unsigned char type, + unsigned char 
flags) +{ + desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); + desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | + (limit & 0x000f0000) | ((type & 0xff) << 8) | + ((flags & 0xf) << 20); + desc->p = 1; +} + + +static inline void set_tssldt_descriptor(void *d, unsigned long addr, + unsigned type, unsigned size) +{ +#ifdef CONFIG_X86_64 + struct ldttss_desc64 *desc = d; + memset(desc, 0, sizeof(*desc)); + desc->limit0 = size & 0xFFFF; + desc->base0 = PTR_LOW(addr); + desc->base1 = PTR_MIDDLE(addr) & 0xFF; + desc->type = type; + desc->p = 1; + desc->limit1 = (size >> 16) & 0xF; + desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; + desc->base3 = PTR_HIGH(addr); +#else + + pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); +#endif +} + +static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) +{ + struct desc_struct *d = get_cpu_gdt_table(cpu); + tss_desc tss; + + /* + * sizeof(unsigned long) coming from an extra "long" at the end + * of the iobitmap. See tss_struct definition in processor.h + * + * -1? seg base+limit should be pointing to the address of the + * last valid byte + */ + set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, + IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); + write_gdt_entry(d, entry, &tss, DESC_TSS); +} + +#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) + +static inline void native_set_ldt(const void *addr, unsigned int entries) +{ + if (likely(entries == 0)) + __asm__ __volatile__("lldt %w0"::"q" (0)); + else { + unsigned cpu = smp_processor_id(); + ldt_desc ldt; + + set_tssldt_descriptor(&ldt, (unsigned long)addr, + DESC_LDT, entries * sizeof(ldt) - 1); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, + &ldt, DESC_LDT); + __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); + } +} + +static inline void native_load_tr_desc(void) +{ + asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); +} + +static inline void native_load_gdt(const struct desc_ptr *dtr) +{ + asm volatile("lgdt %0"::"m" (*dtr)); +} + +static inline void native_load_idt(const struct desc_ptr *dtr) +{ + asm volatile("lidt %0"::"m" (*dtr)); +} + +static inline void native_store_gdt(struct desc_ptr *dtr) +{ + asm volatile("sgdt %0":"=m" (*dtr)); +} + +static inline void native_store_idt(struct desc_ptr *dtr) +{ + asm volatile("sidt %0":"=m" (*dtr)); +} + +static inline unsigned long native_store_tr(void) +{ + unsigned long tr; + asm volatile("str %0":"=r" (tr)); + return tr; +} + +static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) +{ + unsigned int i; + struct desc_struct *gdt = get_cpu_gdt_table(cpu); + + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; +} + +#define _LDT_empty(info) (\ + (info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ + (info)->read_exec_only == 1 && \ + (info)->seg_32bit == 0 && \ + (info)->limit_in_pages == 0 && \ + (info)->seg_not_present == 1 && \ + (info)->useable == 0) + +#ifdef CONFIG_X86_64 +#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) +#else +#define LDT_empty(info) (_LDT_empty(info)) +#endif + +static inline void clear_LDT(void) +{ + set_ldt(NULL, 0); +} + +/* + * load one particular LDT into the current CPU + */ +static inline void load_LDT_nolock(mm_context_t *pc) +{ + set_ldt(pc->ldt, pc->size); +} + +static inline void load_LDT(mm_context_t *pc) +{ + preempt_disable(); + load_LDT_nolock(pc); + preempt_enable(); +} + +static inline unsigned 
long get_desc_base(const struct desc_struct *desc) +{ + return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24); +} + +static inline unsigned long get_desc_limit(const struct desc_struct *desc) +{ + return desc->limit0 | (desc->limit << 16); +} + +static inline void _set_gate(int gate, unsigned type, void *addr, + unsigned dpl, unsigned ist, unsigned seg) +{ + gate_desc s; + pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); + /* + * does not need to be atomic because it is only done once at + * setup time + */ + write_idt_entry(idt_table, gate, &s); +} + +/* + * This needs to use 'idt_table' rather than 'idt', and + * thus use the _nonmapped_ version of the IDT, as the + * Pentium F0 0F bugfix can have resulted in the mapped + * IDT being write-protected. + */ +static inline void set_intr_gate(unsigned int n, void *addr) +{ + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); +} + +/* + * This routine sets up an interrupt gate at directory privilege level 3. + */ +static inline void set_system_intr_gate(unsigned int n, void *addr) +{ + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); +} + +static inline void set_trap_gate(unsigned int n, void *addr) +{ + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); +} + +static inline void set_system_gate(unsigned int n, void *addr) +{ + BUG_ON((unsigned)n > 0xFF); #ifdef CONFIG_X86_32 -# include "desc_32.h" + _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); +#else + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); +#endif +} + +static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) +{ + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); +} + +static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) +{ + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); +} + +static inline void set_system_gate_ist(int n, void *addr, unsigned ist) +{ + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); +} + #else -# include "desc_64.h" +/* + * GET_DESC_BASE reads the descriptor base of the specified segment. + * + * Args: + * idx - descriptor index + * gdt - GDT pointer + * base - 32bit register to which the base will be written + * lo_w - lo word of the "base" register + * lo_b - lo byte of the "base" register + * hi_b - hi byte of the low word of the "base" register + * + * Example: + * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) + * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. 
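[Editorial aside, not part of the patch: a minimal sketch of how the accessor helpers introduced in the unified desc.h above — fill_ldt(), get_desc_base() and get_desc_limit() — fit together. The function and variable names below are invented for illustration; only the helpers themselves come from the hunk.]

/* Illustration only: pack a user_desc and read its base back out. */
static unsigned long example_ldt_entry_base(const struct user_desc *info)
{
	struct desc_struct d;

	fill_ldt(&d, info);		/* pack into the 8-byte descriptor layout */
	/* get_desc_base() reassembles base0 | base1 << 16 | base2 << 24 */
	return get_desc_base(&d);
}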
+ */ +#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ + movb idx*8+4(gdt), lo_b; \ + movb idx*8+7(gdt), hi_b; \ + shll $16, base; \ + movw idx*8+2(gdt), lo_w; + + +#endif /* __ASSEMBLY__ */ + #endif diff --git a/include/asm-x86/desc_32.h b/include/asm-x86/desc_32.h deleted file mode 100644 index c547403f341d..000000000000 --- a/include/asm-x86/desc_32.h +++ /dev/null @@ -1,244 +0,0 @@ -#ifndef __ARCH_DESC_H -#define __ARCH_DESC_H - -#include <asm/ldt.h> -#include <asm/segment.h> - -#ifndef __ASSEMBLY__ - -#include <linux/preempt.h> -#include <linux/smp.h> -#include <linux/percpu.h> - -#include <asm/mmu.h> - -struct Xgt_desc_struct { - unsigned short size; - unsigned long address __attribute__((packed)); - unsigned short pad; -} __attribute__ ((packed)); - -struct gdt_page -{ - struct desc_struct gdt[GDT_ENTRIES]; -} __attribute__((aligned(PAGE_SIZE))); -DECLARE_PER_CPU(struct gdt_page, gdt_page); - -static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) -{ - return per_cpu(gdt_page, cpu).gdt; -} - -extern struct Xgt_desc_struct idt_descr; -extern struct desc_struct idt_table[]; -extern void set_intr_gate(unsigned int irq, void * addr); - -static inline void pack_descriptor(__u32 *a, __u32 *b, - unsigned long base, unsigned long limit, unsigned char type, unsigned char flags) -{ - *a = ((base & 0xffff) << 16) | (limit & 0xffff); - *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | - (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20); -} - -static inline void pack_gate(__u32 *a, __u32 *b, - unsigned long base, unsigned short seg, unsigned char type, unsigned char flags) -{ - *a = (seg << 16) | (base & 0xffff); - *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff); -} - -#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */ -#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */ -#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */ -#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */ -#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */ -#define DESCTYPE_DPL3 0x60 /* DPL-3 */ -#define DESCTYPE_S 0x10 /* !system */ - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define load_TR_desc() native_load_tr_desc() -#define load_gdt(dtr) native_load_gdt(dtr) -#define load_idt(dtr) native_load_idt(dtr) -#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) -#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) - -#define store_gdt(dtr) native_store_gdt(dtr) -#define store_idt(dtr) native_store_idt(dtr) -#define store_tr(tr) (tr = native_store_tr()) -#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) - -#define load_TLS(t, cpu) native_load_tls(t, cpu) -#define set_ldt native_set_ldt - -#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) -#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) -#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) -#endif - -static inline void write_dt_entry(struct desc_struct *dt, - int entry, u32 entry_low, u32 entry_high) -{ - dt[entry].a = entry_low; - dt[entry].b = entry_high; -} - -static inline void native_set_ldt(const void *addr, unsigned int entries) -{ - if (likely(entries == 0)) - __asm__ __volatile__("lldt %w0"::"q" (0)); - else { - unsigned cpu = smp_processor_id(); - __u32 a, b; - - pack_descriptor(&a, &b, (unsigned long)addr, - entries * sizeof(struct desc_struct) - 1, - DESCTYPE_LDT, 0); - write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, 
b); - __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); - } -} - - -static inline void native_load_tr_desc(void) -{ - asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); -} - -static inline void native_load_gdt(const struct Xgt_desc_struct *dtr) -{ - asm volatile("lgdt %0"::"m" (*dtr)); -} - -static inline void native_load_idt(const struct Xgt_desc_struct *dtr) -{ - asm volatile("lidt %0"::"m" (*dtr)); -} - -static inline void native_store_gdt(struct Xgt_desc_struct *dtr) -{ - asm ("sgdt %0":"=m" (*dtr)); -} - -static inline void native_store_idt(struct Xgt_desc_struct *dtr) -{ - asm ("sidt %0":"=m" (*dtr)); -} - -static inline unsigned long native_store_tr(void) -{ - unsigned long tr; - asm ("str %0":"=r" (tr)); - return tr; -} - -static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) -{ - unsigned int i; - struct desc_struct *gdt = get_cpu_gdt_table(cpu); - - for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) - gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; -} - -static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) -{ - __u32 a, b; - pack_gate(&a, &b, (unsigned long)addr, seg, type, 0); - write_idt_entry(idt_table, gate, a, b); -} - -static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr) -{ - __u32 a, b; - pack_descriptor(&a, &b, (unsigned long)addr, - offsetof(struct tss_struct, __cacheline_filler) - 1, - DESCTYPE_TSS, 0); - write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); -} - - -#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) - -#define LDT_entry_a(info) \ - ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) - -#define LDT_entry_b(info) \ - (((info)->base_addr & 0xff000000) | \ - (((info)->base_addr & 0x00ff0000) >> 16) | \ - ((info)->limit & 0xf0000) | \ - (((info)->read_exec_only ^ 1) << 9) | \ - ((info)->contents << 10) | \ - (((info)->seg_not_present ^ 1) << 15) | \ - ((info)->seg_32bit << 22) | \ - ((info)->limit_in_pages << 23) | \ - ((info)->useable << 20) | \ - 0x7000) - -#define LDT_empty(info) (\ - (info)->base_addr == 0 && \ - (info)->limit == 0 && \ - (info)->contents == 0 && \ - (info)->read_exec_only == 1 && \ - (info)->seg_32bit == 0 && \ - (info)->limit_in_pages == 0 && \ - (info)->seg_not_present == 1 && \ - (info)->useable == 0 ) - -static inline void clear_LDT(void) -{ - set_ldt(NULL, 0); -} - -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - -static inline unsigned long get_desc_base(unsigned long *desc) -{ - unsigned long base; - base = ((desc[0] >> 16) & 0x0000ffff) | - ((desc[1] << 16) & 0x00ff0000) | - (desc[1] & 0xff000000); - return base; -} - -#else /* __ASSEMBLY__ */ - -/* - * GET_DESC_BASE reads the descriptor base of the specified segment. - * - * Args: - * idx - descriptor index - * gdt - GDT pointer - * base - 32bit register to which the base will be written - * lo_w - lo word of the "base" register - * lo_b - lo byte of the "base" register - * hi_b - hi byte of the low word of the "base" register - * - * Example: - * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) - * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. 
- */ -#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ - movb idx*8+4(gdt), lo_b; \ - movb idx*8+7(gdt), hi_b; \ - shll $16, base; \ - movw idx*8+2(gdt), lo_w; - -#endif /* !__ASSEMBLY__ */ - -#endif diff --git a/include/asm-x86/desc_64.h b/include/asm-x86/desc_64.h index 7d9c938e69fd..8b137891791f 100644 --- a/include/asm-x86/desc_64.h +++ b/include/asm-x86/desc_64.h @@ -1,204 +1 @@ -/* Written 2000 by Andi Kleen */ -#ifndef __ARCH_DESC_H -#define __ARCH_DESC_H -#include <linux/threads.h> -#include <asm/ldt.h> - -#ifndef __ASSEMBLY__ - -#include <linux/string.h> -#include <linux/smp.h> -#include <asm/desc_defs.h> - -#include <asm/segment.h> -#include <asm/mmu.h> - -extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; - -#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) -#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) -#define clear_LDT() asm volatile("lldt %w0"::"r" (0)) - -static inline unsigned long __store_tr(void) -{ - unsigned long tr; - - asm volatile ("str %w0":"=r" (tr)); - return tr; -} - -#define store_tr(tr) (tr) = __store_tr() - -/* - * This is the ldt that every process will get unless we need - * something other than this. - */ -extern struct desc_struct default_ldt[]; -extern struct gate_struct idt_table[]; -extern struct desc_ptr cpu_gdt_descr[]; - -/* the cpu gdt accessor */ -#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) - -static inline void load_gdt(const struct desc_ptr *ptr) -{ - asm volatile("lgdt %w0"::"m" (*ptr)); -} - -static inline void store_gdt(struct desc_ptr *ptr) -{ - asm("sgdt %w0":"=m" (*ptr)); -} - -static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) -{ - struct gate_struct s; - s.offset_low = PTR_LOW(func); - s.segment = __KERNEL_CS; - s.ist = ist; - s.p = 1; - s.dpl = dpl; - s.zero0 = 0; - s.zero1 = 0; - s.type = type; - s.offset_middle = PTR_MIDDLE(func); - s.offset_high = PTR_HIGH(func); - /* does not need to be atomic because it is only done once at setup time */ - memcpy(adr, &s, 16); -} - -static inline void set_intr_gate(int nr, void *func) -{ - BUG_ON((unsigned)nr > 0xFF); - _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); -} - -static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) -{ - BUG_ON((unsigned)nr > 0xFF); - _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); -} - -static inline void set_system_gate(int nr, void *func) -{ - BUG_ON((unsigned)nr > 0xFF); - _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); -} - -static inline void set_system_gate_ist(int nr, void *func, unsigned ist) -{ - _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); -} - -static inline void load_idt(const struct desc_ptr *ptr) -{ - asm volatile("lidt %w0"::"m" (*ptr)); -} - -static inline void store_idt(struct desc_ptr *dtr) -{ - asm("sidt %w0":"=m" (*dtr)); -} - -static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, - unsigned size) -{ - struct ldttss_desc d; - memset(&d,0,sizeof(d)); - d.limit0 = size & 0xFFFF; - d.base0 = PTR_LOW(tss); - d.base1 = PTR_MIDDLE(tss) & 0xFF; - d.type = type; - d.p = 1; - d.limit1 = (size >> 16) & 0xF; - d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; - d.base3 = PTR_HIGH(tss); - memcpy(ptr, &d, 16); -} - -static inline void set_tss_desc(unsigned cpu, void *addr) -{ - /* - * sizeof(unsigned long) coming from an extra "long" at the end - * of the iobitmap. 
See tss_struct definition in processor.h - * - * -1? seg base+limit should be pointing to the address of the - * last valid byte - */ - set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], - (unsigned long)addr, DESC_TSS, - IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); -} - -static inline void set_ldt_desc(unsigned cpu, void *addr, int size) -{ - set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr, - DESC_LDT, size * 8 - 1); -} - -#define LDT_entry_a(info) \ - ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) -/* Don't allow setting of the lm bit. It is useless anyways because - 64bit system calls require __USER_CS. */ -#define LDT_entry_b(info) \ - (((info)->base_addr & 0xff000000) | \ - (((info)->base_addr & 0x00ff0000) >> 16) | \ - ((info)->limit & 0xf0000) | \ - (((info)->read_exec_only ^ 1) << 9) | \ - ((info)->contents << 10) | \ - (((info)->seg_not_present ^ 1) << 15) | \ - ((info)->seg_32bit << 22) | \ - ((info)->limit_in_pages << 23) | \ - ((info)->useable << 20) | \ - /* ((info)->lm << 21) | */ \ - 0x7000) - -#define LDT_empty(info) (\ - (info)->base_addr == 0 && \ - (info)->limit == 0 && \ - (info)->contents == 0 && \ - (info)->read_exec_only == 1 && \ - (info)->seg_32bit == 0 && \ - (info)->limit_in_pages == 0 && \ - (info)->seg_not_present == 1 && \ - (info)->useable == 0 && \ - (info)->lm == 0) - -static inline void load_TLS(struct thread_struct *t, unsigned int cpu) -{ - unsigned int i; - u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN); - - for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) - gdt[i] = t->tls_array[i]; -} - -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock (mm_context_t *pc, int cpu) -{ - int count = pc->size; - - if (likely(!count)) { - clear_LDT(); - return; - } - - set_ldt_desc(cpu, pc->ldt, count); - load_LDT_desc(); -} - -static inline void load_LDT(mm_context_t *pc) -{ - int cpu = get_cpu(); - load_LDT_nolock(pc, cpu); - put_cpu(); -} - -extern struct desc_ptr idt_descr; - -#endif /* !__ASSEMBLY__ */ - -#endif diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index 089004070099..e33f078b3e54 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h @@ -11,26 +11,36 @@ #include <linux/types.h> +/* + * FIXME: Acessing the desc_struct through its fields is more elegant, + * and should be the one valid thing to do. However, a lot of open code + * still touches the a and b acessors, and doing this allow us to do it + * incrementally. 
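[Editorial aside, not part of the patch: the point of the desc_struct union introduced a few lines further down is that legacy code can keep poking the raw a/b words while new code uses the named bit-fields, and both views alias the same 8 bytes. A hedged sketch with an invented helper name:]

/* Illustration only: the classic flat 4GB code segment, written two ways. */
static void example_flat_code_desc(struct desc_struct *d)
{
	/* legacy two-word view */
	d->a = 0x0000ffff;	/* limit0 = 0xffff, base0 = 0 */
	d->b = 0x00cf9a00;	/* type = 0xa, s = 1, p = 1, limit = 0xf, d = 1, g = 1 */

	/* ...after which the bit-field view of the very same bytes reads back
	 * d->type == 0xa, d->g == 1, and so on. */
}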
We keep the signature as a struct, rather than an union, + * so we can get rid of it transparently in the future -- glommer + */ // 8 byte segment descriptor struct desc_struct { - u16 limit0; - u16 base0; - unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; - unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; -} __attribute__((packed)); + union { + struct { unsigned int a, b; }; + struct { + u16 limit0; + u16 base0; + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; + }; -struct n_desc_struct { - unsigned int a,b; -}; + }; +} __attribute__((packed)); enum { GATE_INTERRUPT = 0xE, GATE_TRAP = 0xF, GATE_CALL = 0xC, + GATE_TASK = 0x5, }; // 16byte gate -struct gate_struct { +struct gate_struct64 { u16 offset_low; u16 segment; unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; @@ -39,17 +49,18 @@ struct gate_struct { u32 zero1; } __attribute__((packed)); -#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) -#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) -#define PTR_HIGH(x) ((unsigned long)(x) >> 32) +#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF) +#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF) +#define PTR_HIGH(x) ((unsigned long long)(x) >> 32) enum { DESC_TSS = 0x9, DESC_LDT = 0x2, + DESCTYPE_S = 0x10, /* !system */ }; // LDT or TSS descriptor in the GDT. 16 bytes. -struct ldttss_desc { +struct ldttss_desc64 { u16 limit0; u16 base0; unsigned base1 : 8, type : 5, dpl : 2, p : 1; @@ -58,6 +69,16 @@ struct ldttss_desc { u32 zero1; } __attribute__((packed)); +#ifdef CONFIG_X86_64 +typedef struct gate_struct64 gate_desc; +typedef struct ldttss_desc64 ldt_desc; +typedef struct ldttss_desc64 tss_desc; +#else +typedef struct desc_struct gate_desc; +typedef struct desc_struct ldt_desc; +typedef struct desc_struct tss_desc; +#endif + struct desc_ptr { unsigned short size; unsigned long address; diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index 9f936c61a4e5..e9733ce89880 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h @@ -1,5 +1,319 @@ +/* + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_X86_DMA_H +#define _ASM_X86_DMA_H + +#include <linux/spinlock.h> /* And spinlocks */ +#include <asm/io.h> /* need byte IO */ +#include <linux/delay.h> + + +#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER +#define dma_outb outb_p +#else +#define dma_outb outb +#endif + +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. 
+ * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#define MAX_DMA_CHANNELS 8 + #ifdef CONFIG_X86_32 -# include "dma_32.h" + +/* The maximum address that we can perform a DMA transfer to on this platform */ +#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) + +#else + +/* 16MB ISA DMA zone */ +#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) + +/* 4GB broken PCI/AGP hardware bus master zone */ +#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) + +/* Compat define for old dma zone */ +#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) + +#endif + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ 
+#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +/* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_READ 0x44 +/* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 +/* pass thru DREQ->HRQ, DACK<-HLDA only */ +#define DMA_MODE_CASCADE 0xC0 + +#define DMA_AUTOINIT 0x10 + + +extern spinlock_t dma_spin_lock; + +static __inline__ unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static __inline__ void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static __inline__ void enable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static __inline__ void disable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while holding the DMA lock ! --- + */ +static __inline__ void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr <= 3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register, but a 64k boundary + * may have been crossed. + */ +static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) +{ + switch (dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + set_dma_page(dmanr, a>>16); + if (dmanr <= 3) { + dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + } else { + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + } +} + + +/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. 
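[Editorial aside, not part of the patch: the helpers gathered in this unified dma.h — claim_dma_lock(), set_dma_mode(), set_dma_addr(), set_dma_count() (defined just below) and enable_dma() — are normally used together under the DMA lock. A hedged sketch of the usual programming sequence; the channel, address and length parameters are placeholders:]

/* Illustration only: program an ISA DMA channel for a device-to-memory
 * transfer. "phys" must be a physical address below MAX_DMA_ADDRESS. */
static void example_start_isa_dma(unsigned int chan, unsigned int phys,
				  unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);			/* put the flip-flop in a known state */
	set_dma_mode(chan, DMA_MODE_READ);	/* I/O -> memory, single mode */
	set_dma_addr(chan, phys);		/* split into page + 16-bit offset */
	set_dma_count(chan, len);		/* bytes; must be even on channels 5-7 */
	enable_dma(chan);

	release_dma_lock(flags);
}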
+ */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + } else { + dma_outb((count >> 1) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port; + /* using short to get 16-bit wrap around */ + unsigned short count; + + io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE + : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr <= 3) ? count : (count << 1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char *device_id); +extern void free_dma(unsigned int dmanr); + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; #else -# include "dma_64.h" +#define isa_dma_bridge_buggy (0) #endif + +#endif /* _ASM_X86_DMA_H */ diff --git a/include/asm-x86/dma_32.h b/include/asm-x86/dma_32.h deleted file mode 100644 index d23aac8e1a50..000000000000 --- a/include/asm-x86/dma_32.h +++ /dev/null @@ -1,297 +0,0 @@ -/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_DMA_H -#define _ASM_DMA_H - -#include <linux/spinlock.h> /* And spinlocks */ -#include <asm/io.h> /* need byte IO */ -#include <linux/delay.h> - - -#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER -#define dma_outb outb_p -#else -#define dma_outb outb -#endif - -#define dma_inb inb - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... 
A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... \ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. - * - */ - -#define MAX_DMA_CHANNELS 8 - -/* The maximum address that we can perform a DMA transfer to on this platform */ -#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG 0x08 /* command register (w) */ -#define DMA1_STAT_REG 0x08 /* status register (r) */ -#define DMA1_REQ_REG 0x09 /* request register (w) */ -#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ -#define DMA1_MODE_REG 0x0B /* mode register (w) */ -#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ -#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ -#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ - -#define DMA2_CMD_REG 0xD0 /* command register (w) */ -#define DMA2_STAT_REG 0xD0 /* status register (r) */ -#define DMA2_REQ_REG 0xD2 /* request register (w) */ -#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ -#define DMA2_MODE_REG 0xD6 /* mode register (w) */ -#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ -#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ -#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ - -#define DMA_ADDR_0 0x00 /* DMA address registers */ -#define DMA_ADDR_1 0x02 -#define DMA_ADDR_2 0x04 -#define DMA_ADDR_3 0x06 -#define DMA_ADDR_4 0xC0 -#define DMA_ADDR_5 0xC4 -#define DMA_ADDR_6 0xC8 -#define DMA_ADDR_7 0xCC - -#define DMA_CNT_0 0x01 /* DMA count registers */ -#define DMA_CNT_1 0x03 -#define DMA_CNT_2 0x05 -#define DMA_CNT_3 0x07 -#define DMA_CNT_4 0xC2 -#define DMA_CNT_5 0xC6 -#define DMA_CNT_6 0xCA -#define DMA_CNT_7 0xCE - -#define DMA_PAGE_0 0x87 /* DMA page registers */ -#define DMA_PAGE_1 0x83 -#define DMA_PAGE_2 0x81 -#define DMA_PAGE_3 0x82 -#define DMA_PAGE_5 0x8B -#define DMA_PAGE_6 0x89 -#define DMA_PAGE_7 0x8A - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - - -extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - 
dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. :-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while holding the DMA lock ! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* Set only the page register bits of the transfer address. - * This is used for successive transfers when we know the contents of - * the lower 16 bits of the DMA current address register, but a 64k boundary - * may have been crossed. - */ -static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) -{ - switch(dmanr) { - case 0: - dma_outb(pagenr, DMA_PAGE_0); - break; - case 1: - dma_outb(pagenr, DMA_PAGE_1); - break; - case 2: - dma_outb(pagenr, DMA_PAGE_2); - break; - case 3: - dma_outb(pagenr, DMA_PAGE_3); - break; - case 5: - dma_outb(pagenr & 0xfe, DMA_PAGE_5); - break; - case 6: - dma_outb(pagenr & 0xfe, DMA_PAGE_6); - break; - case 7: - dma_outb(pagenr & 0xfe, DMA_PAGE_7); - break; - } -} - - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - set_dma_page(dmanr, a>>16); - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } -} - - -/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. - */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? 
((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - -#endif /* _ASM_DMA_H */ diff --git a/include/asm-x86/dma_64.h b/include/asm-x86/dma_64.h deleted file mode 100644 index a37c16f06289..000000000000 --- a/include/asm-x86/dma_64.h +++ /dev/null @@ -1,304 +0,0 @@ -/* - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_DMA_H -#define _ASM_DMA_H - -#include <linux/spinlock.h> /* And spinlocks */ -#include <asm/io.h> /* need byte IO */ -#include <linux/delay.h> - - -#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER -#define dma_outb outb_p -#else -#define dma_outb outb -#endif - -#define dma_inb inb - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... \ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
- * - */ - -#define MAX_DMA_CHANNELS 8 - - -/* 16MB ISA DMA zone */ -#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) - -/* 4GB broken PCI/AGP hardware bus master zone */ -#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) - -/* Compat define for old dma zone */ -#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG 0x08 /* command register (w) */ -#define DMA1_STAT_REG 0x08 /* status register (r) */ -#define DMA1_REQ_REG 0x09 /* request register (w) */ -#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ -#define DMA1_MODE_REG 0x0B /* mode register (w) */ -#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ -#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ -#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ - -#define DMA2_CMD_REG 0xD0 /* command register (w) */ -#define DMA2_STAT_REG 0xD0 /* status register (r) */ -#define DMA2_REQ_REG 0xD2 /* request register (w) */ -#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ -#define DMA2_MODE_REG 0xD6 /* mode register (w) */ -#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ -#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ -#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ - -#define DMA_ADDR_0 0x00 /* DMA address registers */ -#define DMA_ADDR_1 0x02 -#define DMA_ADDR_2 0x04 -#define DMA_ADDR_3 0x06 -#define DMA_ADDR_4 0xC0 -#define DMA_ADDR_5 0xC4 -#define DMA_ADDR_6 0xC8 -#define DMA_ADDR_7 0xCC - -#define DMA_CNT_0 0x01 /* DMA count registers */ -#define DMA_CNT_1 0x03 -#define DMA_CNT_2 0x05 -#define DMA_CNT_3 0x07 -#define DMA_CNT_4 0xC2 -#define DMA_CNT_5 0xC6 -#define DMA_CNT_6 0xCA -#define DMA_CNT_7 0xCE - -#define DMA_PAGE_0 0x87 /* DMA page registers */ -#define DMA_PAGE_1 0x83 -#define DMA_PAGE_2 0x81 -#define DMA_PAGE_3 0x82 -#define DMA_PAGE_5 0x8B -#define DMA_PAGE_6 0x89 -#define DMA_PAGE_7 0x8A - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - - -extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. 
:-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while holding the DMA lock ! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* Set only the page register bits of the transfer address. - * This is used for successive transfers when we know the contents of - * the lower 16 bits of the DMA current address register, but a 64k boundary - * may have been crossed. - */ -static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) -{ - switch(dmanr) { - case 0: - dma_outb(pagenr, DMA_PAGE_0); - break; - case 1: - dma_outb(pagenr, DMA_PAGE_1); - break; - case 2: - dma_outb(pagenr, DMA_PAGE_2); - break; - case 3: - dma_outb(pagenr, DMA_PAGE_3); - break; - case 5: - dma_outb(pagenr & 0xfe, DMA_PAGE_5); - break; - case 6: - dma_outb(pagenr & 0xfe, DMA_PAGE_6); - break; - case 7: - dma_outb(pagenr & 0xfe, DMA_PAGE_7); - break; - } -} - - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - set_dma_page(dmanr, a>>16); - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } -} - - -/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. - */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? 
count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - -#endif /* _ASM_DMA_H */ diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h index 8e2b0e6aa8e7..1241e6ad1935 100644 --- a/include/asm-x86/dmi.h +++ b/include/asm-x86/dmi.h @@ -5,9 +5,6 @@ #ifdef CONFIG_X86_32 -/* Use early IO mappings for DMI because it's initialized early */ -#define dmi_ioremap bt_ioremap -#define dmi_iounmap bt_iounmap #define dmi_alloc alloc_bootmem #else /* CONFIG_X86_32 */ @@ -22,14 +19,15 @@ extern char dmi_alloc_data[DMI_MAX_DATA]; static inline void *dmi_alloc(unsigned len) { int idx = dmi_alloc_index; - if ((dmi_alloc_index += len) > DMI_MAX_DATA) + if ((dmi_alloc_index + len) > DMI_MAX_DATA) return NULL; + dmi_alloc_index += len; return dmi_alloc_data + idx; } +#endif + #define dmi_ioremap early_ioremap #define dmi_iounmap early_iounmap #endif - -#endif diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h new file mode 100644 index 000000000000..7881368142fa --- /dev/null +++ b/include/asm-x86/ds.h @@ -0,0 +1,72 @@ +/* + * Debug Store (DS) support + * + * This provides a low-level interface to the hardware's Debug Store + * feature that is used for last branch recording (LBR) and + * precise-event based sampling (PEBS). + * + * Different architectures use a different DS layout/pointer size. + * The below functions therefore work on a void*. + * + * + * Since there is no user for PEBS, yet, only LBR (or branch + * trace store, BTS) is supported. + * + * + * Copyright (C) 2007 Intel Corporation. + * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 + */ + +#ifndef _ASM_X86_DS_H +#define _ASM_X86_DS_H + +#include <linux/types.h> +#include <linux/init.h> + +struct cpuinfo_x86; + + +/* a branch trace record entry + * + * In order to unify the interface between various processor versions, + * we use the below data structure for all processors. 
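[Editorial aside, not part of the patch: a rough sketch of how the ds_* interface declared just below might be driven — allocate a Debug Store buffer, then walk the recorded branch-trace entries. The buffer size and function name are invented, and the assumption that the ds_* calls return 0 on success is mine, not something the header states.]

/* Illustration only; error handling trimmed. */
static void example_dump_bts(void)
{
	void *ds = NULL;
	struct bts_struct bts;
	int i;

	if (ds_allocate(&ds, 4096))		/* 4096 is an arbitrary example size */
		return;

	for (i = 0; i < ds_get_bts_index(ds); i++)
		if (!ds_read_bts(ds, i, &bts) && bts.qualifier == BTS_BRANCH)
			printk(KERN_DEBUG "branch %llx -> %llx\n",
			       (unsigned long long)bts.variant.lbr.from_ip,
			       (unsigned long long)bts.variant.lbr.to_ip);

	ds_free(&ds);
}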
+ */ +enum bts_qualifier { + BTS_INVALID = 0, + BTS_BRANCH, + BTS_TASK_ARRIVES, + BTS_TASK_DEPARTS +}; + +struct bts_struct { + u64 qualifier; + union { + /* BTS_BRANCH */ + struct { + u64 from_ip; + u64 to_ip; + } lbr; + /* BTS_TASK_ARRIVES or + BTS_TASK_DEPARTS */ + u64 jiffies; + } variant; +}; + +/* Overflow handling mechanisms */ +#define DS_O_SIGNAL 1 /* send overflow signal */ +#define DS_O_WRAP 2 /* wrap around */ + +extern int ds_allocate(void **, size_t); +extern int ds_free(void **); +extern int ds_get_bts_size(void *); +extern int ds_get_bts_end(void *); +extern int ds_get_bts_index(void *); +extern int ds_set_overflow(void *, int); +extern int ds_get_overflow(void *); +extern int ds_clear(void *); +extern int ds_read_bts(void *, int, struct bts_struct *); +extern int ds_write_bts(void *, const struct bts_struct *); +extern unsigned long ds_debugctl_mask(void); +extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c); + +#endif /* _ASM_X86_DS_H */ diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h index 3e214f39fad3..7004251fc66b 100644 --- a/include/asm-x86/e820.h +++ b/include/asm-x86/e820.h @@ -22,6 +22,12 @@ struct e820map { }; #endif /* __ASSEMBLY__ */ +#define ISA_START_ADDRESS 0xa0000 +#define ISA_END_ADDRESS 0x100000 + +#define BIOS_BEGIN 0x000a0000 +#define BIOS_END 0x00100000 + #ifdef __KERNEL__ #ifdef CONFIG_X86_32 # include "e820_32.h" diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h index 03f60c690c8a..f1da7ebd1905 100644 --- a/include/asm-x86/e820_32.h +++ b/include/asm-x86/e820_32.h @@ -12,20 +12,28 @@ #ifndef __E820_HEADER #define __E820_HEADER +#include <linux/ioport.h> + #define HIGH_MEMORY (1024*1024) #ifndef __ASSEMBLY__ extern struct e820map e820; +extern void update_e820(void); extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_any_mapped(u64 start, u64 end, unsigned type); extern void find_max_pfn(void); extern void register_bootmem_low_pages(unsigned long max_low_pfn); +extern void add_memory_region(unsigned long long start, + unsigned long long size, int type); extern void e820_register_memory(void); extern void limit_regions(unsigned long long size); extern void print_memory_map(char *who); +extern void init_iomem_resources(struct resource *code_resource, + struct resource *data_resource, + struct resource *bss_resource); #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) extern void e820_mark_nosave_regions(void); @@ -35,5 +43,6 @@ static inline void e820_mark_nosave_regions(void) } #endif + #endif/*!__ASSEMBLY__*/ #endif/*__E820_HEADER*/ diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 0bd4787a5d57..51e4170f9ca5 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -11,6 +11,8 @@ #ifndef __E820_HEADER #define __E820_HEADER +#include <linux/ioport.h> + #ifndef __ASSEMBLY__ extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned size); @@ -19,11 +21,15 @@ extern void add_memory_region(unsigned long start, unsigned long size, extern void setup_memory_region(void); extern void contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); -extern void e820_reserve_resources(void); +extern void e820_reserve_resources(struct resource *code_resource, + struct resource *data_resource, struct resource *bss_resource); extern void e820_mark_nosave_regions(void); -extern void e820_print_map(char *who); extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); extern int 
e820_all_mapped(unsigned long start, unsigned long end, unsigned type); +extern int e820_any_non_reserved(unsigned long start, unsigned long end); +extern int is_memory_any_valid(unsigned long start, unsigned long end); +extern int e820_all_non_reserved(unsigned long start, unsigned long end); +extern int is_memory_all_valid(unsigned long start, unsigned long end); extern unsigned long e820_hole_size(unsigned long start, unsigned long end); extern void e820_setup_gap(void); @@ -33,9 +39,11 @@ extern void e820_register_active_regions(int nid, extern void finish_e820_parsing(void); extern struct e820map e820; +extern void update_e820(void); + +extern void reserve_early(unsigned long start, unsigned long end); +extern void early_res_to_bootmem(void); -extern unsigned ebda_addr, ebda_size; -extern unsigned long nodemap_addr, nodemap_size; #endif/*!__ASSEMBLY__*/ #endif/*__E820_HEADER*/ diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h new file mode 100644 index 000000000000..9c68a1f098d8 --- /dev/null +++ b/include/asm-x86/efi.h @@ -0,0 +1,97 @@ +#ifndef _ASM_X86_EFI_H +#define _ASM_X86_EFI_H + +#ifdef CONFIG_X86_32 + +extern unsigned long asmlinkage efi_call_phys(void *, ...); + +#define efi_call_phys0(f) efi_call_phys(f) +#define efi_call_phys1(f, a1) efi_call_phys(f, a1) +#define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2) +#define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3) +#define efi_call_phys4(f, a1, a2, a3, a4) \ + efi_call_phys(f, a1, a2, a3, a4) +#define efi_call_phys5(f, a1, a2, a3, a4, a5) \ + efi_call_phys(f, a1, a2, a3, a4, a5) +#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \ + efi_call_phys(f, a1, a2, a3, a4, a5, a6) +/* + * Wrap all the virtual calls in a way that forces the parameters on the stack. + */ + +#define efi_call_virt(f, args...) 
\ + ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) + +#define efi_call_virt0(f) efi_call_virt(f) +#define efi_call_virt1(f, a1) efi_call_virt(f, a1) +#define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2) +#define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3) +#define efi_call_virt4(f, a1, a2, a3, a4) \ + efi_call_virt(f, a1, a2, a3, a4) +#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ + efi_call_virt(f, a1, a2, a3, a4, a5) +#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ + efi_call_virt(f, a1, a2, a3, a4, a5, a6) + +#define efi_ioremap(addr, size) ioremap(addr, size) + +#else /* !CONFIG_X86_32 */ + +#define MAX_EFI_IO_PAGES 100 + +extern u64 efi_call0(void *fp); +extern u64 efi_call1(void *fp, u64 arg1); +extern u64 efi_call2(void *fp, u64 arg1, u64 arg2); +extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3); +extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4); +extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3, + u64 arg4, u64 arg5); +extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, + u64 arg4, u64 arg5, u64 arg6); + +#define efi_call_phys0(f) \ + efi_call0((void *)(f)) +#define efi_call_phys1(f, a1) \ + efi_call1((void *)(f), (u64)(a1)) +#define efi_call_phys2(f, a1, a2) \ + efi_call2((void *)(f), (u64)(a1), (u64)(a2)) +#define efi_call_phys3(f, a1, a2, a3) \ + efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3)) +#define efi_call_phys4(f, a1, a2, a3, a4) \ + efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ + (u64)(a4)) +#define efi_call_phys5(f, a1, a2, a3, a4, a5) \ + efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ + (u64)(a4), (u64)(a5)) +#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \ + efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ + (u64)(a4), (u64)(a5), (u64)(a6)) + +#define efi_call_virt0(f) \ + efi_call0((void *)(efi.systab->runtime->f)) +#define efi_call_virt1(f, a1) \ + efi_call1((void *)(efi.systab->runtime->f), (u64)(a1)) +#define efi_call_virt2(f, a1, a2) \ + efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2)) +#define efi_call_virt3(f, a1, a2, a3) \ + efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + (u64)(a3)) +#define efi_call_virt4(f, a1, a2, a3, a4) \ + efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + (u64)(a3), (u64)(a4)) +#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ + efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + (u64)(a3), (u64)(a4), (u64)(a5)) +#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ + efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) + +extern void *efi_ioremap(unsigned long offset, unsigned long size); + +#endif /* CONFIG_X86_32 */ + +extern void efi_reserve_bootmem(void); +extern void efi_call_phys_prelog(void); +extern void efi_call_phys_epilog(void); + +#endif diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index ec42a4d2e83b..d9c94e707289 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h @@ -73,18 +73,23 @@ typedef struct user_fxsr_struct elf_fpxregset_t; #endif #ifdef __KERNEL__ +#include <asm/vdso.h> -#ifdef CONFIG_X86_32 -#include <asm/processor.h> -#include <asm/system.h> /* for savesegment */ -#include <asm/desc.h> +extern unsigned int vdso_enabled; /* * This is used to ensure we don't load something for the wrong architecture. 
*/ -#define elf_check_arch(x) \ +#define elf_check_arch_ia32(x) \ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) +#ifdef CONFIG_X86_32 +#include <asm/processor.h> +#include <asm/system.h> /* for savesegment */ +#include <asm/desc.h> + +#define elf_check_arch(x) elf_check_arch_ia32(x) + /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. This provides a mean for the dynamic linker to call DT_FINI functions for @@ -96,36 +101,38 @@ typedef struct user_fxsr_struct elf_fpxregset_t; just to make things more deterministic. */ #define ELF_PLAT_INIT(_r, load_addr) do { \ - _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ - _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ - _r->eax = 0; \ + _r->bx = 0; _r->cx = 0; _r->dx = 0; \ + _r->si = 0; _r->di = 0; _r->bp = 0; \ + _r->ax = 0; \ } while (0) -/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is - now struct_user_regs, they are different) */ - -#define ELF_CORE_COPY_REGS(pr_reg, regs) \ - pr_reg[0] = regs->ebx; \ - pr_reg[1] = regs->ecx; \ - pr_reg[2] = regs->edx; \ - pr_reg[3] = regs->esi; \ - pr_reg[4] = regs->edi; \ - pr_reg[5] = regs->ebp; \ - pr_reg[6] = regs->eax; \ - pr_reg[7] = regs->xds & 0xffff; \ - pr_reg[8] = regs->xes & 0xffff; \ - pr_reg[9] = regs->xfs & 0xffff; \ - savesegment(gs,pr_reg[10]); \ - pr_reg[11] = regs->orig_eax; \ - pr_reg[12] = regs->eip; \ - pr_reg[13] = regs->xcs & 0xffff; \ - pr_reg[14] = regs->eflags; \ - pr_reg[15] = regs->esp; \ - pr_reg[16] = regs->xss & 0xffff; +/* + * regs is struct pt_regs, pr_reg is elf_gregset_t (which is + * now struct_user_regs, they are different) + */ + +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->bx; \ + pr_reg[1] = regs->cx; \ + pr_reg[2] = regs->dx; \ + pr_reg[3] = regs->si; \ + pr_reg[4] = regs->di; \ + pr_reg[5] = regs->bp; \ + pr_reg[6] = regs->ax; \ + pr_reg[7] = regs->ds & 0xffff; \ + pr_reg[8] = regs->es & 0xffff; \ + pr_reg[9] = regs->fs & 0xffff; \ + savesegment(gs, pr_reg[10]); \ + pr_reg[11] = regs->orig_ax; \ + pr_reg[12] = regs->ip; \ + pr_reg[13] = regs->cs & 0xffff; \ + pr_reg[14] = regs->flags; \ + pr_reg[15] = regs->sp; \ + pr_reg[16] = regs->ss & 0xffff; \ +} while (0); #define ELF_PLATFORM (utsname()->machine) #define set_personality_64bit() do { } while (0) -extern unsigned int vdso_enabled; #else /* CONFIG_X86_32 */ @@ -137,28 +144,57 @@ extern unsigned int vdso_enabled; #define elf_check_arch(x) \ ((x)->e_machine == EM_X86_64) +#define compat_elf_check_arch(x) elf_check_arch_ia32(x) + +static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp) +{ + asm volatile("movl %0,%%fs" :: "r" (0)); + asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS)); + load_gs_index(0); + regs->ip = ip; + regs->sp = sp; + regs->flags = X86_EFLAGS_IF; + regs->cs = __USER32_CS; + regs->ss = __USER32_DS; +} + +static inline void elf_common_init(struct thread_struct *t, + struct pt_regs *regs, const u16 ds) +{ + regs->ax = regs->bx = regs->cx = regs->dx = 0; + regs->si = regs->di = regs->bp = 0; + regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0; + regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; + t->fs = t->gs = 0; + t->fsindex = t->gsindex = 0; + t->ds = t->es = ds; +} + #define ELF_PLAT_INIT(_r, load_addr) do { \ - struct task_struct *cur = current; \ - (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \ - (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \ - (_r)->rax = 0; \ - (_r)->r8 = 0; \ - (_r)->r9 = 0; \ - (_r)->r10 = 0; \ - (_r)->r11 = 
0; \ - (_r)->r12 = 0; \ - (_r)->r13 = 0; \ - (_r)->r14 = 0; \ - (_r)->r15 = 0; \ - cur->thread.fs = 0; cur->thread.gs = 0; \ - cur->thread.fsindex = 0; cur->thread.gsindex = 0; \ - cur->thread.ds = 0; cur->thread.es = 0; \ + elf_common_init(&current->thread, _r, 0); \ clear_thread_flag(TIF_IA32); \ } while (0) -/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is - now struct_user_regs, they are different). Assumes current is the process - getting dumped. */ +#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ + elf_common_init(&current->thread, regs, __USER_DS) +#define compat_start_thread(regs, ip, sp) do { \ + start_ia32_thread(regs, ip, sp); \ + set_fs(USER_DS); \ + } while (0) +#define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \ + if (test_thread_flag(TIF_IA32)) \ + clear_thread_flag(TIF_ABI_PENDING); \ + else \ + set_thread_flag(TIF_ABI_PENDING); \ + current->personality |= force_personality32; \ + } while (0) +#define COMPAT_ELF_PLATFORM ("i686") + +/* + * regs is struct pt_regs, pr_reg is elf_gregset_t (which is + * now struct_user_regs, they are different). Assumes current is the process + * getting dumped. + */ #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ unsigned v; \ @@ -166,22 +202,22 @@ extern unsigned int vdso_enabled; (pr_reg)[1] = (regs)->r14; \ (pr_reg)[2] = (regs)->r13; \ (pr_reg)[3] = (regs)->r12; \ - (pr_reg)[4] = (regs)->rbp; \ - (pr_reg)[5] = (regs)->rbx; \ + (pr_reg)[4] = (regs)->bp; \ + (pr_reg)[5] = (regs)->bx; \ (pr_reg)[6] = (regs)->r11; \ (pr_reg)[7] = (regs)->r10; \ (pr_reg)[8] = (regs)->r9; \ (pr_reg)[9] = (regs)->r8; \ - (pr_reg)[10] = (regs)->rax; \ - (pr_reg)[11] = (regs)->rcx; \ - (pr_reg)[12] = (regs)->rdx; \ - (pr_reg)[13] = (regs)->rsi; \ - (pr_reg)[14] = (regs)->rdi; \ - (pr_reg)[15] = (regs)->orig_rax; \ - (pr_reg)[16] = (regs)->rip; \ + (pr_reg)[10] = (regs)->ax; \ + (pr_reg)[11] = (regs)->cx; \ + (pr_reg)[12] = (regs)->dx; \ + (pr_reg)[13] = (regs)->si; \ + (pr_reg)[14] = (regs)->di; \ + (pr_reg)[15] = (regs)->orig_ax; \ + (pr_reg)[16] = (regs)->ip; \ (pr_reg)[17] = (regs)->cs; \ - (pr_reg)[18] = (regs)->eflags; \ - (pr_reg)[19] = (regs)->rsp; \ + (pr_reg)[18] = (regs)->flags; \ + (pr_reg)[19] = (regs)->sp; \ (pr_reg)[20] = (regs)->ss; \ (pr_reg)[21] = current->thread.fs; \ (pr_reg)[22] = current->thread.gs; \ @@ -189,15 +225,17 @@ extern unsigned int vdso_enabled; asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ -} while(0); +} while (0); /* I'm not sure if we can use '-' here */ #define ELF_PLATFORM ("x86_64") extern void set_personality_64bit(void); -extern int vdso_enabled; +extern unsigned int sysctl_vsyscall32; +extern int force_personality32; #endif /* !CONFIG_X86_32 */ +#define CORE_DUMP_USE_REGSET #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 @@ -232,43 +270,24 @@ extern int vdso_enabled; struct task_struct; -extern int dump_task_regs (struct task_struct *, elf_gregset_t *); -extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); - -#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) -#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) +#define ARCH_DLINFO_IA32(vdso_enabled) \ +do if (vdso_enabled) { \ + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ +} while (0) #ifdef CONFIG_X86_32 -extern int dump_task_extended_fpu (struct task_struct *, - struct user_fxsr_struct *); -#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) \ - 
dump_task_extended_fpu(tsk, elf_xfpregs) -#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) -#define VDSO_PRELINK 0 - -#define VDSO_SYM(x) \ - (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK) - -#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) -#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE) -extern void __kernel_vsyscall; - -#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) +#define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled) /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ -#define ARCH_DLINFO \ -do if (vdso_enabled) { \ - NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ -} while (0) - #else /* CONFIG_X86_32 */ +#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */ + /* 1GB for 64bit, 8MB for 32bit */ #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) @@ -277,14 +296,31 @@ do if (vdso_enabled) { \ NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ } while (0) +#define AT_SYSINFO 32 + +#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32) + +#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + #endif /* !CONFIG_X86_32 */ +#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) + +#define VDSO_ENTRY \ + ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) + struct linux_binprm; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); +extern int syscall32_setup_pages(struct linux_binprm *, int exstack); +#define compat_arch_setup_additional_pages syscall32_setup_pages + +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk + #endif /* __KERNEL__ */ #endif diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h index 680c39563345..8e6aef19f8f0 100644 --- a/include/asm-x86/emergency-restart.h +++ b/include/asm-x86/emergency-restart.h @@ -1,6 +1,18 @@ #ifndef _ASM_EMERGENCY_RESTART_H #define _ASM_EMERGENCY_RESTART_H +enum reboot_type { + BOOT_TRIPLE = 't', + BOOT_KBD = 'k', +#ifdef CONFIG_X86_32 + BOOT_BIOS = 'b', +#endif + BOOT_ACPI = 'a', + BOOT_EFI = 'e' +}; + +extern enum reboot_type reboot_type; + extern void machine_emergency_restart(void); #endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index 249e753ac805..a7404d50686b 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h @@ -65,7 +65,7 @@ enum fixed_addresses { #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ - FIX_CO_APIC, /* Cobalt APIC Redirection Table */ + FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif @@ -74,7 +74,7 @@ enum fixed_addresses { #endif #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ -#endif +#endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, @@ -90,11 +90,23 @@ enum fixed_addresses { FIX_PARAVIRT_BOOTMAP, #endif __end_of_permanent_fixed_addresses, - /* temporary boot-time mappings, used before ioremap() is functional */ -#define NR_FIX_BTMAPS 16 - FIX_BTMAP_END = __end_of_permanent_fixed_addresses, - FIX_BTMAP_BEGIN = FIX_BTMAP_END + 
NR_FIX_BTMAPS - 1, + /* + * 256 temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. + * + * We round it up to the next 512 pages boundary so that we + * can have a single pgd entry and a single pte table: + */ +#define NR_FIX_BTMAPS 64 +#define FIX_BTMAPS_NESTING 4 + FIX_BTMAP_END = + __end_of_permanent_fixed_addresses + 512 - + (__end_of_permanent_fixed_addresses & 511), + FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, FIX_WP_TEST, +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT + FIX_OHCI1394_BASE, +#endif __end_of_fixed_addresses }; diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index cdfbe4a6ae6f..70ddb21e6458 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h @@ -15,6 +15,7 @@ #include <asm/apicdef.h> #include <asm/page.h> #include <asm/vsyscall.h> +#include <asm/efi.h> /* * Here we define all the compile-time 'special' virtual @@ -41,6 +42,11 @@ enum fixed_addresses { FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, + FIX_EFI_IO_MAP_LAST_PAGE, + FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1, +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT + FIX_OHCI1394_BASE, +#endif __end_of_fixed_addresses }; diff --git a/include/asm-x86/fpu32.h b/include/asm-x86/fpu32.h deleted file mode 100644 index 4153db5c0c31..000000000000 --- a/include/asm-x86/fpu32.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _FPU32_H -#define _FPU32_H 1 - -struct _fpstate_ia32; - -int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave); -int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, - struct pt_regs *regs, int fsave); - -#endif diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index 1f4610e0c613..62828d63f1b1 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h @@ -1,5 +1,135 @@ -#ifdef CONFIG_X86_32 -# include "futex_32.h" -#else -# include "futex_64.h" +#ifndef _ASM_X86_FUTEX_H +#define _ASM_X86_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> + +#include <asm/asm.h> +#include <asm/errno.h> +#include <asm/processor.h> +#include <asm/system.h> +#include <asm/uaccess.h> + +#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile( \ +"1: " insn "\n" \ +"2: .section .fixup,\"ax\"\n \ +3: mov %3, %1\n \ + jmp 2b\n \ + .previous\n \ + .section __ex_table,\"a\"\n \ + .align 8\n" \ + _ASM_PTR "1b,3b\n \ + .previous" \ + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "i" (-EFAULT), "0" (oparg), "1" (0)) + +#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile( \ +"1: movl %2, %0\n \ + movl %0, %3\n" \ + insn "\n" \ +"2: " LOCK_PREFIX "cmpxchgl %3, %2\n \ + jnz 1b\n \ +3: .section .fixup,\"ax\"\n \ +4: mov %5, %1\n \ + jmp 3b\n \ + .previous\n \ + .section __ex_table,\"a\"\n \ + .align 8\n" \ + _ASM_PTR "1b,4b,2b,4b\n \ + .previous" \ + : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ + "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + +static inline int +futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret, tem; + + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + +#if 
defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) + /* Real i386 machines can only support FUTEX_OP_SET */ + if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3) + return -ENOSYS; +#endif + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, + uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + __asm__ __volatile__( + "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" + + "2: .section .fixup, \"ax\" \n" + "3: mov %2, %0 \n" + " jmp 2b \n" + " .previous \n" + + " .section __ex_table, \"a\" \n" + " .align 8 \n" + _ASM_PTR " 1b,3b \n" + " .previous \n" + + : "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "memory" + ); + + return oldval; +} + +#endif #endif diff --git a/include/asm-x86/futex_32.h b/include/asm-x86/futex_32.h deleted file mode 100644 index 438ef0ec7101..000000000000 --- a/include/asm-x86/futex_32.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#ifdef __KERNEL__ - -#include <linux/futex.h> -#include <asm/errno.h> -#include <asm/system.h> -#include <asm/processor.h> -#include <asm/uaccess.h> - -#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile ( \ -"1: " insn "\n" \ -"2: .section .fixup,\"ax\"\n\ -3: mov %3, %1\n\ - jmp 2b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 8\n\ - .long 1b,3b\n\ - .previous" \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ - : "i" (-EFAULT), "0" (oparg), "1" (0)) - -#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile ( \ -"1: movl %2, %0\n\ - movl %0, %3\n" \ - insn "\n" \ -"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ - jnz 1b\n\ -3: .section .fixup,\"ax\"\n\ -4: mov %5, %1\n\ - jmp 3b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 8\n\ - .long 1b,4b,2b,4b\n\ - .previous" \ - : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ - "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "1" (0)) - -static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) -{ - int op = (encoded_op >> 28) & 7; - int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, ret, tem; - if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; - - if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - pagefault_disable(); - - if (op == FUTEX_OP_SET) - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); - else { -#ifndef CONFIG_X86_BSWAP - if (boot_cpu_data.x86 == 3) - ret = -ENOSYS; - else -#endif - switch (op) { - case FUTEX_OP_ADD: - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, - oldval, uaddr, oparg); - break; - case FUTEX_OP_OR: - __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, - oparg); - break; - case FUTEX_OP_ANDN: - __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, - ~oparg); - break; - case FUTEX_OP_XOR: - __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, - oparg); - break; - default: - ret = -ENOSYS; - } - } - - pagefault_enable(); - - if (!ret) { - switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; - } - } - return ret; -} - -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) -{ - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - __asm__ __volatile__( - "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" - - "2: .section .fixup, \"ax\" \n" - "3: mov %2, %0 \n" - " jmp 2b \n" - " .previous \n" - - " .section __ex_table, \"a\" \n" - " .align 8 \n" - " .long 1b,3b \n" - " .previous \n" - - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) - : "memory" - ); - - return oldval; -} - -#endif -#endif diff --git a/include/asm-x86/futex_64.h b/include/asm-x86/futex_64.h deleted file mode 100644 index 5cdfb08013c3..000000000000 --- a/include/asm-x86/futex_64.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#ifdef __KERNEL__ - -#include <linux/futex.h> -#include <asm/errno.h> -#include <asm/system.h> -#include <asm/uaccess.h> - -#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile ( \ -"1: " insn "\n" \ -"2: .section .fixup,\"ax\"\n\ -3: mov %3, %1\n\ - jmp 2b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 8\n\ - .quad 1b,3b\n\ - .previous" \ - : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \ - : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0)) - -#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile ( \ -"1: movl %2, %0\n\ - movl %0, %3\n" \ - insn "\n" \ -"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ - jnz 1b\n\ -3: .section .fixup,\"ax\"\n\ -4: mov %5, %1\n\ - jmp 3b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 8\n\ - .quad 1b,4b,2b,4b\n\ - .previous" \ - : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \ - "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) - -static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) -{ - int op = (encoded_op >> 28) & 7; - int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, ret, tem; - if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; - - if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - pagefault_disable(); - - switch (op) { - case FUTEX_OP_SET: - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); - break; - case FUTEX_OP_ADD: - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, - uaddr, oparg); - break; - case FUTEX_OP_OR: - __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); - break; - case FUTEX_OP_ANDN: - __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); - break; - case FUTEX_OP_XOR: - __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); - break; - default: - ret = -ENOSYS; - } - - pagefault_enable(); - - if (!ret) { - switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; - } - } - return ret; -} - -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) -{ - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - __asm__ __volatile__( - "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" - - "2: .section .fixup, \"ax\" \n" - "3: mov %2, %0 \n" - " jmp 2b \n" - " .previous \n" - - " .section __ex_table, \"a\" \n" - " .align 8 \n" - " .quad 1b,3b \n" - " .previous \n" - - : "=a" (oldval), "=m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) - : "memory" - ); - - return oldval; -} - -#endif -#endif diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h index f704c50519b8..90958ed993fa 100644 --- a/include/asm-x86/gart.h +++ b/include/asm-x86/gart.h @@ -9,6 +9,7 @@ extern int iommu_detected; extern void gart_iommu_init(void); extern void gart_iommu_shutdown(void); extern void __init gart_parse_options(char *); +extern void early_gart_iommu_check(void); extern void gart_iommu_hole_init(void); extern int fallback_aper_order; extern int fallback_aper_force; @@ -20,6 +21,10 @@ extern int fix_aperture; #define gart_iommu_aperture 0 #define gart_iommu_aperture_allowed 0 +static inline void early_gart_iommu_check(void) +{ +} + static inline void gart_iommu_shutdown(void) { } diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 771af336734f..811fe14f70b2 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h @@ -121,9 +121,15 @@ extern int geode_get_dev_base(unsigned int dev); #define GPIO_MAP_Z 0xE8 #define GPIO_MAP_W 0xEC -extern void geode_gpio_set(unsigned int, unsigned int); -extern void geode_gpio_clear(unsigned int, unsigned int); -extern int geode_gpio_isset(unsigned int, unsigned int); +static inline u32 geode_gpio(unsigned int nr) +{ + BUG_ON(nr > 28); + return 1 << nr; +} + +extern void geode_gpio_set(u32, unsigned int); +extern void geode_gpio_clear(u32, unsigned int); +extern int geode_gpio_isset(u32, unsigned int); extern void geode_gpio_setup_event(unsigned int, int, int); extern void geode_gpio_set_irq(unsigned int, unsigned int); diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h new file mode 100644 index 000000000000..ff87fca0caf9 --- /dev/null +++ b/include/asm-x86/gpio.h @@ -0,0 +1,6 @@ +#ifndef _ASM_I386_GPIO_H +#define _ASM_I386_GPIO_H + +#include <gpio.h> + +#endif /* _ASM_I386_GPIO_H */ diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h index ad8d6e758785..6a9b4ac59bf7 100644 --- a/include/asm-x86/hpet.h +++ 
b/include/asm-x86/hpet.h @@ -69,6 +69,7 @@ extern void force_hpet_resume(void); #include <linux/interrupt.h> +typedef irqreturn_t (*rtc_irq_handler)(int interrupt, void *cookie); extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, @@ -77,13 +78,16 @@ extern int hpet_set_periodic_freq(unsigned long freq); extern int hpet_rtc_dropped_irq(void); extern int hpet_rtc_timer_init(void); extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); +extern int hpet_register_irq_handler(rtc_irq_handler handler); +extern void hpet_unregister_irq_handler(rtc_irq_handler handler); #endif /* CONFIG_HPET_EMULATE_RTC */ -#else +#else /* CONFIG_HPET_TIMER */ static inline int hpet_enable(void) { return 0; } static inline unsigned long hpet_readl(unsigned long a) { return 0; } +static inline int is_hpet_enabled(void) { return 0; } -#endif /* CONFIG_HPET_TIMER */ +#endif #endif /* ASM_X86_HPET_H */ diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h index 0bedbdf5e907..6d65fbb6358b 100644 --- a/include/asm-x86/hw_irq_32.h +++ b/include/asm-x86/hw_irq_32.h @@ -26,19 +26,19 @@ * Interrupt entry/exit code at both C and assembly level */ -extern void (*interrupt[NR_IRQS])(void); +extern void (*const interrupt[NR_IRQS])(void); #ifdef CONFIG_SMP -fastcall void reschedule_interrupt(void); -fastcall void invalidate_interrupt(void); -fastcall void call_function_interrupt(void); +void reschedule_interrupt(void); +void invalidate_interrupt(void); +void call_function_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC -fastcall void apic_timer_interrupt(void); -fastcall void error_interrupt(void); -fastcall void spurious_interrupt(void); -fastcall void thermal_interrupt(void); +void apic_timer_interrupt(void); +void error_interrupt(void); +void spurious_interrupt(void); +void thermal_interrupt(void); #define platform_legacy_irq(irq) ((irq) < 16) #endif diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index a470d59da678..312a58d6dac6 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h @@ -135,11 +135,13 @@ extern void init_8259A(int aeoi); extern void send_IPI_self(int vector); extern void init_VISWS_APIC_irqs(void); extern void setup_IO_APIC(void); +extern void enable_IO_APIC(void); extern void disable_IO_APIC(void); extern void print_IO_APIC(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); extern void send_IPI(int dest, int vector); extern void setup_ioapic_dest(void); +extern void native_init_IRQ(void); extern unsigned long io_apic_irqs; diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index a8bbed349664..ba8105ca822b 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -1,5 +1,360 @@ -#ifdef CONFIG_X86_32 -# include "i387_32.h" +/* + * Copyright (C) 1994 Linus Torvalds + * + * Pentium III FXSR, SSE support + * General FPU state handling cleanups + * Gareth Hughes <gareth@valinux.com>, May 2000 + * x86-64 work by Andi Kleen 2002 + */ + +#ifndef _ASM_X86_I387_H +#define _ASM_X86_I387_H + +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/regset.h> +#include <asm/processor.h> +#include <asm/sigcontext.h> +#include <asm/user.h> +#include <asm/uaccess.h> + +extern void fpu_init(void); +extern unsigned int mxcsr_feature_mask; +extern void mxcsr_feature_mask_init(void); +extern void init_fpu(struct task_struct *child); +extern asmlinkage void 
math_state_restore(void); + +extern user_regset_active_fn fpregs_active, xfpregs_active; +extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; +extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; + +#ifdef CONFIG_IA32_EMULATION +struct _fpstate_ia32; +extern int save_i387_ia32(struct _fpstate_ia32 __user *buf); +extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf); +#endif + +#ifdef CONFIG_X86_64 + +/* Ignore delayed exceptions from user space */ +static inline void tolerant_fwait(void) +{ + asm volatile("1: fwait\n" + "2:\n" + " .section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 1b,2b\n" + " .previous\n"); +} + +static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) +{ + int err; + + asm volatile("1: rex64/fxrstor (%[fx])\n\t" + "2:\n" + ".section .fixup,\"ax\"\n" + "3: movl $-1,%[err]\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 1b,3b\n" + ".previous" + : [err] "=r" (err) +#if 0 /* See comment in __save_init_fpu() below. */ + : [fx] "r" (fx), "m" (*fx), "0" (0)); +#else + : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); +#endif + if (unlikely(err)) + init_fpu(current); + return err; +} + +#define X87_FSW_ES (1 << 7) /* Exception Summary */ + +/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception + is pending. Clear the x87 state here by setting it to fixed + values. The kernel data segment can be sometimes 0 and sometimes + new user value. Both should be ok. + Use the PDA as safe address because it should be already in L1. */ +static inline void clear_fpu_state(struct i387_fxsave_struct *fx) +{ + if (unlikely(fx->swd & X87_FSW_ES)) + asm volatile("fnclex"); + alternative_input(ASM_NOP8 ASM_NOP2, + " emms\n" /* clear stack tags */ + " fildl %%gs:0", /* load to clear state */ + X86_FEATURE_FXSAVE_LEAK); +} + +static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) +{ + int err; + + asm volatile("1: rex64/fxsave (%[fx])\n\t" + "2:\n" + ".section .fixup,\"ax\"\n" + "3: movl $-1,%[err]\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 1b,3b\n" + ".previous" + : [err] "=r" (err), "=m" (*fx) +#if 0 /* See comment in __fxsave_clear() below. */ + : [fx] "r" (fx), "0" (0)); +#else + : [fx] "cdaSDb" (fx), "0" (0)); +#endif + if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) + err = -EFAULT; + /* No need to clear here because the caller clears USED_MATH */ + return err; +} + +static inline void __save_init_fpu(struct task_struct *tsk) +{ + /* Using "rex64; fxsave %0" is broken because, if the memory operand + uses any extended registers for addressing, a second REX prefix + will be generated (to the assembler, rex64 followed by semicolon + is a separate instruction), and hence the 64-bitness is lost. */ +#if 0 + /* Using "fxsaveq %0" would be the ideal choice, but is only supported + starting with gas 2.16. */ + __asm__ __volatile__("fxsaveq %0" + : "=m" (tsk->thread.i387.fxsave)); +#elif 0 + /* Using, as a workaround, the properly prefixed form below isn't + accepted by any binutils version so far released, complaining that + the same type of prefix is used twice if an extended register is + needed for addressing (fix submitted to mainline 2005-11-21). */ + __asm__ __volatile__("rex64/fxsave %0" + : "=m" (tsk->thread.i387.fxsave)); +#else + /* This, however, we can work around by forcing the compiler to select + an addressing mode that doesn't require extended registers. 
*/ + __asm__ __volatile__("rex64/fxsave %P2(%1)" + : "=m" (tsk->thread.i387.fxsave) + : "cdaSDb" (tsk), + "i" (offsetof(__typeof__(*tsk), + thread.i387.fxsave))); +#endif + clear_fpu_state(&tsk->thread.i387.fxsave); + task_thread_info(tsk)->status &= ~TS_USEDFPU; +} + +/* + * Signal frame handlers. + */ + +static inline int save_i387(struct _fpstate __user *buf) +{ + struct task_struct *tsk = current; + int err = 0; + + BUILD_BUG_ON(sizeof(struct user_i387_struct) != + sizeof(tsk->thread.i387.fxsave)); + + if ((unsigned long)buf % 16) + printk("save_i387: bad fpstate %p\n", buf); + + if (!used_math()) + return 0; + clear_used_math(); /* trigger finit */ + if (task_thread_info(tsk)->status & TS_USEDFPU) { + err = save_i387_checking((struct i387_fxsave_struct __user *)buf); + if (err) return err; + task_thread_info(tsk)->status &= ~TS_USEDFPU; + stts(); + } else { + if (__copy_to_user(buf, &tsk->thread.i387.fxsave, + sizeof(struct i387_fxsave_struct))) + return -1; + } + return 1; +} + +/* + * This restores directly out of user space. Exceptions are handled. + */ +static inline int restore_i387(struct _fpstate __user *buf) +{ + set_used_math(); + if (!(task_thread_info(current)->status & TS_USEDFPU)) { + clts(); + task_thread_info(current)->status |= TS_USEDFPU; + } + return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); +} + +#else /* CONFIG_X86_32 */ + +static inline void tolerant_fwait(void) +{ + asm volatile("fnclex ; fwait"); +} + +static inline void restore_fpu(struct task_struct *tsk) +{ + /* + * The "nop" is needed to make the instructions the same + * length. + */ + alternative_input( + "nop ; frstor %1", + "fxrstor %1", + X86_FEATURE_FXSR, + "m" ((tsk)->thread.i387.fxsave)); +} + +/* We need a safe address that is cheap to find and that is already + in L1 during context switch. The best choices are unfortunately + different for UP and SMP */ +#ifdef CONFIG_SMP +#define safe_address (__per_cpu_offset[0]) #else -# include "i387_64.h" +#define safe_address (kstat_cpu(0).cpustat.user) #endif + +/* + * These must be called with preempt disabled + */ +static inline void __save_init_fpu(struct task_struct *tsk) +{ + /* Use more nops than strictly needed in case the compiler + varies code */ + alternative_input( + "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, + "fxsave %[fx]\n" + "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", + X86_FEATURE_FXSR, + [fx] "m" (tsk->thread.i387.fxsave), + [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception + is pending. Clear the x87 state here by setting it to fixed + values. safe_address is a random variable that should be in L1 */ + alternative_input( + GENERIC_NOP8 GENERIC_NOP2, + "emms\n\t" /* clear stack tags */ + "fildl %[addr]", /* set F?P to defined value */ + X86_FEATURE_FXSAVE_LEAK, + [addr] "m" (safe_address)); + task_thread_info(tsk)->status &= ~TS_USEDFPU; +} + +/* + * Signal frame handlers... 
+ */ +extern int save_i387(struct _fpstate __user *buf); +extern int restore_i387(struct _fpstate __user *buf); + +#endif /* CONFIG_X86_64 */ + +static inline void __unlazy_fpu(struct task_struct *tsk) +{ + if (task_thread_info(tsk)->status & TS_USEDFPU) { + __save_init_fpu(tsk); + stts(); + } else + tsk->fpu_counter = 0; +} + +static inline void __clear_fpu(struct task_struct *tsk) +{ + if (task_thread_info(tsk)->status & TS_USEDFPU) { + tolerant_fwait(); + task_thread_info(tsk)->status &= ~TS_USEDFPU; + stts(); + } +} + +static inline void kernel_fpu_begin(void) +{ + struct thread_info *me = current_thread_info(); + preempt_disable(); + if (me->status & TS_USEDFPU) + __save_init_fpu(me->task); + else + clts(); +} + +static inline void kernel_fpu_end(void) +{ + stts(); + preempt_enable(); +} + +#ifdef CONFIG_X86_64 + +static inline void save_init_fpu(struct task_struct *tsk) +{ + __save_init_fpu(tsk); + stts(); +} + +#define unlazy_fpu __unlazy_fpu +#define clear_fpu __clear_fpu + +#else /* CONFIG_X86_32 */ + +/* + * These disable preemption on their own and are safe + */ +static inline void save_init_fpu(struct task_struct *tsk) +{ + preempt_disable(); + __save_init_fpu(tsk); + stts(); + preempt_enable(); +} + +static inline void unlazy_fpu(struct task_struct *tsk) +{ + preempt_disable(); + __unlazy_fpu(tsk); + preempt_enable(); +} + +static inline void clear_fpu(struct task_struct *tsk) +{ + preempt_disable(); + __clear_fpu(tsk); + preempt_enable(); +} + +#endif /* CONFIG_X86_64 */ + +/* + * i387 state interaction + */ +static inline unsigned short get_fpu_cwd(struct task_struct *tsk) +{ + if (cpu_has_fxsr) { + return tsk->thread.i387.fxsave.cwd; + } else { + return (unsigned short)tsk->thread.i387.fsave.cwd; + } +} + +static inline unsigned short get_fpu_swd(struct task_struct *tsk) +{ + if (cpu_has_fxsr) { + return tsk->thread.i387.fxsave.swd; + } else { + return (unsigned short)tsk->thread.i387.fsave.swd; + } +} + +static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) +{ + if (cpu_has_xmm) { + return tsk->thread.i387.fxsave.mxcsr; + } else { + return MXCSR_DEFAULT; + } +} + +#endif /* _ASM_X86_I387_H */ diff --git a/include/asm-x86/i387_32.h b/include/asm-x86/i387_32.h deleted file mode 100644 index cdd1e248e3b4..000000000000 --- a/include/asm-x86/i387_32.h +++ /dev/null @@ -1,151 +0,0 @@ -/* - * include/asm-i386/i387.h - * - * Copyright (C) 1994 Linus Torvalds - * - * Pentium III FXSR, SSE support - * General FPU state handling cleanups - * Gareth Hughes <gareth@valinux.com>, May 2000 - */ - -#ifndef __ASM_I386_I387_H -#define __ASM_I386_I387_H - -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/kernel_stat.h> -#include <asm/processor.h> -#include <asm/sigcontext.h> -#include <asm/user.h> - -extern void mxcsr_feature_mask_init(void); -extern void init_fpu(struct task_struct *); - -/* - * FPU lazy state save handling... - */ - -/* - * The "nop" is needed to make the instructions the same - * length. - */ -#define restore_fpu(tsk) \ - alternative_input( \ - "nop ; frstor %1", \ - "fxrstor %1", \ - X86_FEATURE_FXSR, \ - "m" ((tsk)->thread.i387.fxsave)) - -extern void kernel_fpu_begin(void); -#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) - -/* We need a safe address that is cheap to find and that is already - in L1 during context switch. 
The best choices are unfortunately - different for UP and SMP */ -#ifdef CONFIG_SMP -#define safe_address (__per_cpu_offset[0]) -#else -#define safe_address (kstat_cpu(0).cpustat.user) -#endif - -/* - * These must be called with preempt disabled - */ -static inline void __save_init_fpu( struct task_struct *tsk ) -{ - /* Use more nops than strictly needed in case the compiler - varies code */ - alternative_input( - "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, - "fxsave %[fx]\n" - "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", - X86_FEATURE_FXSR, - [fx] "m" (tsk->thread.i387.fxsave), - [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); - /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception - is pending. Clear the x87 state here by setting it to fixed - values. safe_address is a random variable that should be in L1 */ - alternative_input( - GENERIC_NOP8 GENERIC_NOP2, - "emms\n\t" /* clear stack tags */ - "fildl %[addr]", /* set F?P to defined value */ - X86_FEATURE_FXSAVE_LEAK, - [addr] "m" (safe_address)); - task_thread_info(tsk)->status &= ~TS_USEDFPU; -} - -#define __unlazy_fpu( tsk ) do { \ - if (task_thread_info(tsk)->status & TS_USEDFPU) { \ - __save_init_fpu(tsk); \ - stts(); \ - } else \ - tsk->fpu_counter = 0; \ -} while (0) - -#define __clear_fpu( tsk ) \ -do { \ - if (task_thread_info(tsk)->status & TS_USEDFPU) { \ - asm volatile("fnclex ; fwait"); \ - task_thread_info(tsk)->status &= ~TS_USEDFPU; \ - stts(); \ - } \ -} while (0) - - -/* - * These disable preemption on their own and are safe - */ -static inline void save_init_fpu( struct task_struct *tsk ) -{ - preempt_disable(); - __save_init_fpu(tsk); - stts(); - preempt_enable(); -} - -#define unlazy_fpu( tsk ) do { \ - preempt_disable(); \ - __unlazy_fpu(tsk); \ - preempt_enable(); \ -} while (0) - -#define clear_fpu( tsk ) do { \ - preempt_disable(); \ - __clear_fpu( tsk ); \ - preempt_enable(); \ -} while (0) - -/* - * FPU state interaction... - */ -extern unsigned short get_fpu_cwd( struct task_struct *tsk ); -extern unsigned short get_fpu_swd( struct task_struct *tsk ); -extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); -extern asmlinkage void math_state_restore(void); - -/* - * Signal frame handlers... - */ -extern int save_i387( struct _fpstate __user *buf ); -extern int restore_i387( struct _fpstate __user *buf ); - -/* - * ptrace request handers... - */ -extern int get_fpregs( struct user_i387_struct __user *buf, - struct task_struct *tsk ); -extern int set_fpregs( struct task_struct *tsk, - struct user_i387_struct __user *buf ); - -extern int get_fpxregs( struct user_fxsr_struct __user *buf, - struct task_struct *tsk ); -extern int set_fpxregs( struct task_struct *tsk, - struct user_fxsr_struct __user *buf ); - -/* - * FPU state for core dumps... 
- */ -extern int dump_fpu( struct pt_regs *regs, - struct user_i387_struct *fpu ); - -#endif /* __ASM_I386_I387_H */ diff --git a/include/asm-x86/i387_64.h b/include/asm-x86/i387_64.h deleted file mode 100644 index 3a4ffba3d6bc..000000000000 --- a/include/asm-x86/i387_64.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * include/asm-x86_64/i387.h - * - * Copyright (C) 1994 Linus Torvalds - * - * Pentium III FXSR, SSE support - * General FPU state handling cleanups - * Gareth Hughes <gareth@valinux.com>, May 2000 - * x86-64 work by Andi Kleen 2002 - */ - -#ifndef __ASM_X86_64_I387_H -#define __ASM_X86_64_I387_H - -#include <linux/sched.h> -#include <asm/processor.h> -#include <asm/sigcontext.h> -#include <asm/user.h> -#include <asm/thread_info.h> -#include <asm/uaccess.h> - -extern void fpu_init(void); -extern unsigned int mxcsr_feature_mask; -extern void mxcsr_feature_mask_init(void); -extern void init_fpu(struct task_struct *child); -extern int save_i387(struct _fpstate __user *buf); -extern asmlinkage void math_state_restore(void); - -/* - * FPU lazy state save handling... - */ - -#define unlazy_fpu(tsk) do { \ - if (task_thread_info(tsk)->status & TS_USEDFPU) \ - save_init_fpu(tsk); \ - else \ - tsk->fpu_counter = 0; \ -} while (0) - -/* Ignore delayed exceptions from user space */ -static inline void tolerant_fwait(void) -{ - asm volatile("1: fwait\n" - "2:\n" - " .section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 1b,2b\n" - " .previous\n"); -} - -#define clear_fpu(tsk) do { \ - if (task_thread_info(tsk)->status & TS_USEDFPU) { \ - tolerant_fwait(); \ - task_thread_info(tsk)->status &= ~TS_USEDFPU; \ - stts(); \ - } \ -} while (0) - -/* - * ptrace request handers... - */ -extern int get_fpregs(struct user_i387_struct __user *buf, - struct task_struct *tsk); -extern int set_fpregs(struct task_struct *tsk, - struct user_i387_struct __user *buf); - -/* - * i387 state interaction - */ -#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr) -#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd) -#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd) -#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd) -#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val)) -#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val)) -#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val)) - -#define X87_FSW_ES (1 << 7) /* Exception Summary */ - -/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception - is pending. Clear the x87 state here by setting it to fixed - values. The kernel data segment can be sometimes 0 and sometimes - new user value. Both should be ok. - Use the PDA as safe address because it should be already in L1. */ -static inline void clear_fpu_state(struct i387_fxsave_struct *fx) -{ - if (unlikely(fx->swd & X87_FSW_ES)) - asm volatile("fnclex"); - alternative_input(ASM_NOP8 ASM_NOP2, - " emms\n" /* clear stack tags */ - " fildl %%gs:0", /* load to clear state */ - X86_FEATURE_FXSAVE_LEAK); -} - -static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) -{ - int err; - - asm volatile("1: rex64/fxrstor (%[fx])\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: movl $-1,%[err]\n" - " jmp 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 1b,3b\n" - ".previous" - : [err] "=r" (err) -#if 0 /* See comment in __fxsave_clear() below. 
*/ - : [fx] "r" (fx), "m" (*fx), "0" (0)); -#else - : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); -#endif - if (unlikely(err)) - init_fpu(current); - return err; -} - -static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) -{ - int err; - - asm volatile("1: rex64/fxsave (%[fx])\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: movl $-1,%[err]\n" - " jmp 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 1b,3b\n" - ".previous" - : [err] "=r" (err), "=m" (*fx) -#if 0 /* See comment in __fxsave_clear() below. */ - : [fx] "r" (fx), "0" (0)); -#else - : [fx] "cdaSDb" (fx), "0" (0)); -#endif - if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) - err = -EFAULT; - /* No need to clear here because the caller clears USED_MATH */ - return err; -} - -static inline void __fxsave_clear(struct task_struct *tsk) -{ - /* Using "rex64; fxsave %0" is broken because, if the memory operand - uses any extended registers for addressing, a second REX prefix - will be generated (to the assembler, rex64 followed by semicolon - is a separate instruction), and hence the 64-bitness is lost. */ -#if 0 - /* Using "fxsaveq %0" would be the ideal choice, but is only supported - starting with gas 2.16. */ - __asm__ __volatile__("fxsaveq %0" - : "=m" (tsk->thread.i387.fxsave)); -#elif 0 - /* Using, as a workaround, the properly prefixed form below isn't - accepted by any binutils version so far released, complaining that - the same type of prefix is used twice if an extended register is - needed for addressing (fix submitted to mainline 2005-11-21). */ - __asm__ __volatile__("rex64/fxsave %0" - : "=m" (tsk->thread.i387.fxsave)); -#else - /* This, however, we can work around by forcing the compiler to select - an addressing mode that doesn't require extended registers. */ - __asm__ __volatile__("rex64/fxsave %P2(%1)" - : "=m" (tsk->thread.i387.fxsave) - : "cdaSDb" (tsk), - "i" (offsetof(__typeof__(*tsk), - thread.i387.fxsave))); -#endif - clear_fpu_state(&tsk->thread.i387.fxsave); -} - -static inline void kernel_fpu_begin(void) -{ - struct thread_info *me = current_thread_info(); - preempt_disable(); - if (me->status & TS_USEDFPU) { - __fxsave_clear(me->task); - me->status &= ~TS_USEDFPU; - return; - } - clts(); -} - -static inline void kernel_fpu_end(void) -{ - stts(); - preempt_enable(); -} - -static inline void save_init_fpu(struct task_struct *tsk) -{ - __fxsave_clear(tsk); - task_thread_info(tsk)->status &= ~TS_USEDFPU; - stts(); -} - -/* - * This restores directly out of user space. Exceptions are handled. 
- */ -static inline int restore_i387(struct _fpstate __user *buf) -{ - set_used_math(); - if (!(task_thread_info(current)->status & TS_USEDFPU)) { - clts(); - task_thread_info(current)->status |= TS_USEDFPU; - } - return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); -} - -#endif /* __ASM_X86_64_I387_H */ diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h index 747548ec5d1d..b51c0487fc41 100644 --- a/include/asm-x86/i8253.h +++ b/include/asm-x86/i8253.h @@ -12,4 +12,7 @@ extern struct clock_event_device *global_clock_event; extern void setup_pit_timer(void); +#define inb_pit inb_p +#define outb_pit outb_p + #endif /* __ASM_I8253_H__ */ diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 29d8f9a6b3fc..67c319e0efc7 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h @@ -3,10 +3,25 @@ extern unsigned int cached_irq_mask; -#define __byte(x,y) (((unsigned char *) &(y))[x]) +#define __byte(x,y) (((unsigned char *) &(y))[x]) #define cached_master_mask (__byte(0, cached_irq_mask)) #define cached_slave_mask (__byte(1, cached_irq_mask)) +/* i8259A PIC registers */ +#define PIC_MASTER_CMD 0x20 +#define PIC_MASTER_IMR 0x21 +#define PIC_MASTER_ISR PIC_MASTER_CMD +#define PIC_MASTER_POLL PIC_MASTER_ISR +#define PIC_MASTER_OCW3 PIC_MASTER_ISR +#define PIC_SLAVE_CMD 0xa0 +#define PIC_SLAVE_IMR 0xa1 + +/* i8259A PIC related value */ +#define PIC_CASCADE_IR 2 +#define MASTER_ICW4_DEFAULT 0x01 +#define SLAVE_ICW4_DEFAULT 0x01 +#define PIC_ICW4_AEOI 2 + extern spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); @@ -14,4 +29,7 @@ extern void enable_8259A_irq(unsigned int irq); extern void disable_8259A_irq(unsigned int irq); extern unsigned int startup_8259A_irq(unsigned int irq); +#define inb_pic inb_p +#define outb_pic outb_p + #endif /* __ASM_I8259_H__ */ diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index 0190b7c4e319..aa9733206e29 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h @@ -159,12 +159,6 @@ struct ustat32 { #define IA32_STACK_TOP IA32_PAGE_OFFSET #ifdef __KERNEL__ -struct user_desc; -struct siginfo_t; -int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info); -int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info); -int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs); - struct linux_binprm; extern int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec_stack); diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h index 5b52ce507338..61cea9e7c5c1 100644 --- a/include/asm-x86/ia32_unistd.h +++ b/include/asm-x86/ia32_unistd.h @@ -5,7 +5,7 @@ * This file contains the system call numbers of the ia32 port, * this is for the kernel only. * Only add syscalls here where some part of the kernel needs to know - * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK + * the number. This should be otherwise in sync with asm-x86/unistd_32.h. 
-AK */ #define __NR_ia32_restart_syscall 0 diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h index 42130adf9c7c..c2552d8bebf7 100644 --- a/include/asm-x86/ide.h +++ b/include/asm-x86/ide.h @@ -1,6 +1,4 @@ /* - * linux/include/asm-i386/ide.h - * * Copyright (C) 1994-1996 Linus Torvalds & authors */ diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h index 6bd47dcf2067..d240e5b30a45 100644 --- a/include/asm-x86/idle.h +++ b/include/asm-x86/idle.h @@ -6,7 +6,6 @@ struct notifier_block; void idle_notifier_register(struct notifier_block *n); -void idle_notifier_unregister(struct notifier_block *n); void enter_idle(void); void exit_idle(void); diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index fe881cd1e6f4..586d7aa54ceb 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h @@ -100,8 +100,6 @@ static inline void * phys_to_virt(unsigned long address) */ #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) -extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); - /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory @@ -111,32 +109,39 @@ extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsign * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual - * address. + * address. * * If the area you are trying to map is a PCI BAR you should have a * look at pci_iomap(). */ +extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); +extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); -static inline void __iomem * ioremap(unsigned long offset, unsigned long size) +/* + * The default ioremap() behavior is non-cached: + */ +static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { - return __ioremap(offset, size, 0); + return ioremap_nocache(offset, size); } -extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); /* - * bt_ioremap() and bt_iounmap() are for temporary early boot-time + * early_ioremap() and early_iounmap() are for temporary early boot-time * mappings, before the real ioremap() is functional. * A boot-time mapping is currently limited to at most 16 pages. 
*/ -extern void *bt_ioremap(unsigned long offset, unsigned long size); -extern void bt_iounmap(void *addr, unsigned long size); +extern void early_ioremap_init(void); +extern void early_ioremap_clear(void); +extern void early_ioremap_reset(void); +extern void *early_ioremap(unsigned long offset, unsigned long size); +extern void early_iounmap(void *addr, unsigned long size); extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); /* Use early IO mappings for DMI because it's initialized early */ -#define dmi_ioremap bt_ioremap -#define dmi_iounmap bt_iounmap +#define dmi_ioremap early_ioremap +#define dmi_iounmap early_iounmap #define dmi_alloc alloc_bootmem /* @@ -250,10 +255,10 @@ static inline void flush_write_buffers(void) #endif /* __KERNEL__ */ -static inline void native_io_delay(void) -{ - asm volatile("outb %%al,$0x80" : : : "memory"); -} +extern void native_io_delay(void); + +extern int io_delay_type; +extern void io_delay_init(void); #if defined(CONFIG_PARAVIRT) #include <asm/paravirt.h> diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index a037b0794332..f64a59cc396d 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h @@ -35,12 +35,24 @@ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> */ -#define __SLOW_DOWN_IO "\noutb %%al,$0x80" +extern void native_io_delay(void); -#ifdef REALLY_SLOW_IO -#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO +extern int io_delay_type; +extern void io_delay_init(void); + +#if defined(CONFIG_PARAVIRT) +#include <asm/paravirt.h> #else -#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO + +static inline void slow_down_io(void) +{ + native_io_delay(); +#ifdef REALLY_SLOW_IO + native_io_delay(); + native_io_delay(); + native_io_delay(); +#endif +} #endif /* @@ -52,9 +64,15 @@ static inline void out##s(unsigned x value, unsigned short port) { #define __OUT2(s,s1,s2) \ __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" +#ifndef REALLY_SLOW_IO +#define REALLY_SLOW_IO +#define UNSET_REALLY_SLOW_IO +#endif + #define __OUT(s,s1,x) \ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ -__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \ +__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ + slow_down_io(); } #define __IN1(s) \ static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; @@ -63,8 +81,13 @@ static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" #define __IN(s,s1,i...) 
\ -__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ -__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ +__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \ +__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ + slow_down_io(); return _v; } + +#ifdef UNSET_REALLY_SLOW_IO +#undef REALLY_SLOW_IO +#endif #define __INS(s) \ static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ @@ -127,13 +150,6 @@ static inline void * phys_to_virt(unsigned long address) #include <asm-generic/iomap.h> -extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); - -static inline void __iomem * ioremap (unsigned long offset, unsigned long size) -{ - return __ioremap(offset, size, 0); -} - extern void *early_ioremap(unsigned long addr, unsigned long size); extern void early_iounmap(void *addr, unsigned long size); @@ -142,8 +158,19 @@ extern void early_iounmap(void *addr, unsigned long size); * it's useful if some control registers are in such an area and write combining * or read caching is not desirable: */ -extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); +extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); +extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); + +/* + * The default ioremap() behavior is non-cached: + */ +static inline void __iomem *ioremap(unsigned long offset, unsigned long size) +{ + return ioremap_nocache(offset, size); +} + extern void iounmap(volatile void __iomem *addr); + extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); /* diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 88494966beeb..0f5b3fef0b08 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h @@ -1,5 +1,159 @@ +#ifndef __ASM_IO_APIC_H +#define __ASM_IO_APIC_H + +#include <asm/types.h> +#include <asm/mpspec.h> +#include <asm/apicdef.h> + +/* + * Intel IO-APIC support for SMP and UP systems. 
+ * + * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar + */ + +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14, + LTS : 1, + delivery_type : 1, + __reserved_1 : 8, + ID : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8, + __reserved_2 : 7, + PRQ : 1, + entries : 8, + __reserved_1 : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24, + arbitration : 4, + __reserved_1 : 4; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1, + __reserved_1 : 31; + } __attribute__ ((packed)) bits; +}; + +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; + +struct IO_APIC_route_entry { + __u32 vector : 8, + delivery_mode : 3, /* 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + dest_mode : 1, /* 0: physical, 1: logical */ + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved_2 : 15; + #ifdef CONFIG_X86_32 -# include "io_apic_32.h" + union { + struct { + __u32 __reserved_1 : 24, + physical_dest : 4, + __reserved_2 : 4; + } physical; + + struct { + __u32 __reserved_1 : 24, + logical_dest : 8; + } logical; + } dest; #else -# include "io_apic_64.h" + __u32 __reserved_3 : 24, + dest : 8; +#endif + +} __attribute__ ((packed)); + +#ifdef CONFIG_X86_IO_APIC + +/* + * # of IO-APICs and # of IRQ routing registers + */ +extern int nr_ioapics; +extern int nr_ioapic_registers[MAX_IO_APICS]; + +/* + * MP-BIOS irq configuration table structures: + */ + +/* I/O APIC entries */ +extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; + +/* # of MP IRQ source entries */ +extern int mp_irq_entries; + +/* MP IRQ source entries */ +extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; + +/* non-0 if default (table-less) MP configuration */ +extern int mpc_default_type; + +/* Older SiS APIC requires we rewrite the index register */ +extern int sis_apic_bug; + +/* 1 if "noapic" boot option passed */ +extern int skip_ioapic_setup; + +static inline void disable_ioapic_setup(void) +{ + skip_ioapic_setup = 1; +} + +/* + * If we use the IO-APIC for IRQ routing, disable automatic + * assignment of PCI IRQ's. + */ +#define io_apic_assign_pci_irqs \ + (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) + +#ifdef CONFIG_ACPI +extern int io_apic_get_unique_id(int ioapic, int apic_id); +extern int io_apic_get_version(int ioapic); +extern int io_apic_get_redir_entries(int ioapic); +extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, + int edge_level, int active_high_low); +extern int timer_uses_ioapic_pin_0; +#endif /* CONFIG_ACPI */ + +extern int (*ioapic_renumber_irq)(int ioapic, int irq); +extern void ioapic_init_mappings(void); + +#else /* !CONFIG_X86_IO_APIC */ +#define io_apic_assign_pci_irqs 0 +#endif + #endif diff --git a/include/asm-x86/io_apic_32.h b/include/asm-x86/io_apic_32.h deleted file mode 100644 index 3f087883ea48..000000000000 --- a/include/asm-x86/io_apic_32.h +++ /dev/null @@ -1,155 +0,0 @@ -#ifndef __ASM_IO_APIC_H -#define __ASM_IO_APIC_H - -#include <asm/types.h> -#include <asm/mpspec.h> -#include <asm/apicdef.h> - -/* - * Intel IO-APIC support for SMP and UP systems. 
- * - * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar - */ - -/* - * The structure of the IO-APIC: - */ -union IO_APIC_reg_00 { - u32 raw; - struct { - u32 __reserved_2 : 14, - LTS : 1, - delivery_type : 1, - __reserved_1 : 8, - ID : 8; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_01 { - u32 raw; - struct { - u32 version : 8, - __reserved_2 : 7, - PRQ : 1, - entries : 8, - __reserved_1 : 8; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_02 { - u32 raw; - struct { - u32 __reserved_2 : 24, - arbitration : 4, - __reserved_1 : 4; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_03 { - u32 raw; - struct { - u32 boot_DT : 1, - __reserved_1 : 31; - } __attribute__ ((packed)) bits; -}; - -enum ioapic_irq_destination_types { - dest_Fixed = 0, - dest_LowestPrio = 1, - dest_SMI = 2, - dest__reserved_1 = 3, - dest_NMI = 4, - dest_INIT = 5, - dest__reserved_2 = 6, - dest_ExtINT = 7 -}; - -struct IO_APIC_route_entry { - __u32 vector : 8, - delivery_mode : 3, /* 000: FIXED - * 001: lowest prio - * 111: ExtINT - */ - dest_mode : 1, /* 0: physical, 1: logical */ - delivery_status : 1, - polarity : 1, - irr : 1, - trigger : 1, /* 0: edge, 1: level */ - mask : 1, /* 0: enabled, 1: disabled */ - __reserved_2 : 15; - - union { struct { __u32 - __reserved_1 : 24, - physical_dest : 4, - __reserved_2 : 4; - } physical; - - struct { __u32 - __reserved_1 : 24, - logical_dest : 8; - } logical; - } dest; - -} __attribute__ ((packed)); - -#ifdef CONFIG_X86_IO_APIC - -/* - * # of IO-APICs and # of IRQ routing registers - */ -extern int nr_ioapics; -extern int nr_ioapic_registers[MAX_IO_APICS]; - -/* - * MP-BIOS irq configuration table structures: - */ - -/* I/O APIC entries */ -extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; - -/* # of MP IRQ source entries */ -extern int mp_irq_entries; - -/* MP IRQ source entries */ -extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; - -/* non-0 if default (table-less) MP configuration */ -extern int mpc_default_type; - -/* Older SiS APIC requires we rewrite the index register */ -extern int sis_apic_bug; - -/* 1 if "noapic" boot option passed */ -extern int skip_ioapic_setup; - -static inline void disable_ioapic_setup(void) -{ - skip_ioapic_setup = 1; -} - -static inline int ioapic_setup_disabled(void) -{ - return skip_ioapic_setup; -} - -/* - * If we use the IO-APIC for IRQ routing, disable automatic - * assignment of PCI IRQ's. - */ -#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) - -#ifdef CONFIG_ACPI -extern int io_apic_get_unique_id (int ioapic, int apic_id); -extern int io_apic_get_version (int ioapic); -extern int io_apic_get_redir_entries (int ioapic); -extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); -extern int timer_uses_ioapic_pin_0; -#endif /* CONFIG_ACPI */ - -extern int (*ioapic_renumber_irq)(int ioapic, int irq); - -#else /* !CONFIG_X86_IO_APIC */ -#define io_apic_assign_pci_irqs 0 -#endif - -#endif diff --git a/include/asm-x86/io_apic_64.h b/include/asm-x86/io_apic_64.h deleted file mode 100644 index e2c13675ee4e..000000000000 --- a/include/asm-x86/io_apic_64.h +++ /dev/null @@ -1,138 +0,0 @@ -#ifndef __ASM_IO_APIC_H -#define __ASM_IO_APIC_H - -#include <asm/types.h> -#include <asm/mpspec.h> -#include <asm/apicdef.h> - -/* - * Intel IO-APIC support for SMP and UP systems. 
- * - * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar - */ - -#define APIC_MISMATCH_DEBUG - -/* - * The structure of the IO-APIC: - */ -union IO_APIC_reg_00 { - u32 raw; - struct { - u32 __reserved_2 : 14, - LTS : 1, - delivery_type : 1, - __reserved_1 : 8, - ID : 8; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_01 { - u32 raw; - struct { - u32 version : 8, - __reserved_2 : 7, - PRQ : 1, - entries : 8, - __reserved_1 : 8; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_02 { - u32 raw; - struct { - u32 __reserved_2 : 24, - arbitration : 4, - __reserved_1 : 4; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_03 { - u32 raw; - struct { - u32 boot_DT : 1, - __reserved_1 : 31; - } __attribute__ ((packed)) bits; -}; - -/* - * # of IO-APICs and # of IRQ routing registers - */ -extern int nr_ioapics; -extern int nr_ioapic_registers[MAX_IO_APICS]; - -enum ioapic_irq_destination_types { - dest_Fixed = 0, - dest_LowestPrio = 1, - dest_SMI = 2, - dest__reserved_1 = 3, - dest_NMI = 4, - dest_INIT = 5, - dest__reserved_2 = 6, - dest_ExtINT = 7 -}; - -struct IO_APIC_route_entry { - __u32 vector : 8, - delivery_mode : 3, /* 000: FIXED - * 001: lowest prio - * 111: ExtINT - */ - dest_mode : 1, /* 0: physical, 1: logical */ - delivery_status : 1, - polarity : 1, - irr : 1, - trigger : 1, /* 0: edge, 1: level */ - mask : 1, /* 0: enabled, 1: disabled */ - __reserved_2 : 15; - - __u32 __reserved_3 : 24, - dest : 8; -} __attribute__ ((packed)); - -/* - * MP-BIOS irq configuration table structures: - */ - -/* I/O APIC entries */ -extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; - -/* # of MP IRQ source entries */ -extern int mp_irq_entries; - -/* MP IRQ source entries */ -extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; - -/* non-0 if default (table-less) MP configuration */ -extern int mpc_default_type; - -/* 1 if "noapic" boot option passed */ -extern int skip_ioapic_setup; - -static inline void disable_ioapic_setup(void) -{ - skip_ioapic_setup = 1; -} - - -/* - * If we use the IO-APIC for IRQ routing, disable automatic - * assignment of PCI IRQ's. 
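The bitfields of struct IO_APIC_route_entry in the unified header above map directly onto the two 32-bit redirection-table registers, and routing code fills them field by field before writing the entry out. A minimal sketch, with made-up vector and trigger settings (not taken from this patch):

#include <linux/string.h>
#include <asm/io_apic.h>

/* Illustrative only: build a level-triggered, active-low, lowest-priority entry. */
static struct IO_APIC_route_entry example_route_entry(void)
{
        struct IO_APIC_route_entry entry;

        memset(&entry, 0, sizeof(entry));
        entry.delivery_mode = dest_LowestPrio;  /* 001: lowest priority */
        entry.dest_mode     = 1;                /* logical destination */
        entry.mask          = 0;                /* enabled */
        entry.trigger       = 1;                /* level triggered */
        entry.polarity      = 1;                /* active low */
        entry.vector        = 0x31;             /* hypothetical vector */

        /* The destination field differs between 32-bit and 64-bit; see the
         * CONFIG_X86_32 #ifdef in the unified header above. */
        return entry;
}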
- */ -#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) - -#ifdef CONFIG_ACPI -extern int io_apic_get_version (int ioapic); -extern int io_apic_get_redir_entries (int ioapic); -extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int); -#endif - -extern int sis_apic_bug; /* dummy */ - -void enable_NMI_through_LVT0 (void * dummy); - -extern spinlock_t i8259A_lock; - -extern int timer_over_8254; - -#endif diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 1b695ff52687..92021c1ffa3a 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h @@ -1,5 +1,245 @@ -#ifdef CONFIG_X86_32 -# include "irqflags_32.h" +#ifndef _X86_IRQFLAGS_H_ +#define _X86_IRQFLAGS_H_ + +#include <asm/processor-flags.h> + +#ifndef __ASSEMBLY__ +/* + * Interrupt control: + */ + +static inline unsigned long native_save_fl(void) +{ + unsigned long flags; + + __asm__ __volatile__( + "# __raw_save_flags\n\t" + "pushf ; pop %0" + : "=g" (flags) + : /* no input */ + : "memory" + ); + + return flags; +} + +static inline void native_restore_fl(unsigned long flags) +{ + __asm__ __volatile__( + "push %0 ; popf" + : /* no output */ + :"g" (flags) + :"memory", "cc" + ); +} + +static inline void native_irq_disable(void) +{ + asm volatile("cli": : :"memory"); +} + +static inline void native_irq_enable(void) +{ + asm volatile("sti": : :"memory"); +} + +static inline void native_safe_halt(void) +{ + asm volatile("sti; hlt": : :"memory"); +} + +static inline void native_halt(void) +{ + asm volatile("hlt": : :"memory"); +} + +#endif + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#ifndef __ASSEMBLY__ + +static inline unsigned long __raw_local_save_flags(void) +{ + return native_save_fl(); +} + +static inline void raw_local_irq_restore(unsigned long flags) +{ + native_restore_fl(flags); +} + +static inline void raw_local_irq_disable(void) +{ + native_irq_disable(); +} + +static inline void raw_local_irq_enable(void) +{ + native_irq_enable(); +} + +/* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +static inline void raw_safe_halt(void) +{ + native_safe_halt(); +} + +/* + * Used when interrupts are already enabled or to + * shutdown the processor: + */ +static inline void halt(void) +{ + native_halt(); +} + +/* + * For spinlocks, etc: + */ +static inline unsigned long __raw_local_irq_save(void) +{ + unsigned long flags = __raw_local_save_flags(); + + raw_local_irq_disable(); + + return flags; +} +#else + +#define ENABLE_INTERRUPTS(x) sti +#define DISABLE_INTERRUPTS(x) cli + +#ifdef CONFIG_X86_64 +#define INTERRUPT_RETURN iretq +#define ENABLE_INTERRUPTS_SYSCALL_RET \ + movq %gs:pda_oldrsp, %rsp; \ + swapgs; \ + sysretq; +#else +#define INTERRUPT_RETURN iret +#define ENABLE_INTERRUPTS_SYSCALL_RET sti; sysexit +#define GET_CR0_INTO_EAX movl %cr0, %eax +#endif + + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_PARAVIRT */ + +#ifndef __ASSEMBLY__ +#define raw_local_save_flags(flags) \ + do { (flags) = __raw_local_save_flags(); } while (0) + +#define raw_local_irq_save(flags) \ + do { (flags) = __raw_local_irq_save(); } while (0) + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & X86_EFLAGS_IF); +} + +static inline int raw_irqs_disabled(void) +{ + unsigned long flags = __raw_local_save_flags(); + + return raw_irqs_disabled_flags(flags); +} + +/* + * makes the traced hardirq state match with the machine state + * + * should be a rarely used function, only in places where its 
+ * otherwise impossible to know the irq state, like in traps. + */ +static inline void trace_hardirqs_fixup_flags(unsigned long flags) +{ + if (raw_irqs_disabled_flags(flags)) + trace_hardirqs_off(); + else + trace_hardirqs_on(); +} + +static inline void trace_hardirqs_fixup(void) +{ + unsigned long flags = __raw_local_save_flags(); + + trace_hardirqs_fixup_flags(flags); +} + #else -# include "irqflags_64.h" + +#ifdef CONFIG_X86_64 +/* + * Currently paravirt can't handle swapgs nicely when we + * don't have a stack we can rely on (such as a user space + * stack). So we either find a way around these or just fault + * and emulate if a guest tries to call swapgs directly. + * + * Either way, this is a good way to document that we don't + * have a reliable stack. x86_64 only. + */ +#define SWAPGS_UNSAFE_STACK swapgs +#define ARCH_TRACE_IRQS_ON call trace_hardirqs_on_thunk +#define ARCH_TRACE_IRQS_OFF call trace_hardirqs_off_thunk +#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk +#define ARCH_LOCKDEP_SYS_EXIT_IRQ \ + TRACE_IRQS_ON; \ + sti; \ + SAVE_REST; \ + LOCKDEP_SYS_EXIT; \ + RESTORE_REST; \ + cli; \ + TRACE_IRQS_OFF; + +#else +#define ARCH_TRACE_IRQS_ON \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call trace_hardirqs_on; \ + popl %edx; \ + popl %ecx; \ + popl %eax; + +#define ARCH_TRACE_IRQS_OFF \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call trace_hardirqs_off; \ + popl %edx; \ + popl %ecx; \ + popl %eax; + +#define ARCH_LOCKDEP_SYS_EXIT \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call lockdep_sys_exit; \ + popl %edx; \ + popl %ecx; \ + popl %eax; + +#define ARCH_LOCKDEP_SYS_EXIT_IRQ +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS +# define TRACE_IRQS_ON ARCH_TRACE_IRQS_ON +# define TRACE_IRQS_OFF ARCH_TRACE_IRQS_OFF +#else +# define TRACE_IRQS_ON +# define TRACE_IRQS_OFF +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT +# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ +# else +# define LOCKDEP_SYS_EXIT +# define LOCKDEP_SYS_EXIT_IRQ +# endif + +#endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-x86/irqflags_32.h b/include/asm-x86/irqflags_32.h deleted file mode 100644 index 4c7720089cb5..000000000000 --- a/include/asm-x86/irqflags_32.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - * include/asm-i386/irqflags.h - * - * IRQ flags handling - * - * This file gets included from lowlevel asm headers too, to provide - * wrapped versions of the local_irq_*() APIs, based on the - * raw_local_irq_*() functions from the lowlevel headers. 
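The raw helpers above are normally reached through the generic local_irq_save()/local_irq_restore() wrappers. A minimal sketch of the usual critical-section pattern (not taken from this patch; the counter is a made-up piece of per-CPU state):

#include <linux/irqflags.h>

static unsigned long my_counter;

/* Illustrative only: protect a short update against local interrupts. */
static void my_counter_bump(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* bottoms out in __raw_local_irq_save() */
        my_counter++;                   /* cannot be interrupted on this CPU */
        local_irq_restore(flags);       /* bottoms out in raw_local_irq_restore() */
}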
- */ -#ifndef _ASM_IRQFLAGS_H -#define _ASM_IRQFLAGS_H -#include <asm/processor-flags.h> - -#ifndef __ASSEMBLY__ -static inline unsigned long native_save_fl(void) -{ - unsigned long f; - asm volatile("pushfl ; popl %0":"=g" (f): /* no input */); - return f; -} - -static inline void native_restore_fl(unsigned long f) -{ - asm volatile("pushl %0 ; popfl": /* no output */ - :"g" (f) - :"memory", "cc"); -} - -static inline void native_irq_disable(void) -{ - asm volatile("cli": : :"memory"); -} - -static inline void native_irq_enable(void) -{ - asm volatile("sti": : :"memory"); -} - -static inline void native_safe_halt(void) -{ - asm volatile("sti; hlt": : :"memory"); -} - -static inline void native_halt(void) -{ - asm volatile("hlt": : :"memory"); -} -#endif /* __ASSEMBLY__ */ - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#ifndef __ASSEMBLY__ - -static inline unsigned long __raw_local_save_flags(void) -{ - return native_save_fl(); -} - -static inline void raw_local_irq_restore(unsigned long flags) -{ - native_restore_fl(flags); -} - -static inline void raw_local_irq_disable(void) -{ - native_irq_disable(); -} - -static inline void raw_local_irq_enable(void) -{ - native_irq_enable(); -} - -/* - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ -static inline void raw_safe_halt(void) -{ - native_safe_halt(); -} - -/* - * Used when interrupts are already enabled or to - * shutdown the processor: - */ -static inline void halt(void) -{ - native_halt(); -} - -/* - * For spinlocks, etc: - */ -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_disable(); - - return flags; -} - -#else -#define DISABLE_INTERRUPTS(clobbers) cli -#define ENABLE_INTERRUPTS(clobbers) sti -#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit -#define INTERRUPT_RETURN iret -#define GET_CR0_INTO_EAX movl %cr0, %eax -#endif /* __ASSEMBLY__ */ -#endif /* CONFIG_PARAVIRT */ - -#ifndef __ASSEMBLY__ -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return !(flags & X86_EFLAGS_IF); -} - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} - -/* - * makes the traced hardirq state match with the machine state - * - * should be a rarely used function, only in places where its - * otherwise impossible to know the irq state, like in traps. - */ -static inline void trace_hardirqs_fixup_flags(unsigned long flags) -{ - if (raw_irqs_disabled_flags(flags)) - trace_hardirqs_off(); - else - trace_hardirqs_on(); -} - -static inline void trace_hardirqs_fixup(void) -{ - unsigned long flags = __raw_local_save_flags(); - - trace_hardirqs_fixup_flags(flags); -} -#endif /* __ASSEMBLY__ */ - -/* - * Do the CPU's IRQ-state tracing from assembly code. 
We call a - * C function, so save all the C-clobbered registers: - */ -#ifdef CONFIG_TRACE_IRQFLAGS - -# define TRACE_IRQS_ON \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call trace_hardirqs_on; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - -# define TRACE_IRQS_OFF \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call trace_hardirqs_off; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - -#else -# define TRACE_IRQS_ON -# define TRACE_IRQS_OFF -#endif - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCKDEP_SYS_EXIT \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call lockdep_sys_exit; \ - popl %edx; \ - popl %ecx; \ - popl %eax; -#else -# define LOCKDEP_SYS_EXIT -#endif - -#endif diff --git a/include/asm-x86/irqflags_64.h b/include/asm-x86/irqflags_64.h deleted file mode 100644 index bb9163bb29d1..000000000000 --- a/include/asm-x86/irqflags_64.h +++ /dev/null @@ -1,176 +0,0 @@ -/* - * include/asm-x86_64/irqflags.h - * - * IRQ flags handling - * - * This file gets included from lowlevel asm headers too, to provide - * wrapped versions of the local_irq_*() APIs, based on the - * raw_local_irq_*() functions from the lowlevel headers. - */ -#ifndef _ASM_IRQFLAGS_H -#define _ASM_IRQFLAGS_H -#include <asm/processor-flags.h> - -#ifndef __ASSEMBLY__ -/* - * Interrupt control: - */ - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__( - "# __raw_save_flags\n\t" - "pushfq ; popq %q0" - : "=g" (flags) - : /* no input */ - : "memory" - ); - - return flags; -} - -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) -{ - __asm__ __volatile__( - "pushq %0 ; popfq" - : /* no output */ - :"g" (flags) - :"memory", "cc" - ); -} - -#ifdef CONFIG_X86_VSMP - -/* - * Interrupt control for the VSMP architecture: - */ - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); -} - -static inline void raw_local_irq_enable(void) -{ - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); -} - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); -} - -#else /* CONFIG_X86_VSMP */ - -static inline void raw_local_irq_disable(void) -{ - __asm__ __volatile__("cli" : : : "memory"); -} - -static inline void raw_local_irq_enable(void) -{ - __asm__ __volatile__("sti" : : : "memory"); -} - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return !(flags & X86_EFLAGS_IF); -} - -#endif - -/* - * For spinlocks, etc.: - */ - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_disable(); - - return flags; -} - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} - -/* - * makes the traced hardirq state match with the machine state - * - * should be a rarely used function, only in places where its - * otherwise impossible to know the irq state, like in traps. 
- */ -static inline void trace_hardirqs_fixup_flags(unsigned long flags) -{ - if (raw_irqs_disabled_flags(flags)) - trace_hardirqs_off(); - else - trace_hardirqs_on(); -} - -static inline void trace_hardirqs_fixup(void) -{ - unsigned long flags = __raw_local_save_flags(); - - trace_hardirqs_fixup_flags(flags); -} -/* - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ -static inline void raw_safe_halt(void) -{ - __asm__ __volatile__("sti; hlt" : : : "memory"); -} - -/* - * Used when interrupts are already enabled or to - * shutdown the processor: - */ -static inline void halt(void) -{ - __asm__ __volatile__("hlt": : :"memory"); -} - -#else /* __ASSEMBLY__: */ -# ifdef CONFIG_TRACE_IRQFLAGS -# define TRACE_IRQS_ON call trace_hardirqs_on_thunk -# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk -# else -# define TRACE_IRQS_ON -# define TRACE_IRQS_OFF -# endif -# ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk -# define LOCKDEP_SYS_EXIT_IRQ \ - TRACE_IRQS_ON; \ - sti; \ - SAVE_REST; \ - LOCKDEP_SYS_EXIT; \ - RESTORE_REST; \ - cli; \ - TRACE_IRQS_OFF; -# else -# define LOCKDEP_SYS_EXIT -# define LOCKDEP_SYS_EXIT_IRQ -# endif -#endif - -#endif diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h index 699dd6961eda..452e2b696ff4 100644 --- a/include/asm-x86/k8.h +++ b/include/asm-x86/k8.h @@ -10,5 +10,6 @@ extern struct pci_dev **k8_northbridges; extern int num_k8_northbridges; extern int cache_k8_northbridges(void); extern void k8_flush_garts(void); +extern int k8_scan_nodes(unsigned long start, unsigned long end); #endif diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index e2f9b62e535e..dd442a1632c0 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h @@ -22,12 +22,17 @@ enum die_val { DIE_PAGE_FAULT, }; -extern void printk_address(unsigned long address); +extern void printk_address(unsigned long address, int reliable); extern void die(const char *,struct pt_regs *,long); -extern void __die(const char *,struct pt_regs *,long); +extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_registers(struct pt_regs *regs); +extern void __show_registers(struct pt_regs *, int all); +extern void show_trace(struct task_struct *t, struct pt_regs *regs, + unsigned long *sp, unsigned long bp); +extern void __show_regs(struct pt_regs *regs); +extern void show_regs(struct pt_regs *regs); extern void dump_pagetable(unsigned long); extern unsigned long oops_begin(void); -extern void oops_end(unsigned long); +extern void oops_end(unsigned long, struct pt_regs *, int signr); #endif diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index 718ddbfb9516..c90d3c77afc2 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@ -1,5 +1,170 @@ +#ifndef _KEXEC_H +#define _KEXEC_H + #ifdef CONFIG_X86_32 -# include "kexec_32.h" +# define PA_CONTROL_PAGE 0 +# define VA_CONTROL_PAGE 1 +# define PA_PGD 2 +# define VA_PGD 3 +# define PA_PTE_0 4 +# define VA_PTE_0 5 +# define PA_PTE_1 6 +# define VA_PTE_1 7 +# ifdef CONFIG_X86_PAE +# define PA_PMD_0 8 +# define VA_PMD_0 9 +# define PA_PMD_1 10 +# define VA_PMD_1 11 +# define PAGES_NR 12 +# else +# define PAGES_NR 8 +# endif #else -# include "kexec_64.h" +# define PA_CONTROL_PAGE 0 +# define VA_CONTROL_PAGE 1 +# define PA_PGD 2 +# define VA_PGD 3 +# define PA_PUD_0 4 +# define VA_PUD_0 5 +# define PA_PMD_0 6 +# define VA_PMD_0 7 +# define PA_PTE_0 8 +# define VA_PTE_0 9 +# define PA_PUD_1 10 +# define VA_PUD_1 11 +# define 
PA_PMD_1 12 +# define VA_PMD_1 13 +# define PA_PTE_1 14 +# define VA_PTE_1 15 +# define PA_TABLE_PAGE 16 +# define PAGES_NR 17 #endif + +#ifndef __ASSEMBLY__ + +#include <linux/string.h> + +#include <asm/page.h> +#include <asm/ptrace.h> + +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. + * + * So far x86_64 is limited to 40 physical address bits. + */ +#ifdef CONFIG_X86_32 +/* Maximum physical address we can use pages from */ +# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE + +# define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +# define KEXEC_ARCH KEXEC_ARCH_386 + +/* We can also handle crash dumps from 64 bit kernel. */ +# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) +#else +/* Maximum physical address we can use pages from */ +# define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL) +/* Maximum address we can reach in physical address mode */ +# define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL) +/* Maximum address we can use for the control pages */ +# define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) + +/* Allocate one page for the pdp and the second for the code */ +# define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) + +/* The native architecture */ +# define KEXEC_ARCH KEXEC_ARCH_X86_64 +#endif + +/* + * CPU does not save ss and sp on stack if execution is already + * running in kernel mode at the time of NMI occurrence. This code + * fixes it. + */ +static inline void crash_fixup_ss_esp(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ +#ifdef CONFIG_X86_32 + newregs->sp = (unsigned long)&(oldregs->sp); + __asm__ __volatile__( + "xorl %%eax, %%eax\n\t" + "movw %%ss, %%ax\n\t" + :"=a"(newregs->ss)); +#endif +} + +/* + * This function is responsible for capturing register states if coming + * via panic otherwise just fix up the ss and sp if coming via kernel + * mode exception. 
+ */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) { + memcpy(newregs, oldregs, sizeof(*newregs)); + crash_fixup_ss_esp(newregs, oldregs); + } else { +#ifdef CONFIG_X86_32 + __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx)); + __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx)); + __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx)); + __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si)); + __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di)); + __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp)); + __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax)); + __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp)); + __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); + __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); + __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds)); + __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es)); + __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags)); +#else + __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx)); + __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx)); + __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx)); + __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si)); + __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di)); + __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp)); + __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax)); + __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp)); + __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); + __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); + __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); + __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); + __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); + __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); + __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); + __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); + __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); + __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); + __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags)); +#endif + newregs->ip = (unsigned long)current_text_addr(); + } +} + +#ifdef CONFIG_X86_32 +asmlinkage NORET_TYPE void +relocate_kernel(unsigned long indirection_page, + unsigned long control_page, + unsigned long start_address, + unsigned int has_pae) ATTRIB_NORET; +#else +NORET_TYPE void +relocate_kernel(unsigned long indirection_page, + unsigned long page_list, + unsigned long start_address) ATTRIB_NORET; +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _KEXEC_H */ diff --git a/include/asm-x86/kexec_32.h b/include/asm-x86/kexec_32.h deleted file mode 100644 index 4b9dc9e6b701..000000000000 --- a/include/asm-x86/kexec_32.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef _I386_KEXEC_H -#define _I386_KEXEC_H - -#define PA_CONTROL_PAGE 0 -#define VA_CONTROL_PAGE 1 -#define PA_PGD 2 -#define VA_PGD 3 -#define PA_PTE_0 4 -#define VA_PTE_0 5 -#define PA_PTE_1 6 -#define VA_PTE_1 7 -#ifdef CONFIG_X86_PAE -#define PA_PMD_0 8 -#define VA_PMD_0 9 -#define PA_PMD_1 10 -#define VA_PMD_1 11 -#define PAGES_NR 12 -#else -#define PAGES_NR 8 -#endif - -#ifndef __ASSEMBLY__ - -#include <asm/ptrace.h> -#include <asm/string.h> - -/* - * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. - * I.e. Maximum page that is mapped directly into kernel memory, - * and kmap is not required. 
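The unified crash_setup_regs() above is called with oldregs == NULL on the panic path and with the trap frame when coming from an exception; a caller only hands it a scratch pt_regs. A minimal sketch of the two cases (the caller below is made up, not part of this patch):

#include <linux/kexec.h>

/* Illustrative only: capture register state for a crash/kdump note. */
static void example_capture_state(struct pt_regs *oldregs)
{
        struct pt_regs regs;

        /*
         * From an exception/NMI handler: 'oldregs' is the trap frame and is
         * copied (with ss/sp fixed up on 32-bit).  From panic(): 'oldregs'
         * is NULL and the current register state is sampled inline.
         */
        crash_setup_regs(&regs, oldregs);

        /* 'regs' is now suitable for the crash ELF note. */
}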
- */ - -/* Maximum physical address we can use pages from */ -#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) -/* Maximum address we can reach in physical address mode */ -#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) -/* Maximum address we can use for the control code buffer */ -#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE - -#define KEXEC_CONTROL_CODE_SIZE 4096 - -/* The native architecture */ -#define KEXEC_ARCH KEXEC_ARCH_386 - -/* We can also handle crash dumps from 64 bit kernel. */ -#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) - -/* CPU does not save ss and esp on stack if execution is already - * running in kernel mode at the time of NMI occurrence. This code - * fixes it. - */ -static inline void crash_fixup_ss_esp(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - memcpy(newregs, oldregs, sizeof(*newregs)); - newregs->esp = (unsigned long)&(oldregs->esp); - __asm__ __volatile__( - "xorl %%eax, %%eax\n\t" - "movw %%ss, %%ax\n\t" - :"=a"(newregs->xss)); -} - -/* - * This function is responsible for capturing register states if coming - * via panic otherwise just fix up the ss and esp if coming via kernel - * mode exception. - */ -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - if (oldregs) - crash_fixup_ss_esp(newregs, oldregs); - else { - __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx)); - __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx)); - __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx)); - __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi)); - __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi)); - __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp)); - __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax)); - __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp)); - __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss)); - __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs)); - __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds)); - __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes)); - __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags)); - - newregs->eip = (unsigned long)current_text_addr(); - } -} -asmlinkage NORET_TYPE void -relocate_kernel(unsigned long indirection_page, - unsigned long control_page, - unsigned long start_address, - unsigned int has_pae) ATTRIB_NORET; - -#endif /* __ASSEMBLY__ */ - -#endif /* _I386_KEXEC_H */ diff --git a/include/asm-x86/kexec_64.h b/include/asm-x86/kexec_64.h deleted file mode 100644 index 738e581b67f8..000000000000 --- a/include/asm-x86/kexec_64.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef _X86_64_KEXEC_H -#define _X86_64_KEXEC_H - -#define PA_CONTROL_PAGE 0 -#define VA_CONTROL_PAGE 1 -#define PA_PGD 2 -#define VA_PGD 3 -#define PA_PUD_0 4 -#define VA_PUD_0 5 -#define PA_PMD_0 6 -#define VA_PMD_0 7 -#define PA_PTE_0 8 -#define VA_PTE_0 9 -#define PA_PUD_1 10 -#define VA_PUD_1 11 -#define PA_PMD_1 12 -#define VA_PMD_1 13 -#define PA_PTE_1 14 -#define VA_PTE_1 15 -#define PA_TABLE_PAGE 16 -#define PAGES_NR 17 - -#ifndef __ASSEMBLY__ - -#include <linux/string.h> - -#include <asm/page.h> -#include <asm/ptrace.h> - -/* - * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. - * I.e. Maximum page that is mapped directly into kernel memory, - * and kmap is not required. - * - * So far x86_64 is limited to 40 physical address bits. 
- */ - -/* Maximum physical address we can use pages from */ -#define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL) -/* Maximum address we can reach in physical address mode */ -#define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL) -/* Maximum address we can use for the control pages */ -#define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) - -/* Allocate one page for the pdp and the second for the code */ -#define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) - -/* The native architecture */ -#define KEXEC_ARCH KEXEC_ARCH_X86_64 - -/* - * Saving the registers of the cpu on which panic occured in - * crash_kexec to save a valid sp. The registers of other cpus - * will be saved in machine_crash_shutdown while shooting down them. - */ - -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - if (oldregs) - memcpy(newregs, oldregs, sizeof(*newregs)); - else { - __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx)); - __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx)); - __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx)); - __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi)); - __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi)); - __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp)); - __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax)); - __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp)); - __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); - __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); - __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); - __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); - __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); - __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); - __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); - __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); - __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags)); - - newregs->rip = (unsigned long)current_text_addr(); - } -} - -NORET_TYPE void -relocate_kernel(unsigned long indirection_page, - unsigned long page_list, - unsigned long start_address) ATTRIB_NORET; - -#endif /* __ASSEMBLY__ */ - -#endif /* _X86_64_KEXEC_H */ diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index b7bbd25ba2a6..143476a3cb52 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h @@ -1,5 +1,98 @@ -#ifdef CONFIG_X86_32 -# include "kprobes_32.h" -#else -# include "kprobes_64.h" -#endif +#ifndef _ASM_KPROBES_H +#define _ASM_KPROBES_H +/* + * Kernel Probes (KProbes) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004 + * + * See arch/x86/kernel/kprobes.c for x86 kprobes history. 
+ */ +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/percpu.h> + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +struct pt_regs; +struct kprobe; + +typedef u8 kprobe_opcode_t; +#define BREAKPOINT_INSTRUCTION 0xcc +#define RELATIVEJUMP_INSTRUCTION 0xe9 +#define MAX_INSN_SIZE 16 +#define MAX_STACK_SIZE 64 +#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ + (((unsigned long)current_thread_info()) + THREAD_SIZE \ + - (unsigned long)(ADDR))) \ + ? (MAX_STACK_SIZE) \ + : (((unsigned long)current_thread_info()) + THREAD_SIZE \ + - (unsigned long)(ADDR))) + +#define ARCH_SUPPORTS_KRETPROBES +#define flush_insn_slot(p) do { } while (0) + +extern const int kretprobe_blacklist_size; + +void arch_remove_kprobe(struct kprobe *p); +void kretprobe_trampoline(void); + +/* Architecture specific copy of original instruction*/ +struct arch_specific_insn { + /* copy of the original instruction */ + kprobe_opcode_t *insn; + /* + * boostable = -1: This instruction type is not boostable. + * boostable = 0: This instruction type is boostable. + * boostable = 1: This instruction has been boosted: we have + * added a relative jump after the instruction copy in insn, + * so no single-step and fixup are needed (unless there's + * a post_handler or break_handler). + */ + int boostable; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; + unsigned long old_flags; + unsigned long saved_flags; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + unsigned long kprobe_old_flags; + unsigned long kprobe_saved_flags; + unsigned long *jprobe_saved_sp; + struct pt_regs jprobe_saved_regs; + kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; + struct prev_kprobe prev_kprobe; +}; + +/* trap3/1 are intr gates for kprobes. So, restore the status of IF, + * if necessary, before executing the original int3/1 (trap) handler. + */ +static inline void restore_interrupts(struct pt_regs *regs) +{ + if (regs->flags & X86_EFLAGS_IF) + local_irq_enable(); +} + +extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); +extern int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); +#endif /* _ASM_KPROBES_H */ diff --git a/include/asm-x86/kprobes_32.h b/include/asm-x86/kprobes_32.h deleted file mode 100644 index 9fe8f3bddfd5..000000000000 --- a/include/asm-x86/kprobes_32.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef _ASM_KPROBES_H -#define _ASM_KPROBES_H -/* - * Kernel Probes (KProbes) - * include/asm-i386/kprobes.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2002, 2004 - * - * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel - * Probes initial implementation ( includes suggestions from - * Rusty Russell). 
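A minimal client of the unified kprobes header above looks like the standard sample module; the probed symbol below is an arbitrary example and the module is a sketch, not part of this patch:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative only: log every hit on the probed function. */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit at %p\n", p->addr);
        return 0;       /* continue with normal kprobe handling */
}

static struct kprobe example_kp = {
        .symbol_name    = "do_fork",    /* arbitrary example probe point */
        .pre_handler    = example_pre,
};

static int __init example_init(void)
{
        return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");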
- */ -#include <linux/types.h> -#include <linux/ptrace.h> - -#define __ARCH_WANT_KPROBES_INSN_SLOT - -struct kprobe; -struct pt_regs; - -typedef u8 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xcc -#define RELATIVEJUMP_INSTRUCTION 0xe9 -#define MAX_INSN_SIZE 16 -#define MAX_STACK_SIZE 64 -#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ - (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ - ? (MAX_STACK_SIZE) \ - : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) - -#define ARCH_SUPPORTS_KRETPROBES -#define flush_insn_slot(p) do { } while (0) - -extern const int kretprobe_blacklist_size; - -void arch_remove_kprobe(struct kprobe *p); -void kretprobe_trampoline(void); - -/* Architecture specific copy of original instruction*/ -struct arch_specific_insn { - /* copy of the original instruction */ - kprobe_opcode_t *insn; - /* - * If this flag is not 0, this kprobe can be boost when its - * post_handler and break_handler is not set. - */ - int boostable; -}; - -struct prev_kprobe { - struct kprobe *kp; - unsigned long status; - unsigned long old_eflags; - unsigned long saved_eflags; -}; - -/* per-cpu kprobe control block */ -struct kprobe_ctlblk { - unsigned long kprobe_status; - unsigned long kprobe_old_eflags; - unsigned long kprobe_saved_eflags; - unsigned long *jprobe_saved_esp; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; - struct prev_kprobe prev_kprobe; -}; - -/* trap3/1 are intr gates for kprobes. So, restore the status of IF, - * if necessary, before executing the original int3/1 (trap) handler. - */ -static inline void restore_interrupts(struct pt_regs *regs) -{ - if (regs->eflags & IF_MASK) - local_irq_enable(); -} - -extern int kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data); -extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); -#endif /* _ASM_KPROBES_H */ diff --git a/include/asm-x86/kprobes_64.h b/include/asm-x86/kprobes_64.h deleted file mode 100644 index 743d76218fc9..000000000000 --- a/include/asm-x86/kprobes_64.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _ASM_KPROBES_H -#define _ASM_KPROBES_H -/* - * Kernel Probes (KProbes) - * include/asm-x86_64/kprobes.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2002, 2004 - * - * 2004-Oct Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston - * kenistoj@us.ibm.com adopted from i386. - */ -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/percpu.h> - -#define __ARCH_WANT_KPROBES_INSN_SLOT - -struct pt_regs; -struct kprobe; - -typedef u8 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xcc -#define MAX_INSN_SIZE 15 -#define MAX_STACK_SIZE 64 -#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ - (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ - ? 
(MAX_STACK_SIZE) \ - : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) - -#define ARCH_SUPPORTS_KRETPROBES -extern const int kretprobe_blacklist_size; - -void kretprobe_trampoline(void); -extern void arch_remove_kprobe(struct kprobe *p); -#define flush_insn_slot(p) do { } while (0) - -/* Architecture specific copy of original instruction*/ -struct arch_specific_insn { - /* copy of the original instruction */ - kprobe_opcode_t *insn; -}; - -struct prev_kprobe { - struct kprobe *kp; - unsigned long status; - unsigned long old_rflags; - unsigned long saved_rflags; -}; - -/* per-cpu kprobe control block */ -struct kprobe_ctlblk { - unsigned long kprobe_status; - unsigned long kprobe_old_rflags; - unsigned long kprobe_saved_rflags; - unsigned long *jprobe_saved_rsp; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; - struct prev_kprobe prev_kprobe; -}; - -/* trap3/1 are intr gates for kprobes. So, restore the status of IF, - * if necessary, before executing the original int3/1 (trap) handler. - */ -static inline void restore_interrupts(struct pt_regs *regs) -{ - if (regs->eflags & IF_MASK) - local_irq_enable(); -} - -extern int post_kprobe_handler(struct pt_regs *regs); -extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); -extern int kprobe_handler(struct pt_regs *regs); - -extern int kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data); -#endif /* _ASM_KPROBES_H */ diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h new file mode 100644 index 000000000000..7a71120426a3 --- /dev/null +++ b/include/asm-x86/kvm.h @@ -0,0 +1,191 @@ +#ifndef __LINUX_KVM_X86_H +#define __LINUX_KVM_X86_H + +/* + * KVM x86 specific structures and definitions + * + */ + +#include <asm/types.h> +#include <linux/ioctl.h> + +/* Architectural interrupt line count. 
*/ +#define KVM_NR_INTERRUPTS 256 + +struct kvm_memory_alias { + __u32 slot; /* this has a different namespace than memory slots */ + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 target_phys_addr; +}; + +/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ +struct kvm_pic_state { + __u8 last_irr; /* edge detection */ + __u8 irr; /* interrupt request register */ + __u8 imr; /* interrupt mask register */ + __u8 isr; /* interrupt service register */ + __u8 priority_add; /* highest irq priority */ + __u8 irq_base; + __u8 read_reg_select; + __u8 poll; + __u8 special_mask; + __u8 init_state; + __u8 auto_eoi; + __u8 rotate_on_auto_eoi; + __u8 special_fully_nested_mode; + __u8 init4; /* true if 4 byte init */ + __u8 elcr; /* PIIX edge/trigger selection */ + __u8 elcr_mask; +}; + +#define KVM_IOAPIC_NUM_PINS 24 +struct kvm_ioapic_state { + __u64 base_address; + __u32 ioregsel; + __u32 id; + __u32 irr; + __u32 pad; + union { + __u64 bits; + struct { + __u8 vector; + __u8 delivery_mode:3; + __u8 dest_mode:1; + __u8 delivery_status:1; + __u8 polarity:1; + __u8 remote_irr:1; + __u8 trig_mode:1; + __u8 mask:1; + __u8 reserve:7; + __u8 reserved[4]; + __u8 dest_id; + } fields; + } redirtbl[KVM_IOAPIC_NUM_PINS]; +}; + +#define KVM_IRQCHIP_PIC_MASTER 0 +#define KVM_IRQCHIP_PIC_SLAVE 1 +#define KVM_IRQCHIP_IOAPIC 2 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 rax, rbx, rcx, rdx; + __u64 rsi, rdi, rsp, rbp; + __u64 r8, r9, r10, r11; + __u64 r12, r13, r14, r15; + __u64 rip, rflags; +}; + +/* for KVM_GET_LAPIC and KVM_SET_LAPIC */ +#define KVM_APIC_REG_SIZE 0x400 +struct kvm_lapic_state { + char regs[KVM_APIC_REG_SIZE]; +}; + +struct kvm_segment { + __u64 base; + __u32 limit; + __u16 selector; + __u8 type; + __u8 present, dpl, db, s, l, g, avl; + __u8 unusable; + __u8 padding; +}; + +struct kvm_dtable { + __u64 base; + __u16 limit; + __u16 padding[3]; +}; + + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { + /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */ + struct kvm_segment cs, ds, es, fs, gs, ss; + struct kvm_segment tr, ldt; + struct kvm_dtable gdt, idt; + __u64 cr0, cr2, cr3, cr4, cr8; + __u64 efer; + __u64 apic_base; + __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { + __u8 fpr[8][16]; + __u16 fcw; + __u16 fsw; + __u8 ftwx; /* in fxsave format */ + __u8 pad1; + __u16 last_opcode; + __u64 last_ip; + __u64 last_dp; + __u8 xmm[16][16]; + __u32 mxcsr; + __u32 pad2; +}; + +struct kvm_msr_entry { + __u32 index; + __u32 reserved; + __u64 data; +}; + +/* for KVM_GET_MSRS and KVM_SET_MSRS */ +struct kvm_msrs { + __u32 nmsrs; /* number of msrs in entries */ + __u32 pad; + + struct kvm_msr_entry entries[0]; +}; + +/* for KVM_GET_MSR_INDEX_LIST */ +struct kvm_msr_list { + __u32 nmsrs; /* number of msrs in entries */ + __u32 indices[0]; +}; + + +struct kvm_cpuid_entry { + __u32 function; + __u32 eax; + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 padding; +}; + +/* for KVM_SET_CPUID */ +struct kvm_cpuid { + __u32 nent; + __u32 padding; + struct kvm_cpuid_entry entries[0]; +}; + +struct kvm_cpuid_entry2 { + __u32 function; + __u32 index; + __u32 flags; + __u32 eax; + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 padding[3]; +}; + +#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1 +#define KVM_CPUID_FLAG_STATEFUL_FUNC 2 +#define KVM_CPUID_FLAG_STATE_READ_NEXT 4 + +/* for KVM_SET_CPUID2 */ +struct kvm_cpuid2 { + __u32 nent; + __u32 padding; + struct 
kvm_cpuid_entry2 entries[0]; +}; + +#endif diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h new file mode 100644 index 000000000000..4702b04b979a --- /dev/null +++ b/include/asm-x86/kvm_host.h @@ -0,0 +1,611 @@ +#/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines architecture specific interfaces, x86 version + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef ASM_KVM_HOST_H +#define ASM_KVM_HOST_H + +#include <linux/types.h> +#include <linux/mm.h> + +#include <linux/kvm.h> +#include <linux/kvm_para.h> +#include <linux/kvm_types.h> + +#include <asm/desc.h> + +#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) +#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) +#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL) + +#define KVM_GUEST_CR0_MASK \ + (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ + | X86_CR0_NW | X86_CR0_CD) +#define KVM_VM_CR0_ALWAYS_ON \ + (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ + | X86_CR0_MP) +#define KVM_GUEST_CR4_MASK \ + (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) +#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) +#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) + +#define INVALID_PAGE (~(hpa_t)0) +#define UNMAPPED_GVA (~(gpa_t)0) + +#define DE_VECTOR 0 +#define UD_VECTOR 6 +#define NM_VECTOR 7 +#define DF_VECTOR 8 +#define TS_VECTOR 10 +#define NP_VECTOR 11 +#define SS_VECTOR 12 +#define GP_VECTOR 13 +#define PF_VECTOR 14 + +#define SELECTOR_TI_MASK (1 << 2) +#define SELECTOR_RPL_MASK 0x03 + +#define IOPL_SHIFT 12 + +#define KVM_ALIAS_SLOTS 4 + +#define KVM_PERMILLE_MMU_PAGES 20 +#define KVM_MIN_ALLOC_MMU_PAGES 64 +#define KVM_NUM_MMU_PAGES 1024 +#define KVM_MIN_FREE_MMU_PAGES 5 +#define KVM_REFILL_PAGES 25 +#define KVM_MAX_CPUID_ENTRIES 40 + +extern spinlock_t kvm_lock; +extern struct list_head vm_list; + +struct kvm_vcpu; +struct kvm; + +enum { + VCPU_REGS_RAX = 0, + VCPU_REGS_RCX = 1, + VCPU_REGS_RDX = 2, + VCPU_REGS_RBX = 3, + VCPU_REGS_RSP = 4, + VCPU_REGS_RBP = 5, + VCPU_REGS_RSI = 6, + VCPU_REGS_RDI = 7, +#ifdef CONFIG_X86_64 + VCPU_REGS_R8 = 8, + VCPU_REGS_R9 = 9, + VCPU_REGS_R10 = 10, + VCPU_REGS_R11 = 11, + VCPU_REGS_R12 = 12, + VCPU_REGS_R13 = 13, + VCPU_REGS_R14 = 14, + VCPU_REGS_R15 = 15, +#endif + NR_VCPU_REGS +}; + +enum { + VCPU_SREG_CS, + VCPU_SREG_DS, + VCPU_SREG_ES, + VCPU_SREG_FS, + VCPU_SREG_GS, + VCPU_SREG_SS, + VCPU_SREG_TR, + VCPU_SREG_LDTR, +}; + +#include <asm/kvm_x86_emulate.h> + +#define KVM_NR_MEM_OBJS 40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. 
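The structures in the new asm-x86/kvm.h above are the userspace ABI payloads of the corresponding vcpu ioctls. A rough userspace sketch, assuming 'vcpu_fd' was obtained from a prior KVM_CREATE_VM + KVM_CREATE_VCPU sequence (not taken from this patch):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: read back the general-purpose registers of a vcpu. */
static int dump_vcpu_regs(int vcpu_fd)
{
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
                perror("KVM_GET_REGS");
                return -1;
        }

        printf("rip=%llx rsp=%llx rflags=%llx\n",
               (unsigned long long)regs.rip,
               (unsigned long long)regs.rsp,
               (unsigned long long)regs.rflags);
        return 0;
}

The same struct is written back with KVM_SET_REGS; kvm_sregs, kvm_fpu and kvm_msrs follow the same get/set pattern.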
+ */ +struct kvm_mmu_memory_cache { + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +}; + +#define NR_PTE_CHAIN_ENTRIES 5 + +struct kvm_pte_chain { + u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES]; + struct hlist_node link; +}; + +/* + * kvm_mmu_page_role, below, is defined as: + * + * bits 0:3 - total guest paging levels (2-4, or zero for real mode) + * bits 4:7 - page table level for this shadow (1-4) + * bits 8:9 - page table quadrant for 2-level guests + * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) + * bits 17:19 - common access permissions for all ptes in this shadow page + */ +union kvm_mmu_page_role { + unsigned word; + struct { + unsigned glevels : 4; + unsigned level : 4; + unsigned quadrant : 2; + unsigned pad_for_nice_hex_output : 6; + unsigned metaphysical : 1; + unsigned access : 3; + }; +}; + +struct kvm_mmu_page { + struct list_head link; + struct hlist_node hash_link; + + /* + * The following two entries are used to key the shadow page in the + * hash table. + */ + gfn_t gfn; + union kvm_mmu_page_role role; + + u64 *spt; + /* hold the gfn of each spte inside spt */ + gfn_t *gfns; + unsigned long slot_bitmap; /* One bit set per slot which has memory + * in this shadow page. + */ + int multimapped; /* More than one parent_pte? */ + int root_count; /* Currently serving as active root */ + union { + u64 *parent_pte; /* !multimapped */ + struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ + }; +}; + +/* + * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level + * 32-bit). The kvm_mmu structure abstracts the details of the current mmu + * mode. + */ +struct kvm_mmu { + void (*new_cr3)(struct kvm_vcpu *vcpu); + int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); + void (*free)(struct kvm_vcpu *vcpu); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); + void (*prefetch_page)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *page); + hpa_t root_hpa; + int root_level; + int shadow_root_level; + + u64 *pae_root; +}; + +struct kvm_vcpu_arch { + u64 host_tsc; + int interrupt_window_open; + unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ + DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS); + unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */ + unsigned long rip; /* needs vcpu_load_rsp_rip() */ + + unsigned long cr0; + unsigned long cr2; + unsigned long cr3; + unsigned long cr4; + unsigned long cr8; + u64 pdptrs[4]; /* pae */ + u64 shadow_efer; + u64 apic_base; + struct kvm_lapic *apic; /* kernel irqchip context */ +#define VCPU_MP_STATE_RUNNABLE 0 +#define VCPU_MP_STATE_UNINITIALIZED 1 +#define VCPU_MP_STATE_INIT_RECEIVED 2 +#define VCPU_MP_STATE_SIPI_RECEIVED 3 +#define VCPU_MP_STATE_HALTED 4 + int mp_state; + int sipi_vector; + u64 ia32_misc_enable_msr; + bool tpr_access_reporting; + + struct kvm_mmu mmu; + + struct kvm_mmu_memory_cache mmu_pte_chain_cache; + struct kvm_mmu_memory_cache mmu_rmap_desc_cache; + struct kvm_mmu_memory_cache mmu_page_cache; + struct kvm_mmu_memory_cache mmu_page_header_cache; + + gfn_t last_pt_write_gfn; + int last_pt_write_count; + u64 *last_pte_updated; + + struct { + gfn_t gfn; /* presumed gfn during guest pte update */ + struct page *page; /* page corresponding to that gfn */ + } update_pte; + + struct i387_fxsave_struct host_fx_image; + struct i387_fxsave_struct guest_fx_image; + + gva_t mmio_fault_cr2; + struct kvm_pio_request pio; + void *pio_data; + + struct kvm_queued_exception { + bool pending; + bool has_error_code; + u8 nr; + u32 error_code; + } 
exception; + + struct { + int active; + u8 save_iopl; + struct kvm_save_segment { + u16 selector; + unsigned long base; + u32 limit; + u32 ar; + } tr, es, ds, fs, gs; + } rmode; + int halt_request; /* real mode on Intel only */ + + int cpuid_nent; + struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES]; + /* emulate context */ + + struct x86_emulate_ctxt emulate_ctxt; +}; + +struct kvm_mem_alias { + gfn_t base_gfn; + unsigned long npages; + gfn_t target_gfn; +}; + +struct kvm_arch{ + int naliases; + struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; + + unsigned int n_free_mmu_pages; + unsigned int n_requested_mmu_pages; + unsigned int n_alloc_mmu_pages; + struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; + /* + * Hash table of struct kvm_mmu_page. + */ + struct list_head active_mmu_pages; + struct kvm_pic *vpic; + struct kvm_ioapic *vioapic; + + int round_robin_prev_vcpu; + unsigned int tss_addr; + struct page *apic_access_page; +}; + +struct kvm_vm_stat { + u32 mmu_shadow_zapped; + u32 mmu_pte_write; + u32 mmu_pte_updated; + u32 mmu_pde_zapped; + u32 mmu_flooded; + u32 mmu_recycled; + u32 mmu_cache_miss; + u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { + u32 pf_fixed; + u32 pf_guest; + u32 tlb_flush; + u32 invlpg; + + u32 exits; + u32 io_exits; + u32 mmio_exits; + u32 signal_exits; + u32 irq_window_exits; + u32 halt_exits; + u32 halt_wakeup; + u32 request_irq_exits; + u32 irq_exits; + u32 host_state_reload; + u32 efer_reload; + u32 fpu_reload; + u32 insn_emulation; + u32 insn_emulation_fail; +}; + +struct descriptor_table { + u16 limit; + unsigned long base; +} __attribute__((packed)); + +struct kvm_x86_ops { + int (*cpu_has_kvm_support)(void); /* __init */ + int (*disabled_by_bios)(void); /* __init */ + void (*hardware_enable)(void *dummy); /* __init */ + void (*hardware_disable)(void *dummy); + void (*check_processor_compatibility)(void *rtn); + int (*hardware_setup)(void); /* __init */ + void (*hardware_unsetup)(void); /* __exit */ + bool (*cpu_has_accelerated_tpr)(void); + + /* Create, but do not attach this VCPU */ + struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); + void (*vcpu_free)(struct kvm_vcpu *vcpu); + int (*vcpu_reset)(struct kvm_vcpu *vcpu); + + void (*prepare_guest_switch)(struct kvm_vcpu *vcpu); + void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); + void (*vcpu_put)(struct kvm_vcpu *vcpu); + void (*vcpu_decache)(struct kvm_vcpu *vcpu); + + int (*set_guest_debug)(struct kvm_vcpu *vcpu, + struct kvm_debug_guest *dbg); + void (*guest_debug_pre)(struct kvm_vcpu *vcpu); + int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); + int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); + u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); + void (*get_segment)(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg); + void (*set_segment)(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg); + void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); + void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); + void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); + void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); + void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); + void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); + void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); + void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); + void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); + void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); 
+ unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); + void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, + int *exception); + void (*cache_regs)(struct kvm_vcpu *vcpu); + void (*decache_regs)(struct kvm_vcpu *vcpu); + unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); + void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); + + void (*tlb_flush)(struct kvm_vcpu *vcpu); + + void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); + int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); + void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); + void (*patch_hypercall)(struct kvm_vcpu *vcpu, + unsigned char *hypercall_addr); + int (*get_irq)(struct kvm_vcpu *vcpu); + void (*set_irq)(struct kvm_vcpu *vcpu, int vec); + void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, + bool has_error_code, u32 error_code); + bool (*exception_injected)(struct kvm_vcpu *vcpu); + void (*inject_pending_irq)(struct kvm_vcpu *vcpu); + void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, + struct kvm_run *run); + + int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); +}; + +extern struct kvm_x86_ops *kvm_x86_ops; + +int kvm_mmu_module_init(void); +void kvm_mmu_module_exit(void); + +void kvm_mmu_destroy(struct kvm_vcpu *vcpu); +int kvm_mmu_create(struct kvm_vcpu *vcpu); +int kvm_mmu_setup(struct kvm_vcpu *vcpu); +void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); + +int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); +void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); +void kvm_mmu_zap_all(struct kvm *kvm); +unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ + EMULATE_FAIL, /* can't emulate this instruction */ +}; + +#define EMULTYPE_NO_DECODE (1 << 0) +#define EMULTYPE_TRAP_UD (1 << 1) +int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long cr2, u16 error_code, int emulation_type); +void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); +void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); +void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); +void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, + unsigned long *rflags); + +unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr); +void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, + unsigned long *rflags); +int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); +int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); + +struct x86_emulate_ctxt; + +int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, + int size, unsigned port); +int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, + int size, unsigned long count, int down, + gva_t address, int rep, unsigned port); +void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); +int kvm_emulate_halt(struct kvm_vcpu *vcpu); +int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); +int emulate_clts(struct kvm_vcpu *vcpu); +int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long *dest); +int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long value); + +void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); +void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0); +void set_cr4(struct kvm_vcpu *vcpu, 
unsigned long cr0); +void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); +unsigned long get_cr8(struct kvm_vcpu *vcpu); +void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); +void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); + +int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); +int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); + +void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); +void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); +void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, + u32 error_code); + +void fx_init(struct kvm_vcpu *vcpu); + +int emulator_read_std(unsigned long addr, + void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); +int emulator_write_emulated(unsigned long addr, + const void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); + +unsigned long segment_base(u16 selector); + +void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); +void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *new, int bytes); +int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); +void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); +int kvm_mmu_load(struct kvm_vcpu *vcpu); +void kvm_mmu_unload(struct kvm_vcpu *vcpu); + +int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); + +int kvm_fix_hypercall(struct kvm_vcpu *vcpu); + +int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); + +int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); +int complete_pio(struct kvm_vcpu *vcpu); + +static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) +{ + struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); + + return (struct kvm_mmu_page *)page_private(page); +} + +static inline u16 read_fs(void) +{ + u16 seg; + asm("mov %%fs, %0" : "=g"(seg)); + return seg; +} + +static inline u16 read_gs(void) +{ + u16 seg; + asm("mov %%gs, %0" : "=g"(seg)); + return seg; +} + +static inline u16 read_ldt(void) +{ + u16 ldt; + asm("sldt %0" : "=g"(ldt)); + return ldt; +} + +static inline void load_fs(u16 sel) +{ + asm("mov %0, %%fs" : : "rm"(sel)); +} + +static inline void load_gs(u16 sel) +{ + asm("mov %0, %%gs" : : "rm"(sel)); +} + +#ifndef load_ldt +static inline void load_ldt(u16 sel) +{ + asm("lldt %0" : : "rm"(sel)); +} +#endif + +static inline void get_idt(struct descriptor_table *table) +{ + asm("sidt %0" : "=m"(*table)); +} + +static inline void get_gdt(struct descriptor_table *table) +{ + asm("sgdt %0" : "=m"(*table)); +} + +static inline unsigned long read_tr_base(void) +{ + u16 tr; + asm("str %0" : "=g"(tr)); + return segment_base(tr); +} + +#ifdef CONFIG_X86_64 +static inline unsigned long read_msr(unsigned long msr) +{ + u64 value; + + rdmsrl(msr, value); + return value; +} +#endif + +static inline void fx_save(struct i387_fxsave_struct *image) +{ + asm("fxsave (%0)":: "r" (image)); +} + +static inline void fx_restore(struct i387_fxsave_struct *image) +{ + asm("fxrstor (%0)":: "r" (image)); +} + +static inline void fpu_init(void) +{ + asm("finit"); +} + +static inline u32 get_rdx_init_val(void) +{ + return 0x600; /* P6 family */ +} + +static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) +{ + kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); +} + +#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" +#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" +#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" +#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" +#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" 
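A short sketch of how a hardware backend might use the inline helpers above to capture host state before switching to the guest; the structure and function names other than the helpers themselves are invented for illustration, and this is not the actual vmx.c/svm.c code.

struct example_host_state {
        struct descriptor_table gdt;
        struct descriptor_table idt;
        unsigned long tr_base;
        u16 fs_sel, gs_sel;
};

static void example_snapshot_host_state(struct example_host_state *hs,
                                        struct i387_fxsave_struct *fx_image)
{
        get_gdt(&hs->gdt);              /* sgdt */
        get_idt(&hs->idt);              /* sidt */
        hs->tr_base = read_tr_base();   /* str, then segment_base() */
        hs->fs_sel = read_fs();
        hs->gs_sel = read_gs();
        fx_save(fx_image);              /* fxsave; image must be 16-byte aligned */
}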
+#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" +#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" +#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" +#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" + +#define MSR_IA32_TIME_STAMP_COUNTER 0x010 + +#define TSS_IOPB_BASE_OFFSET 0x66 +#define TSS_BASE_SIZE 0x68 +#define TSS_IOPB_SIZE (65536 / 8) +#define TSS_REDIRECTION_SIZE (256 / 8) +#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) + +#endif diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h new file mode 100644 index 000000000000..c6f3fd8d8c53 --- /dev/null +++ b/include/asm-x86/kvm_para.h @@ -0,0 +1,105 @@ +#ifndef __X86_KVM_PARA_H +#define __X86_KVM_PARA_H + +/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It + * should be used to determine that a VM is running under KVM. + */ +#define KVM_CPUID_SIGNATURE 0x40000000 + +/* This CPUID returns a feature bitmap in eax. Before enabling a particular + * paravirtualization, the appropriate feature bit should be checked. + */ +#define KVM_CPUID_FEATURES 0x40000001 + +#ifdef __KERNEL__ +#include <asm/processor.h> + +/* This instruction is vmcall. On non-VT architectures, it will generate a + * trap that we will then rewrite to the appropriate instruction. + */ +#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" + +/* For KVM hypercalls, a three-byte sequence of either the vmrun or the vmmrun + * instruction. The hypervisor may replace it with something else but only the + * instructions are guaranteed to be supported. + * + * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively. + * The hypercall number should be placed in rax and the return value will be + * placed in rax. No other registers will be clobbered unless explicited + * noted by the particular hypercall. 
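To make the calling convention described above concrete, here is a sketch of guest-side code using the kvm_hypercallN() wrappers and kvm_para_available() defined further down in this header. The hypercall number (42) and the error code chosen are purely illustrative and do not correspond to a real KVM hypercall.

static int example_try_hypercall(unsigned long gpa)
{
        long ret;

        if (!kvm_para_available())      /* not running under KVM */
                return -ENOSYS;

        /* nr goes in rax, first argument in rbx, return value in rax */
        ret = kvm_hypercall1(42, gpa);
        return (int)ret;
}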
+ */ + +static inline long kvm_hypercall0(unsigned int nr) +{ + long ret; + asm volatile(KVM_HYPERCALL + : "=a"(ret) + : "a"(nr)); + return ret; +} + +static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) +{ + long ret; + asm volatile(KVM_HYPERCALL + : "=a"(ret) + : "a"(nr), "b"(p1)); + return ret; +} + +static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, + unsigned long p2) +{ + long ret; + asm volatile(KVM_HYPERCALL + : "=a"(ret) + : "a"(nr), "b"(p1), "c"(p2)); + return ret; +} + +static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3) +{ + long ret; + asm volatile(KVM_HYPERCALL + : "=a"(ret) + : "a"(nr), "b"(p1), "c"(p2), "d"(p3)); + return ret; +} + +static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4) +{ + long ret; + asm volatile(KVM_HYPERCALL + : "=a"(ret) + : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)); + return ret; +} + +static inline int kvm_para_available(void) +{ + unsigned int eax, ebx, ecx, edx; + char signature[13]; + + cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); + memcpy(signature + 0, &ebx, 4); + memcpy(signature + 4, &ecx, 4); + memcpy(signature + 8, &edx, 4); + signature[12] = 0; + + if (strcmp(signature, "KVMKVMKVM") == 0) + return 1; + + return 0; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return cpuid_eax(KVM_CPUID_FEATURES); +} + +#endif + +#endif diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h new file mode 100644 index 000000000000..7db91b9bdcd4 --- /dev/null +++ b/include/asm-x86/kvm_x86_emulate.h @@ -0,0 +1,186 @@ +/****************************************************************************** + * x86_emulate.h + * + * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. + * + * Copyright (c) 2005 Keir Fraser + * + * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 + */ + +#ifndef __X86_EMULATE_H__ +#define __X86_EMULATE_H__ + +struct x86_emulate_ctxt; + +/* + * x86_emulate_ops: + * + * These operations represent the instruction emulator's interface to memory. + * There are two categories of operation: those that act on ordinary memory + * regions (*_std), and those that act on memory regions known to require + * special treatment or emulation (*_emulated). + * + * The emulator assumes that an instruction accesses only one 'emulated memory' + * location, that this location is the given linear faulting address (cr2), and + * that this is one of the instruction's data operands. Instruction fetches and + * stack operations are assumed never to access emulated memory. The emulator + * automatically deduces which operand of a string-move operation is accessing + * emulated memory, and assumes that the other operand accesses normal memory. + * + * NOTES: + * 1. The emulator isn't very smart about emulated vs. standard memory. + * 'Emulated memory' access addresses should be checked for sanity. + * 'Normal memory' accesses may fault, and the caller must arrange to + * detect and handle reentrancy into the emulator via recursive faults. + * Accesses may be unaligned and may cross page boundaries. + * 2. If the access fails (cannot emulate, or a standard access faults) then + * it is up to the memop to propagate the fault to the guest VM via + * some out-of-band mechanism, unknown to the emulator. The memop signals + * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will + * then immediately bail. 
+ * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only + * cmpxchg8b_emulated need support 8-byte accesses. + * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system. + */ +/* Access completed successfully: continue emulation as normal. */ +#define X86EMUL_CONTINUE 0 +/* Access is unhandleable: bail from emulation and return error to caller. */ +#define X86EMUL_UNHANDLEABLE 1 +/* Terminate emulation but return success to the caller. */ +#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */ +#define X86EMUL_RETRY_INSTR 2 /* retry the instruction for some reason */ +#define X86EMUL_CMPXCHG_FAILED 2 /* cmpxchg did not see expected value */ +struct x86_emulate_ops { + /* + * read_std: Read bytes of standard (non-emulated/special) memory. + * Used for instruction fetch, stack operations, and others. + * @addr: [IN ] Linear address from which to read. + * @val: [OUT] Value read from memory, zero-extended to 'u_long'. + * @bytes: [IN ] Number of bytes to read from memory. + */ + int (*read_std)(unsigned long addr, void *val, + unsigned int bytes, struct kvm_vcpu *vcpu); + + /* + * read_emulated: Read bytes from emulated/special memory area. + * @addr: [IN ] Linear address from which to read. + * @val: [OUT] Value read from memory, zero-extended to 'u_long'. + * @bytes: [IN ] Number of bytes to read from memory. + */ + int (*read_emulated) (unsigned long addr, + void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); + + /* + * write_emulated: Read bytes from emulated/special memory area. + * @addr: [IN ] Linear address to which to write. + * @val: [IN ] Value to write to memory (low-order bytes used as + * required). + * @bytes: [IN ] Number of bytes to write to memory. + */ + int (*write_emulated) (unsigned long addr, + const void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); + + /* + * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an + * emulated/special memory area. + * @addr: [IN ] Linear address to access. + * @old: [IN ] Value expected to be current at @addr. + * @new: [IN ] Value to write to @addr. + * @bytes: [IN ] Number of bytes to access using CMPXCHG. + */ + int (*cmpxchg_emulated) (unsigned long addr, + const void *old, + const void *new, + unsigned int bytes, + struct kvm_vcpu *vcpu); + +}; + +/* Type, address-of, and value of an instruction's operand. */ +struct operand { + enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; + unsigned int bytes; + unsigned long val, orig_val, *ptr; +}; + +struct fetch_cache { + u8 data[15]; + unsigned long start; + unsigned long end; +}; + +struct decode_cache { + u8 twobyte; + u8 b; + u8 lock_prefix; + u8 rep_prefix; + u8 op_bytes; + u8 ad_bytes; + u8 rex_prefix; + struct operand src; + struct operand dst; + unsigned long *override_base; + unsigned int d; + unsigned long regs[NR_VCPU_REGS]; + unsigned long eip; + /* modrm */ + u8 modrm; + u8 modrm_mod; + u8 modrm_reg; + u8 modrm_rm; + u8 use_modrm_ea; + unsigned long modrm_ea; + unsigned long modrm_val; + struct fetch_cache fetch; +}; + +struct x86_emulate_ctxt { + /* Register state before/after emulation. */ + struct kvm_vcpu *vcpu; + + /* Linear faulting address (if emulating a page-faulting instruction). */ + unsigned long eflags; + + /* Emulated execution mode, represented by an X86EMUL_MODE value. 
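A minimal sketch of how a caller is expected to wire up the callback table and run the emulator, using the x86_decode_insn()/x86_emulate_insn() entry points declared at the bottom of this header. The stub callbacks below simply refuse every access and exist only to show the shape of the interface; the real caller is KVM's emulate_instruction(), which supplies callbacks backed by emulator_read_std() and emulator_write_emulated(). The sketch treats a nonzero return from x86_decode_insn() as a decode failure.

static int example_read(unsigned long addr, void *val, unsigned int bytes,
                        struct kvm_vcpu *vcpu)
{
        return X86EMUL_UNHANDLEABLE;
}

static int example_write(unsigned long addr, const void *val,
                         unsigned int bytes, struct kvm_vcpu *vcpu)
{
        return X86EMUL_UNHANDLEABLE;
}

static int example_cmpxchg(unsigned long addr, const void *old,
                           const void *new, unsigned int bytes,
                           struct kvm_vcpu *vcpu)
{
        return X86EMUL_UNHANDLEABLE;
}

static struct x86_emulate_ops example_ops = {
        .read_std         = example_read,
        .read_emulated    = example_read,
        .write_emulated   = example_write,
        .cmpxchg_emulated = example_cmpxchg,
};

static int example_run_emulator(struct x86_emulate_ctxt *ctxt)
{
        int rc;

        rc = x86_decode_insn(ctxt, &example_ops);       /* fills ctxt->decode */
        if (rc)
                return rc;
        return x86_emulate_insn(ctxt, &example_ops);
}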
*/ + int mode; + + unsigned long cs_base; + unsigned long ds_base; + unsigned long es_base; + unsigned long ss_base; + unsigned long gs_base; + unsigned long fs_base; + + /* decode cache */ + + struct decode_cache decode; +}; + +/* Repeat String Operation Prefix */ +#define REPE_PREFIX 1 +#define REPNE_PREFIX 2 + +/* Execution mode, passed to the emulator. */ +#define X86EMUL_MODE_REAL 0 /* Real mode. */ +#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ +#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ +#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ + +/* Host execution mode. */ +#if defined(__i386__) +#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 +#elif defined(CONFIG_X86_64) +#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 +#endif + +int x86_decode_insn(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops); +int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops); + +#endif /* __X86_EMULATE_H__ */ diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index ccd338460811..4d9367b72976 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h @@ -44,19 +44,19 @@ struct lguest_ro_state { /* Host information we need to restore when we switch back. */ u32 host_cr3; - struct Xgt_desc_struct host_idt_desc; - struct Xgt_desc_struct host_gdt_desc; + struct desc_ptr host_idt_desc; + struct desc_ptr host_gdt_desc; u32 host_sp; /* Fields which are used when guest is running. */ - struct Xgt_desc_struct guest_idt_desc; - struct Xgt_desc_struct guest_gdt_desc; - struct i386_hw_tss guest_tss; + struct desc_ptr guest_idt_desc; + struct desc_ptr guest_gdt_desc; + struct x86_hw_tss guest_tss; struct desc_struct guest_idt[IDT_ENTRIES]; struct desc_struct guest_gdt[GDT_ENTRIES]; }; -struct lguest_arch +struct lg_cpu_arch { /* The GDT entries copied into lguest_ro_state when running. */ struct desc_struct gdt[GDT_ENTRIES]; @@ -78,8 +78,8 @@ static inline void lguest_set_ts(void) } /* Full 4G segment descriptors, suitable for CS and DS. 
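For reference, the two raw descriptor words used just below decode as follows; this is the standard x86 GDT descriptor layout rather than anything lguest-specific.

        low word  0x0000ffff: limit[15:0] = 0xffff, base[15:0] = 0x0000
        high word 0x00cf9b00: base[23:16] = 0x00,
                              access byte = 0x9b (present, DPL 0, code,
                                                  execute/read, accessed),
                              flags/limit = 0xc/0xf (G=1, D=1, limit[19:16]=0xf),
                              base[31:24] = 0x00

        i.e. base 0, limit 0xfffff in 4K pages = 4 GB flat 32-bit code segment;
        0x00cf9300 is the same layout with access byte 0x93 (data, read/write).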
*/ -#define FULL_EXEC_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9b00}) -#define FULL_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9300}) +#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } }) +#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } }) #endif /* __ASSEMBLY__ */ diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index 2091779e91fb..758b9a5d4539 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h @@ -4,7 +4,7 @@ #define LHCALL_FLUSH_ASYNC 0 #define LHCALL_LGUEST_INIT 1 -#define LHCALL_CRASH 2 +#define LHCALL_SHUTDOWN 2 #define LHCALL_LOAD_GDT 3 #define LHCALL_NEW_PGTABLE 4 #define LHCALL_FLUSH_TLB 5 @@ -20,6 +20,10 @@ #define LGUEST_TRAP_ENTRY 0x1F +/* Argument number 3 to LHCALL_LGUEST_SHUTDOWN */ +#define LGUEST_SHUTDOWN_POWEROFF 1 +#define LGUEST_SHUTDOWN_RESTART 2 + #ifndef __ASSEMBLY__ #include <asm/hw_irq.h> diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index 94b257fa8701..31739c7d66a9 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h @@ -1,5 +1,25 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#ifdef CONFIG_X86_64 +#define __ALIGN .p2align 4,,15 +#define __ALIGN_STR ".p2align 4,,15" +#endif + #ifdef CONFIG_X86_32 -# include "linkage_32.h" -#else -# include "linkage_64.h" +#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) +#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) +/* + * For 32-bit UML - mark functions implemented in assembly that use + * regparm input parameters: + */ +#define asmregparm __attribute__((regparm(3))) +#endif + +#ifdef CONFIG_X86_ALIGNMENT_16 +#define __ALIGN .align 16,0x90 +#define __ALIGN_STR ".align 16,0x90" +#endif + #endif + diff --git a/include/asm-x86/linkage_32.h b/include/asm-x86/linkage_32.h deleted file mode 100644 index f4a6ebac0247..000000000000 --- a/include/asm-x86/linkage_32.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASM_LINKAGE_H -#define __ASM_LINKAGE_H - -#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) -#define FASTCALL(x) x __attribute__((regparm(3))) -#define fastcall __attribute__((regparm(3))) - -#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) - -#ifdef CONFIG_X86_ALIGNMENT_16 -#define __ALIGN .align 16,0x90 -#define __ALIGN_STR ".align 16,0x90" -#endif - -#endif diff --git a/include/asm-x86/linkage_64.h b/include/asm-x86/linkage_64.h deleted file mode 100644 index b5f39d0189ce..000000000000 --- a/include/asm-x86/linkage_64.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_LINKAGE_H -#define __ASM_LINKAGE_H - -#define __ALIGN .p2align 4,,15 - -#endif diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index c7a1b1c66c96..f852c62b3319 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h @@ -1,5 +1,240 @@ -#ifdef CONFIG_X86_32 -# include "local_32.h" -#else -# include "local_64.h" +#ifndef _ARCH_LOCAL_H +#define _ARCH_LOCAL_H + +#include <linux/percpu.h> + +#include <asm/system.h> +#include <asm/atomic.h> +#include <asm/asm.h> + +typedef struct { + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l, i) atomic_long_set(&(l)->a, (i)) + +static inline void local_inc(local_t *l) +{ + __asm__ __volatile__( + _ASM_INC "%0" + :"+m" (l->a.counter)); +} + +static inline void local_dec(local_t *l) +{ + __asm__ __volatile__( + _ASM_DEC "%0" + :"+m" (l->a.counter)); +} + +static inline void 
local_add(long i, local_t *l) +{ + __asm__ __volatile__( + _ASM_ADD "%1,%0" + :"+m" (l->a.counter) + :"ir" (i)); +} + +static inline void local_sub(long i, local_t *l) +{ + __asm__ __volatile__( + _ASM_SUB "%1,%0" + :"+m" (l->a.counter) + :"ir" (i)); +} + +/** + * local_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @l: pointer to type local_t + * + * Atomically subtracts @i from @l and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline int local_sub_and_test(long i, local_t *l) +{ + unsigned char c; + + __asm__ __volatile__( + _ASM_SUB "%2,%0; sete %1" + :"+m" (l->a.counter), "=qm" (c) + :"ir" (i) : "memory"); + return c; +} + +/** + * local_dec_and_test - decrement and test + * @l: pointer to type local_t + * + * Atomically decrements @l by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline int local_dec_and_test(local_t *l) +{ + unsigned char c; + + __asm__ __volatile__( + _ASM_DEC "%0; sete %1" + :"+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * local_inc_and_test - increment and test + * @l: pointer to type local_t + * + * Atomically increments @l by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline int local_inc_and_test(local_t *l) +{ + unsigned char c; + + __asm__ __volatile__( + _ASM_INC "%0; sete %1" + :"+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * local_add_negative - add and test if negative + * @i: integer value to add + * @l: pointer to type local_t + * + * Atomically adds @i to @l and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline int local_add_negative(long i, local_t *l) +{ + unsigned char c; + + __asm__ __volatile__( + _ASM_ADD "%2,%0; sets %1" + :"+m" (l->a.counter), "=qm" (c) + :"ir" (i) : "memory"); + return c; +} + +/** + * local_add_return - add and return + * @i: integer value to add + * @l: pointer to type local_t + * + * Atomically adds @i to @l and returns @i + @l + */ +static inline long local_add_return(long i, local_t *l) +{ + long __i; +#ifdef CONFIG_M386 + unsigned long flags; + if (unlikely(boot_cpu_data.x86 <= 3)) + goto no_xadd; #endif + /* Modern 486+ processor */ + __i = i; + __asm__ __volatile__( + _ASM_XADD "%0, %1;" + :"+r" (i), "+m" (l->a.counter) + : : "memory"); + return i + __i; + +#ifdef CONFIG_M386 +no_xadd: /* Legacy 386 processor */ + local_irq_save(flags); + __i = local_read(l); + local_set(l, i + __i); + local_irq_restore(flags); + return i + __i; +#endif +} + +static inline long local_sub_return(long i, local_t *l) +{ + return local_add_return(-i, l); +} + +#define local_inc_return(l) (local_add_return(1, l)) +#define local_dec_return(l) (local_sub_return(1, l)) + +#define local_cmpxchg(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) +/* Always has a lock prefix */ +#define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) + +/** + * local_add_unless - add unless the number is a given value + * @l: pointer of type local_t + * @a: the amount to add to l... + * @u: ...unless l is equal to u. + * + * Atomically adds @a to @l, so long as it was not @u. + * Returns non-zero if @l was not @u, and zero otherwise. 
+ */ +#define local_add_unless(l, a, u) \ +({ \ + long c, old; \ + c = local_read(l); \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = local_cmpxchg((l), c, c + (a)); \ + if (likely(old == c)) \ + break; \ + c = old; \ + } \ + c != (u); \ +}) +#define local_inc_not_zero(l) local_add_unless((l), 1, 0) + +/* On x86_32, these are no better than the atomic variants. + * On x86-64 these are better than the atomic variants on SMP kernels + * because they dont use a lock prefix. + */ +#define __local_inc(l) local_inc(l) +#define __local_dec(l) local_dec(l) +#define __local_add(i, l) local_add((i), (l)) +#define __local_sub(i, l) local_sub((i), (l)) + +/* Use these for per-cpu local_t variables: on some archs they are + * much more efficient than these naive implementations. Note they take + * a variable, not an address. + * + * X86_64: This could be done better if we moved the per cpu data directly + * after GS. + */ + +/* Need to disable preemption for the cpu local counters otherwise we could + still access a variable of a previous CPU in a non atomic way. */ +#define cpu_local_wrap_v(l) \ + ({ local_t res__; \ + preempt_disable(); \ + res__ = (l); \ + preempt_enable(); \ + res__; }) +#define cpu_local_wrap(l) \ + ({ preempt_disable(); \ + l; \ + preempt_enable(); }) \ + +#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) +#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) +#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) +#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) +#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) +#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) + +#define __cpu_local_inc(l) cpu_local_inc(l) +#define __cpu_local_dec(l) cpu_local_dec(l) +#define __cpu_local_add(i, l) cpu_local_add((i), (l)) +#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) + +#endif /* _ARCH_LOCAL_H */ diff --git a/include/asm-x86/local_32.h b/include/asm-x86/local_32.h deleted file mode 100644 index 6e85975b9ed2..000000000000 --- a/include/asm-x86/local_32.h +++ /dev/null @@ -1,233 +0,0 @@ -#ifndef _ARCH_I386_LOCAL_H -#define _ARCH_I386_LOCAL_H - -#include <linux/percpu.h> -#include <asm/system.h> -#include <asm/atomic.h> - -typedef struct -{ - atomic_long_t a; -} local_t; - -#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } - -#define local_read(l) atomic_long_read(&(l)->a) -#define local_set(l,i) atomic_long_set(&(l)->a, (i)) - -static __inline__ void local_inc(local_t *l) -{ - __asm__ __volatile__( - "incl %0" - :"+m" (l->a.counter)); -} - -static __inline__ void local_dec(local_t *l) -{ - __asm__ __volatile__( - "decl %0" - :"+m" (l->a.counter)); -} - -static __inline__ void local_add(long i, local_t *l) -{ - __asm__ __volatile__( - "addl %1,%0" - :"+m" (l->a.counter) - :"ir" (i)); -} - -static __inline__ void local_sub(long i, local_t *l) -{ - __asm__ __volatile__( - "subl %1,%0" - :"+m" (l->a.counter) - :"ir" (i)); -} - -/** - * local_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @l: pointer of type local_t - * - * Atomically subtracts @i from @l and returns - * true if the result is zero, or false for all - * other cases. 
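A sketch of the intended use of the consolidated API above: a per-CPU event counter that is cheap to bump (no LOCK prefix needed on x86-64, interrupt-safe on the local CPU) and is summed across CPUs only when read. The variable and function names are illustrative.

static DEFINE_PER_CPU(local_t, example_events) = LOCAL_INIT(0);

static void example_event_tick(void)
{
        /* preemption is disabled inside the wrapper; no LOCK prefix needed */
        cpu_local_inc(example_events);
}

static long example_event_total(void)
{
        long sum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                sum += local_read(&per_cpu(example_events, cpu));
        return sum;
}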
- */ -static __inline__ int local_sub_and_test(long i, local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "subl %2,%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * local_dec_and_test - decrement and test - * @l: pointer of type local_t - * - * Atomically decrements @l by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int local_dec_and_test(local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "decl %0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * local_inc_and_test - increment and test - * @l: pointer of type local_t - * - * Atomically increments @l by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __inline__ int local_inc_and_test(local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "incl %0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * local_add_negative - add and test if negative - * @l: pointer of type local_t - * @i: integer value to add - * - * Atomically adds @i to @l and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __inline__ int local_add_negative(long i, local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "addl %2,%0; sets %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * local_add_return - add and return - * @l: pointer of type local_t - * @i: integer value to add - * - * Atomically adds @i to @l and returns @i + @l - */ -static __inline__ long local_add_return(long i, local_t *l) -{ - long __i; -#ifdef CONFIG_M386 - unsigned long flags; - if(unlikely(boot_cpu_data.x86 <= 3)) - goto no_xadd; -#endif - /* Modern 486+ processor */ - __i = i; - __asm__ __volatile__( - "xaddl %0, %1;" - :"+r" (i), "+m" (l->a.counter) - : : "memory"); - return i + __i; - -#ifdef CONFIG_M386 -no_xadd: /* Legacy 386 processor */ - local_irq_save(flags); - __i = local_read(l); - local_set(l, i + __i); - local_irq_restore(flags); - return i + __i; -#endif -} - -static __inline__ long local_sub_return(long i, local_t *l) -{ - return local_add_return(-i,l); -} - -#define local_inc_return(l) (local_add_return(1,l)) -#define local_dec_return(l) (local_sub_return(1,l)) - -#define local_cmpxchg(l, o, n) \ - (cmpxchg_local(&((l)->a.counter), (o), (n))) -/* Always has a lock prefix */ -#define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) - -/** - * local_add_unless - add unless the number is a given value - * @l: pointer of type local_t - * @a: the amount to add to l... - * @u: ...unless l is equal to u. - * - * Atomically adds @a to @l, so long as it was not @u. - * Returns non-zero if @l was not @u, and zero otherwise. - */ -#define local_add_unless(l, a, u) \ -({ \ - long c, old; \ - c = local_read(l); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = local_cmpxchg((l), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define local_inc_not_zero(l) local_add_unless((l), 1, 0) - -/* On x86, these are no better than the atomic variants. */ -#define __local_inc(l) local_inc(l) -#define __local_dec(l) local_dec(l) -#define __local_add(i,l) local_add((i),(l)) -#define __local_sub(i,l) local_sub((i),(l)) - -/* Use these for per-cpu local_t variables: on some archs they are - * much more efficient than these naive implementations. Note they take - * a variable, not an address. 
- */ - -/* Need to disable preemption for the cpu local counters otherwise we could - still access a variable of a previous CPU in a non atomic way. */ -#define cpu_local_wrap_v(l) \ - ({ local_t res__; \ - preempt_disable(); \ - res__ = (l); \ - preempt_enable(); \ - res__; }) -#define cpu_local_wrap(l) \ - ({ preempt_disable(); \ - l; \ - preempt_enable(); }) \ - -#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) -#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) -#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) -#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) -#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) -#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) - -#define __cpu_local_inc(l) cpu_local_inc(l) -#define __cpu_local_dec(l) cpu_local_dec(l) -#define __cpu_local_add(i, l) cpu_local_add((i), (l)) -#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) - -#endif /* _ARCH_I386_LOCAL_H */ diff --git a/include/asm-x86/local_64.h b/include/asm-x86/local_64.h deleted file mode 100644 index e87492bb0693..000000000000 --- a/include/asm-x86/local_64.h +++ /dev/null @@ -1,222 +0,0 @@ -#ifndef _ARCH_X8664_LOCAL_H -#define _ARCH_X8664_LOCAL_H - -#include <linux/percpu.h> -#include <asm/atomic.h> - -typedef struct -{ - atomic_long_t a; -} local_t; - -#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } - -#define local_read(l) atomic_long_read(&(l)->a) -#define local_set(l,i) atomic_long_set(&(l)->a, (i)) - -static inline void local_inc(local_t *l) -{ - __asm__ __volatile__( - "incq %0" - :"=m" (l->a.counter) - :"m" (l->a.counter)); -} - -static inline void local_dec(local_t *l) -{ - __asm__ __volatile__( - "decq %0" - :"=m" (l->a.counter) - :"m" (l->a.counter)); -} - -static inline void local_add(long i, local_t *l) -{ - __asm__ __volatile__( - "addq %1,%0" - :"=m" (l->a.counter) - :"ir" (i), "m" (l->a.counter)); -} - -static inline void local_sub(long i, local_t *l) -{ - __asm__ __volatile__( - "subq %1,%0" - :"=m" (l->a.counter) - :"ir" (i), "m" (l->a.counter)); -} - -/** - * local_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @l: pointer to type local_t - * - * Atomically subtracts @i from @l and returns - * true if the result is zero, or false for all - * other cases. - */ -static __inline__ int local_sub_and_test(long i, local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "subq %2,%0; sete %1" - :"=m" (l->a.counter), "=qm" (c) - :"ir" (i), "m" (l->a.counter) : "memory"); - return c; -} - -/** - * local_dec_and_test - decrement and test - * @l: pointer to type local_t - * - * Atomically decrements @l by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int local_dec_and_test(local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "decq %0; sete %1" - :"=m" (l->a.counter), "=qm" (c) - :"m" (l->a.counter) : "memory"); - return c != 0; -} - -/** - * local_inc_and_test - increment and test - * @l: pointer to type local_t - * - * Atomically increments @l by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -static __inline__ int local_inc_and_test(local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "incq %0; sete %1" - :"=m" (l->a.counter), "=qm" (c) - :"m" (l->a.counter) : "memory"); - return c != 0; -} - -/** - * local_add_negative - add and test if negative - * @i: integer value to add - * @l: pointer to type local_t - * - * Atomically adds @i to @l and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __inline__ int local_add_negative(long i, local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "addq %2,%0; sets %1" - :"=m" (l->a.counter), "=qm" (c) - :"ir" (i), "m" (l->a.counter) : "memory"); - return c; -} - -/** - * local_add_return - add and return - * @i: integer value to add - * @l: pointer to type local_t - * - * Atomically adds @i to @l and returns @i + @l - */ -static __inline__ long local_add_return(long i, local_t *l) -{ - long __i = i; - __asm__ __volatile__( - "xaddq %0, %1;" - :"+r" (i), "+m" (l->a.counter) - : : "memory"); - return i + __i; -} - -static __inline__ long local_sub_return(long i, local_t *l) -{ - return local_add_return(-i,l); -} - -#define local_inc_return(l) (local_add_return(1,l)) -#define local_dec_return(l) (local_sub_return(1,l)) - -#define local_cmpxchg(l, o, n) \ - (cmpxchg_local(&((l)->a.counter), (o), (n))) -/* Always has a lock prefix */ -#define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) - -/** - * atomic_up_add_unless - add unless the number is a given value - * @l: pointer of type local_t - * @a: the amount to add to l... - * @u: ...unless l is equal to u. - * - * Atomically adds @a to @l, so long as it was not @u. - * Returns non-zero if @l was not @u, and zero otherwise. - */ -#define local_add_unless(l, a, u) \ -({ \ - long c, old; \ - c = local_read(l); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = local_cmpxchg((l), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define local_inc_not_zero(l) local_add_unless((l), 1, 0) - -/* On x86-64 these are better than the atomic variants on SMP kernels - because they dont use a lock prefix. */ -#define __local_inc(l) local_inc(l) -#define __local_dec(l) local_dec(l) -#define __local_add(i,l) local_add((i),(l)) -#define __local_sub(i,l) local_sub((i),(l)) - -/* Use these for per-cpu local_t variables: on some archs they are - * much more efficient than these naive implementations. Note they take - * a variable, not an address. - * - * This could be done better if we moved the per cpu data directly - * after GS. - */ - -/* Need to disable preemption for the cpu local counters otherwise we could - still access a variable of a previous CPU in a non atomic way. 
*/ -#define cpu_local_wrap_v(l) \ - ({ local_t res__; \ - preempt_disable(); \ - res__ = (l); \ - preempt_enable(); \ - res__; }) -#define cpu_local_wrap(l) \ - ({ preempt_disable(); \ - l; \ - preempt_enable(); }) \ - -#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) -#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) -#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) -#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) -#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) -#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) - -#define __cpu_local_inc(l) cpu_local_inc(l) -#define __cpu_local_dec(l) cpu_local_dec(l) -#define __cpu_local_add(i, l) cpu_local_add((i), (l)) -#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) - -#endif /* _ARCH_X8664_LOCAL_H */ diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index ebd319f838ab..6df235e8ea91 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h @@ -110,13 +110,13 @@ static inline int cpu_to_logical_apicid(int cpu) } static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) + struct mpc_config_translation *translation_record) { - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); + printk("Processor #%d %u:%u APIC version %d\n", + m->mpc_apicid, + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, + m->mpc_apicver); return m->mpc_apicid; } diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h index 1f730b8bd1fd..989f34c37d32 100644 --- a/include/asm-x86/mach-default/apm.h +++ b/include/asm-x86/mach-default/apm.h @@ -1,6 +1,4 @@ /* - * include/asm-i386/mach-default/apm.h - * * Machine specific APM BIOS functions for generic. * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> */ diff --git a/include/asm-x86/mach-default/io_ports.h b/include/asm-x86/mach-default/io_ports.h deleted file mode 100644 index 48540ba97166..000000000000 --- a/include/asm-x86/mach-default/io_ports.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * arch/i386/mach-generic/io_ports.h - * - * Machine specific IO port address definition for generic. 
- * Written by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef _MACH_IO_PORTS_H -#define _MACH_IO_PORTS_H - -/* i8259A PIC registers */ -#define PIC_MASTER_CMD 0x20 -#define PIC_MASTER_IMR 0x21 -#define PIC_MASTER_ISR PIC_MASTER_CMD -#define PIC_MASTER_POLL PIC_MASTER_ISR -#define PIC_MASTER_OCW3 PIC_MASTER_ISR -#define PIC_SLAVE_CMD 0xa0 -#define PIC_SLAVE_IMR 0xa1 - -/* i8259A PIC related value */ -#define PIC_CASCADE_IR 2 -#define MASTER_ICW4_DEFAULT 0x01 -#define SLAVE_ICW4_DEFAULT 0x01 -#define PIC_ICW4_AEOI 2 - -#endif /* !_MACH_IO_PORTS_H */ diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 6db1c3babe9a..e3c2c1012c1c 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -89,15 +89,15 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); +static inline int mpc_apic_id(struct mpc_config_processor *m, + struct mpc_config_translation *translation_record) +{ + printk("Processor #%d %u:%u APIC version %d\n", + m->mpc_apicid, + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, + m->mpc_apicver); + return m->mpc_apicid; } static inline void setup_portio_remap(void) diff --git a/include/asm-x86/mach-default/mach_time.h b/include/asm-x86/mach-default/mach_time.h deleted file mode 100644 index 31eb5de6f3dc..000000000000 --- a/include/asm-x86/mach-default/mach_time.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * include/asm-i386/mach-default/mach_time.h - * - * Machine specific set RTC function for generic. - * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef _MACH_TIME_H -#define _MACH_TIME_H - -#include <linux/mc146818rtc.h> - -/* for check timing call set_rtc_mmss() 500ms */ -/* used in arch/i386/time.c::do_timer_interrupt() */ -#define USEC_AFTER 500000 -#define USEC_BEFORE 500000 - -/* - * In order to set the CMOS clock precisely, set_rtc_mmss has to be - * called 500 ms after the second nowtime has started, because when - * nowtime is written into the registers of the CMOS clock, it will - * jump to the next second precisely 500 ms later. Check the Motorola - * MC146818A or Dallas DS12887 data sheet for details. - * - * BUG: This routine does not handle hour overflow properly; it just - * sets the minutes. Usually you'll only notice that after reboot! - */ -static inline int mach_set_rtc_mmss(unsigned long nowtime) -{ - int retval = 0; - int real_seconds, real_minutes, cmos_minutes; - unsigned char save_control, save_freq_select; - - save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - cmos_minutes = CMOS_READ(RTC_MINUTES); - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - BCD_TO_BIN(cmos_minutes); - - /* - * since we're only adjusting minutes and seconds, - * don't interfere with hour overflow. 
This avoids - * messing with unknown time zones but requires your - * RTC not to be off by more than 15 minutes - */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) - real_minutes += 30; /* correct for half hour time zone */ - real_minutes %= 60; - - if (abs(real_minutes - cmos_minutes) < 30) { - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(real_seconds); - BIN_TO_BCD(real_minutes); - } - CMOS_WRITE(real_seconds,RTC_SECONDS); - CMOS_WRITE(real_minutes,RTC_MINUTES); - } else { - printk(KERN_WARNING - "set_rtc_mmss: can't update from %d to %d\n", - cmos_minutes, real_minutes); - retval = -1; - } - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... -- Markus Kuhn - */ - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - - return retval; -} - -static inline unsigned long mach_get_cmos_time(void) -{ - unsigned int year, mon, day, hour, min, sec; - - do { - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - } while (sec != CMOS_READ(RTC_SECONDS)); - - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); - } - - year += 1900; - if (year < 1970) - year += 100; - - return mktime(year, mon, day, hour, min, sec); -} - -#endif /* !_MACH_TIME_H */ diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h index 807992fd4171..4b76e536cd98 100644 --- a/include/asm-x86/mach-default/mach_timer.h +++ b/include/asm-x86/mach-default/mach_timer.h @@ -1,6 +1,4 @@ /* - * include/asm-i386/mach-default/mach_timer.h - * * Machine specific calibrate_tsc() for generic. * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp> */ diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h index 625438b8a6eb..2fe7705c0484 100644 --- a/include/asm-x86/mach-default/mach_traps.h +++ b/include/asm-x86/mach-default/mach_traps.h @@ -1,6 +1,4 @@ /* - * include/asm-i386/mach-default/mach_traps.h - * * Machine specific NMI handling for generic. 
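The half-hour time-zone correction in the deleted mach_set_rtc_mmss() above is easy to misread, so two worked cases of the expression ((abs(real_minutes - cmos_minutes) + 15) / 30) & 1 may help:

        cmos_minutes = 5, real_minutes = 40:  diff 35, (35+15)/30 = 1 (odd)
            -> the RTC is assumed to sit in a half-hour-offset zone; 30 is
               added and real_minutes becomes 10 (mod 60), now within 30
               minutes of the CMOS value, so it is written.
        cmos_minutes = 5, real_minutes = 12:  diff 7, (7+15)/30 = 0 (even)
            -> no correction; the minutes are written directly.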
* Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> */ diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index caec64be516d..d23011fdf454 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -131,11 +131,11 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) { - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); + printk("Processor #%d %u:%u APIC version %d\n", + m->mpc_apicid, + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, + m->mpc_apicver); return (m->mpc_apicid); } diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h new file mode 100644 index 000000000000..5305dcb96df2 --- /dev/null +++ b/include/asm-x86/mach-generic/gpio.h @@ -0,0 +1,15 @@ +#ifndef __ASM_MACH_GENERIC_GPIO_H +#define __ASM_MACH_GENERIC_GPIO_H + +int gpio_request(unsigned gpio, const char *label); +void gpio_free(unsigned gpio); +int gpio_direction_input(unsigned gpio); +int gpio_direction_output(unsigned gpio, int value); +int gpio_get_value(unsigned gpio); +void gpio_set_value(unsigned gpio, int value); +int gpio_to_irq(unsigned gpio); +int irq_to_gpio(unsigned irq); + +#include <asm-generic/gpio.h> /* cansleep wrappers */ + +#endif /* __ASM_MACH_GENERIC_GPIO_H */ diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 5e5e7dd2692e..17e183bd39c1 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h @@ -101,11 +101,11 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, int quad = translation_record->trans_quad; int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); - printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver, quad, logical_apicid); + printk("Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", + m->mpc_apicid, + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, + m->mpc_apicver, quad, logical_apicid); return logical_apicid; } diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h new file mode 100644 index 000000000000..db31b929b990 --- /dev/null +++ b/include/asm-x86/mach-rdc321x/gpio.h @@ -0,0 +1,56 @@ +#ifndef _RDC321X_GPIO_H +#define _RDC321X_GPIO_H + +extern int rdc_gpio_get_value(unsigned gpio); +extern void rdc_gpio_set_value(unsigned gpio, int value); +extern int rdc_gpio_direction_input(unsigned gpio); +extern int rdc_gpio_direction_output(unsigned gpio, int value); + + +/* Wrappers for the arch-neutral GPIO API */ + +static inline int gpio_request(unsigned gpio, const char *label) +{ + /* Not yet implemented */ + return 0; +} + +static inline void gpio_free(unsigned gpio) +{ + /* Not yet implemented */ +} + +static inline int gpio_direction_input(unsigned gpio) +{ + return rdc_gpio_direction_input(gpio); +} + +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return rdc_gpio_direction_output(gpio, value); +} + +static inline int gpio_get_value(unsigned gpio) +{ + return rdc_gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned gpio, int 
value) +{ + rdc_gpio_set_value(gpio, value); +} + +static inline int gpio_to_irq(unsigned gpio) +{ + return gpio; +} + +static inline int irq_to_gpio(unsigned irq) +{ + return irq; +} + +/* For cansleep */ +#include <asm-generic/gpio.h> + +#endif /* _RDC321X_GPIO_H_ */ diff --git a/include/asm-x86/mach-rdc321x/rdc321x_defs.h b/include/asm-x86/mach-rdc321x/rdc321x_defs.h new file mode 100644 index 000000000000..838ba8f64fd3 --- /dev/null +++ b/include/asm-x86/mach-rdc321x/rdc321x_defs.h @@ -0,0 +1,6 @@ +#define PFX "rdc321x: " + +/* General purpose configuration and data registers */ +#define RDC3210_CFGREG_ADDR 0x0CF8 +#define RDC3210_CFGREG_DATA 0x0CFC +#define RDC_MAX_GPIO 0x3A diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 732f776aab8e..062c97f6100b 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h @@ -126,15 +126,15 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid) return physid_mask_of_physid(0); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); +static inline int mpc_apic_id(struct mpc_config_processor *m, + struct mpc_config_translation *translation_record) +{ + printk("Processor #%d %u:%u APIC version %d\n", + m->mpc_apicid, + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, + m->mpc_apicver); + return m->mpc_apicid; } static inline void setup_portio_remap(void) diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h index a4b0aa3320e6..9bf4ae93ab10 100644 --- a/include/asm-x86/math_emu.h +++ b/include/asm-x86/math_emu.h @@ -1,11 +1,6 @@ #ifndef _I386_MATH_EMU_H #define _I386_MATH_EMU_H -#include <asm/sigcontext.h> - -int restore_i387_soft(void *s387, struct _fpstate __user *buf); -int save_i387_soft(void *s387, struct _fpstate __user *buf); - /* This structure matches the layout of the data saved to the stack following a device-not-present interrupt, part of it saved automatically by the 80386/80486. diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index 5c2bb66caf17..cdd9f965835a 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h @@ -1,5 +1,100 @@ -#ifdef CONFIG_X86_32 -# include "mc146818rtc_32.h" +/* + * Machine dependent access functions for RTC registers. + */ +#ifndef _ASM_MC146818RTC_H +#define _ASM_MC146818RTC_H + +#include <asm/io.h> +#include <asm/system.h> +#include <asm/processor.h> +#include <linux/mc146818rtc.h> + +#ifndef RTC_PORT +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ +#endif + +#if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG) +/* + * This lock provides nmi access to the CMOS/RTC registers. It has some + * special properties. It is owned by a CPU and stores the index register + * currently being accessed (if owned). The idea here is that it works + * like a normal lock (normally). However, in an NMI, the NMI code will + * first check to see if its CPU owns the lock, meaning that the NMI + * interrupted during the read/write of the device. If it does, it goes ahead + * and performs the access and then restores the index register. If it does + * not, it locks normally. 
+ * + * Note that since we are working with NMIs, we need this lock even in + * a non-SMP machine just to mark that the lock is owned. + * + * This only works with compare-and-swap. There is no other way to + * atomically claim the lock and set the owner. + */ +#include <linux/smp.h> +extern volatile unsigned long cmos_lock; + +/* + * All of these below must be called with interrupts off, preempt + * disabled, etc. + */ + +static inline void lock_cmos(unsigned char reg) +{ + unsigned long new; + new = ((smp_processor_id()+1) << 8) | reg; + for (;;) { + if (cmos_lock) { + cpu_relax(); + continue; + } + if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) + return; + } +} + +static inline void unlock_cmos(void) +{ + cmos_lock = 0; +} +static inline int do_i_have_lock_cmos(void) +{ + return (cmos_lock >> 8) == (smp_processor_id()+1); +} +static inline unsigned char current_lock_cmos_reg(void) +{ + return cmos_lock & 0xff; +} +#define lock_cmos_prefix(reg) \ + do { \ + unsigned long cmos_flags; \ + local_irq_save(cmos_flags); \ + lock_cmos(reg) +#define lock_cmos_suffix(reg) \ + unlock_cmos(); \ + local_irq_restore(cmos_flags); \ + } while (0) #else -# include "mc146818rtc_64.h" +#define lock_cmos_prefix(reg) do {} while (0) +#define lock_cmos_suffix(reg) do {} while (0) +#define lock_cmos(reg) +#define unlock_cmos() +#define do_i_have_lock_cmos() 0 +#define current_lock_cmos_reg() 0 #endif + +/* + * The yet supported machines all access the RTC index register via + * an ISA port access but the way to access the date register differs ... + */ +#define CMOS_READ(addr) rtc_cmos_read(addr) +#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) +unsigned char rtc_cmos_read(unsigned char addr); +void rtc_cmos_write(unsigned char val, unsigned char addr); + +extern int mach_set_rtc_mmss(unsigned long nowtime); +extern unsigned long mach_get_cmos_time(void); + +#define RTC_IRQ 8 + +#endif /* _ASM_MC146818RTC_H */ diff --git a/include/asm-x86/mc146818rtc_32.h b/include/asm-x86/mc146818rtc_32.h deleted file mode 100644 index 1613b42eaf58..000000000000 --- a/include/asm-x86/mc146818rtc_32.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef _ASM_MC146818RTC_H -#define _ASM_MC146818RTC_H - -#include <asm/io.h> -#include <asm/system.h> -#include <asm/processor.h> -#include <linux/mc146818rtc.h> - -#ifndef RTC_PORT -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ -#endif - -#ifdef __HAVE_ARCH_CMPXCHG -/* - * This lock provides nmi access to the CMOS/RTC registers. It has some - * special properties. It is owned by a CPU and stores the index register - * currently being accessed (if owned). The idea here is that it works - * like a normal lock (normally). However, in an NMI, the NMI code will - * first check to see if its CPU owns the lock, meaning that the NMI - * interrupted during the read/write of the device. If it does, it goes ahead - * and performs the access and then restores the index register. If it does - * not, it locks normally. - * - * Note that since we are working with NMIs, we need this lock even in - * a non-SMP machine just to mark that the lock is owned. - * - * This only works with compare-and-swap. There is no other way to - * atomically claim the lock and set the owner. - */ -#include <linux/smp.h> -extern volatile unsigned long cmos_lock; - -/* - * All of these below must be called with interrupts off, preempt - * disabled, etc. 
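The ownership scheme described in the comment above is compact enough to miss: cmos_lock is 0 when free, and otherwise packs (owner CPU + 1) into the upper bits and the RTC index register being accessed into the low byte, which is what lets an NMI handler tell whether it interrupted its own CPU mid-access and, if so, restore the index register afterwards. Below is a minimal user-space sketch of just that encoding, not the kernel code itself; __sync_val_compare_and_swap stands in for the kernel's __cmpxchg and my_cpu_id() is an invented stand-in for smp_processor_id().

/*
 * Sketch of the cmos_lock word layout: bits 8 and up hold
 * (owner CPU id + 1) so that 0 can still mean "unlocked",
 * bits 0-7 hold the RTC index register the owner is touching.
 */
#include <stdio.h>

static volatile unsigned long cmos_lock_demo;

static unsigned long my_cpu_id(void)
{
	return 0;	/* pretend we are CPU 0 */
}

static void lock_cmos_demo(unsigned char reg)
{
	unsigned long new = ((my_cpu_id() + 1) << 8) | reg;

	/* spin until the word goes from 0 (free) to our owner+reg value */
	while (__sync_val_compare_and_swap(&cmos_lock_demo, 0UL, new) != 0UL)
		;	/* cpu_relax() would go here in the kernel */
}

static void unlock_cmos_demo(void)
{
	cmos_lock_demo = 0;
}

static int i_own_cmos_demo(void)
{
	return (cmos_lock_demo >> 8) == my_cpu_id() + 1;
}

int main(void)
{
	lock_cmos_demo(0x0a);	/* RTC status register A, for example */
	printf("owned by me: %d, reg: 0x%02lx\n",
	       i_own_cmos_demo(), cmos_lock_demo & 0xff);
	unlock_cmos_demo();
	return 0;
}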
- */ - -static inline void lock_cmos(unsigned char reg) -{ - unsigned long new; - new = ((smp_processor_id()+1) << 8) | reg; - for (;;) { - if (cmos_lock) { - cpu_relax(); - continue; - } - if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) - return; - } -} - -static inline void unlock_cmos(void) -{ - cmos_lock = 0; -} -static inline int do_i_have_lock_cmos(void) -{ - return (cmos_lock >> 8) == (smp_processor_id()+1); -} -static inline unsigned char current_lock_cmos_reg(void) -{ - return cmos_lock & 0xff; -} -#define lock_cmos_prefix(reg) \ - do { \ - unsigned long cmos_flags; \ - local_irq_save(cmos_flags); \ - lock_cmos(reg) -#define lock_cmos_suffix(reg) \ - unlock_cmos(); \ - local_irq_restore(cmos_flags); \ - } while (0) -#else -#define lock_cmos_prefix(reg) do {} while (0) -#define lock_cmos_suffix(reg) do {} while (0) -#define lock_cmos(reg) -#define unlock_cmos() -#define do_i_have_lock_cmos() 0 -#define current_lock_cmos_reg() 0 -#endif - -/* - * The yet supported machines all access the RTC index register via - * an ISA port access but the way to access the date register differs ... - */ -#define CMOS_READ(addr) rtc_cmos_read(addr) -#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) -unsigned char rtc_cmos_read(unsigned char addr); -void rtc_cmos_write(unsigned char val, unsigned char addr); - -#define RTC_IRQ 8 - -#endif /* _ASM_MC146818RTC_H */ diff --git a/include/asm-x86/mc146818rtc_64.h b/include/asm-x86/mc146818rtc_64.h deleted file mode 100644 index d6e3009430c1..000000000000 --- a/include/asm-x86/mc146818rtc_64.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef _ASM_MC146818RTC_H -#define _ASM_MC146818RTC_H - -#include <asm/io.h> - -#ifndef RTC_PORT -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ -#endif - -/* - * The yet supported machines all access the RTC index register via - * an ISA port access but the way to access the date register differs ... - */ -#define CMOS_READ(addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -inb_p(RTC_PORT(1)); \ -}) -#define CMOS_WRITE(val, addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -outb_p((val),RTC_PORT(1)); \ -}) - -#define RTC_IRQ 8 - -#endif /* _ASM_MC146818RTC_H */ diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h index df304fd89c27..94f1fd79e22a 100644 --- a/include/asm-x86/mce.h +++ b/include/asm-x86/mce.h @@ -13,7 +13,7 @@ #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ -#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */ +#define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */ #define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */ #define MCI_STATUS_VAL (1UL<<63) /* valid error */ @@ -30,7 +30,7 @@ struct mce { __u64 misc; __u64 addr; __u64 mcgstatus; - __u64 rip; + __u64 ip; __u64 tsc; /* cpu time stamp counter */ __u64 res1; /* for future extension */ __u64 res2; /* dito. 
*/ @@ -85,14 +85,7 @@ struct mce_log { #ifdef __KERNEL__ #ifdef CONFIG_X86_32 -#ifdef CONFIG_X86_MCE -extern void mcheck_init(struct cpuinfo_x86 *c); -#else -#define mcheck_init(c) do {} while(0) -#endif - extern int mce_disabled; - #else /* CONFIG_X86_32 */ #include <asm/atomic.h> @@ -121,6 +114,13 @@ extern int mce_notify_user(void); #endif /* !CONFIG_X86_32 */ + + +#ifdef CONFIG_X86_MCE +extern void mcheck_init(struct cpuinfo_x86 *c); +#else +#define mcheck_init(c) do { } while (0) +#endif extern void stop_mce(void); extern void restart_mce(void); diff --git a/include/asm-x86/mmsegment.h b/include/asm-x86/mmsegment.h deleted file mode 100644 index d3f80c996330..000000000000 --- a/include/asm-x86/mmsegment.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_MMSEGMENT_H -#define _ASM_MMSEGMENT_H 1 - -typedef struct { - unsigned long seg; -} mm_segment_t; - -#endif diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index 3f922c8e1c88..efa962c38897 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h @@ -20,4 +20,12 @@ typedef struct { void *vdso; } mm_context_t; +#ifdef CONFIG_SMP +void leave_mm(int cpu); +#else +static inline void leave_mm(int cpu) +{ +} +#endif + #endif /* _ASM_X86_MMU_H */ diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 7eb0b0b1fb3c..8198d1cca1f3 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h @@ -32,8 +32,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) #endif } -void leave_mm(unsigned long cpu); - static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index 0cce83a78378..ad6dc821ef9e 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h @@ -7,7 +7,9 @@ #include <asm/pda.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> +#ifndef CONFIG_PARAVIRT #include <asm-generic/mm_hooks.h> +#endif /* * possibly do the LDT unload here? @@ -23,11 +25,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) #endif } -static inline void load_cr3(pgd_t *pgd) -{ - asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory"); -} - static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -43,20 +40,20 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, load_cr3(next->pgd); if (unlikely(next->context.ldt != prev->context.ldt)) - load_LDT_nolock(&next->context, cpu); + load_LDT_nolock(&next->context); } #ifdef CONFIG_SMP else { write_pda(mmu_state, TLBSTATE_OK); if (read_pda(active_mm) != next) - out_of_line_bug(); + BUG(); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. 
*/ load_cr3(next->pgd); - load_LDT_nolock(&next->context, cpu); + load_LDT_nolock(&next->context); } } #endif diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 118e9812778f..5d6f4ce6e6d6 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@ -87,9 +87,6 @@ static inline int pfn_to_nid(unsigned long pfn) __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ }) -/* XXX: FIXME -- wli */ -#define kern_addr_valid(kaddr) (0) - #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ #define pfn_valid(pfn) ((pfn) < num_physpages) #else diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index 19a89377b123..ebaf9663aa8a 100644 --- a/include/asm-x86/mmzone_64.h +++ b/include/asm-x86/mmzone_64.h @@ -15,9 +15,9 @@ struct memnode { int shift; unsigned int mapsize; - u8 *map; - u8 embedded_map[64-16]; -} ____cacheline_aligned; /* total size = 64 bytes */ + s16 *map; + s16 embedded_map[64-8]; +} ____cacheline_aligned; /* total size = 128 bytes */ extern struct memnode memnode; #define memnode_shift memnode.shift #define memnodemap memnode.map @@ -41,11 +41,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ NODE_DATA(nid)->node_spanned_pages) -#ifdef CONFIG_DISCONTIGMEM -#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) - -extern int pfn_valid(unsigned long pfn); -#endif +extern int early_pfn_to_nid(unsigned long pfn); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE (64*1024*1024) diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h index 2b2f18d8a531..bfedb247871c 100644 --- a/include/asm-x86/module.h +++ b/include/asm-x86/module.h @@ -1,5 +1,82 @@ +#ifndef _ASM_MODULE_H +#define _ASM_MODULE_H + +/* x86_32/64 are simple */ +struct mod_arch_specific {}; + #ifdef CONFIG_X86_32 -# include "module_32.h" +# define Elf_Shdr Elf32_Shdr +# define Elf_Sym Elf32_Sym +# define Elf_Ehdr Elf32_Ehdr #else -# include "module_64.h" +# define Elf_Shdr Elf64_Shdr +# define Elf_Sym Elf64_Sym +# define Elf_Ehdr Elf64_Ehdr #endif + +#ifdef CONFIG_X86_64 +/* X86_64 does not define MODULE_PROC_FAMILY */ +#elif defined CONFIG_M386 +#define MODULE_PROC_FAMILY "386 " +#elif defined CONFIG_M486 +#define MODULE_PROC_FAMILY "486 " +#elif defined CONFIG_M586 +#define MODULE_PROC_FAMILY "586 " +#elif defined CONFIG_M586TSC +#define MODULE_PROC_FAMILY "586TSC " +#elif defined CONFIG_M586MMX +#define MODULE_PROC_FAMILY "586MMX " +#elif defined CONFIG_MCORE2 +#define MODULE_PROC_FAMILY "CORE2 " +#elif defined CONFIG_M686 +#define MODULE_PROC_FAMILY "686 " +#elif defined CONFIG_MPENTIUMII +#define MODULE_PROC_FAMILY "PENTIUMII " +#elif defined CONFIG_MPENTIUMIII +#define MODULE_PROC_FAMILY "PENTIUMIII " +#elif defined CONFIG_MPENTIUMM +#define MODULE_PROC_FAMILY "PENTIUMM " +#elif defined CONFIG_MPENTIUM4 +#define MODULE_PROC_FAMILY "PENTIUM4 " +#elif defined CONFIG_MK6 +#define MODULE_PROC_FAMILY "K6 " +#elif defined CONFIG_MK7 +#define MODULE_PROC_FAMILY "K7 " +#elif defined CONFIG_MK8 +#define MODULE_PROC_FAMILY "K8 " +#elif defined CONFIG_X86_ELAN +#define MODULE_PROC_FAMILY "ELAN " +#elif defined CONFIG_MCRUSOE +#define MODULE_PROC_FAMILY "CRUSOE " +#elif defined CONFIG_MEFFICEON +#define MODULE_PROC_FAMILY "EFFICEON " +#elif defined CONFIG_MWINCHIPC6 +#define MODULE_PROC_FAMILY "WINCHIPC6 " +#elif defined CONFIG_MWINCHIP2 +#define MODULE_PROC_FAMILY "WINCHIP2 " +#elif defined CONFIG_MWINCHIP3D +#define MODULE_PROC_FAMILY 
"WINCHIP3D " +#elif defined CONFIG_MCYRIXIII +#define MODULE_PROC_FAMILY "CYRIXIII " +#elif defined CONFIG_MVIAC3_2 +#define MODULE_PROC_FAMILY "VIAC3-2 " +#elif defined CONFIG_MVIAC7 +#define MODULE_PROC_FAMILY "VIAC7 " +#elif defined CONFIG_MGEODEGX1 +#define MODULE_PROC_FAMILY "GEODEGX1 " +#elif defined CONFIG_MGEODE_LX +#define MODULE_PROC_FAMILY "GEODE " +#else +#error unknown processor family +#endif + +#ifdef CONFIG_X86_32 +# ifdef CONFIG_4KSTACKS +# define MODULE_STACKSIZE "4KSTACKS " +# else +# define MODULE_STACKSIZE "" +# endif +# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE +#endif + +#endif /* _ASM_MODULE_H */ diff --git a/include/asm-x86/module_32.h b/include/asm-x86/module_32.h deleted file mode 100644 index 7e5fda6c3976..000000000000 --- a/include/asm-x86/module_32.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _ASM_I386_MODULE_H -#define _ASM_I386_MODULE_H - -/* x86 is simple */ -struct mod_arch_specific -{ -}; - -#define Elf_Shdr Elf32_Shdr -#define Elf_Sym Elf32_Sym -#define Elf_Ehdr Elf32_Ehdr - -#ifdef CONFIG_M386 -#define MODULE_PROC_FAMILY "386 " -#elif defined CONFIG_M486 -#define MODULE_PROC_FAMILY "486 " -#elif defined CONFIG_M586 -#define MODULE_PROC_FAMILY "586 " -#elif defined CONFIG_M586TSC -#define MODULE_PROC_FAMILY "586TSC " -#elif defined CONFIG_M586MMX -#define MODULE_PROC_FAMILY "586MMX " -#elif defined CONFIG_MCORE2 -#define MODULE_PROC_FAMILY "CORE2 " -#elif defined CONFIG_M686 -#define MODULE_PROC_FAMILY "686 " -#elif defined CONFIG_MPENTIUMII -#define MODULE_PROC_FAMILY "PENTIUMII " -#elif defined CONFIG_MPENTIUMIII -#define MODULE_PROC_FAMILY "PENTIUMIII " -#elif defined CONFIG_MPENTIUMM -#define MODULE_PROC_FAMILY "PENTIUMM " -#elif defined CONFIG_MPENTIUM4 -#define MODULE_PROC_FAMILY "PENTIUM4 " -#elif defined CONFIG_MK6 -#define MODULE_PROC_FAMILY "K6 " -#elif defined CONFIG_MK7 -#define MODULE_PROC_FAMILY "K7 " -#elif defined CONFIG_MK8 -#define MODULE_PROC_FAMILY "K8 " -#elif defined CONFIG_X86_ELAN -#define MODULE_PROC_FAMILY "ELAN " -#elif defined CONFIG_MCRUSOE -#define MODULE_PROC_FAMILY "CRUSOE " -#elif defined CONFIG_MEFFICEON -#define MODULE_PROC_FAMILY "EFFICEON " -#elif defined CONFIG_MWINCHIPC6 -#define MODULE_PROC_FAMILY "WINCHIPC6 " -#elif defined CONFIG_MWINCHIP2 -#define MODULE_PROC_FAMILY "WINCHIP2 " -#elif defined CONFIG_MWINCHIP3D -#define MODULE_PROC_FAMILY "WINCHIP3D " -#elif defined CONFIG_MCYRIXIII -#define MODULE_PROC_FAMILY "CYRIXIII " -#elif defined CONFIG_MVIAC3_2 -#define MODULE_PROC_FAMILY "VIAC3-2 " -#elif defined CONFIG_MVIAC7 -#define MODULE_PROC_FAMILY "VIAC7 " -#elif defined CONFIG_MGEODEGX1 -#define MODULE_PROC_FAMILY "GEODEGX1 " -#elif defined CONFIG_MGEODE_LX -#define MODULE_PROC_FAMILY "GEODE " -#else -#error unknown processor family -#endif - -#ifdef CONFIG_4KSTACKS -#define MODULE_STACKSIZE "4KSTACKS " -#else -#define MODULE_STACKSIZE "" -#endif - -#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE - -#endif /* _ASM_I386_MODULE_H */ diff --git a/include/asm-x86/module_64.h b/include/asm-x86/module_64.h deleted file mode 100644 index 67f8f69fa7b1..000000000000 --- a/include/asm-x86/module_64.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ASM_X8664_MODULE_H -#define _ASM_X8664_MODULE_H - -struct mod_arch_specific {}; - -#define Elf_Shdr Elf64_Shdr -#define Elf_Sym Elf64_Sym -#define Elf_Ehdr Elf64_Ehdr - -#endif diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 8f268e8fd2e9..781ad74ab9e9 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -1,5 
+1,117 @@ +#ifndef _AM_X86_MPSPEC_H +#define _AM_X86_MPSPEC_H + +#include <asm/mpspec_def.h> + #ifdef CONFIG_X86_32 -# include "mpspec_32.h" +#include <mach_mpspec.h> + +extern int mp_bus_id_to_type[MAX_MP_BUSSES]; +extern int mp_bus_id_to_node[MAX_MP_BUSSES]; +extern int mp_bus_id_to_local[MAX_MP_BUSSES]; +extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; + +extern unsigned int def_to_bigsmp; +extern int apic_version[MAX_APICS]; +extern u8 apicid_2_node[]; +extern int pic_mode; + +#define MAX_APICID 256 + #else -# include "mpspec_64.h" + +#define MAX_MP_BUSSES 256 +/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ +#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) + +extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + +#endif + +extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; + +extern unsigned int boot_cpu_physical_apicid; +extern int smp_found_config; +extern int nr_ioapics; +extern int mp_irq_entries; +extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; +extern int mpc_default_type; +extern unsigned long mp_lapic_addr; + +extern void find_smp_config(void); +extern void get_smp_config(void); + +#ifdef CONFIG_ACPI +extern void mp_register_lapic(u8 id, u8 enabled); +extern void mp_register_lapic_address(u64 address); +extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base); +extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, + u32 gsi); +extern void mp_config_acpi_legacy_irqs(void); +extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); +#endif /* CONFIG_ACPI */ + +#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) + +struct physid_mask +{ + unsigned long mask[PHYSID_ARRAY_SIZE]; +}; + +typedef struct physid_mask physid_mask_t; + +#define physid_set(physid, map) set_bit(physid, (map).mask) +#define physid_clear(physid, map) clear_bit(physid, (map).mask) +#define physid_isset(physid, map) test_bit(physid, (map).mask) +#define physid_test_and_set(physid, map) \ + test_and_set_bit(physid, (map).mask) + +#define physids_and(dst, src1, src2) \ + bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) + +#define physids_or(dst, src1, src2) \ + bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) + +#define physids_clear(map) \ + bitmap_zero((map).mask, MAX_APICS) + +#define physids_complement(dst, src) \ + bitmap_complement((dst).mask, (src).mask, MAX_APICS) + +#define physids_empty(map) \ + bitmap_empty((map).mask, MAX_APICS) + +#define physids_equal(map1, map2) \ + bitmap_equal((map1).mask, (map2).mask, MAX_APICS) + +#define physids_weight(map) \ + bitmap_weight((map).mask, MAX_APICS) + +#define physids_shift_right(d, s, n) \ + bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) + +#define physids_shift_left(d, s, n) \ + bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) + +#define physids_coerce(map) ((map).mask[0]) + +#define physids_promote(physids) \ + ({ \ + physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ + __physid_mask.mask[0] = physids; \ + __physid_mask; \ + }) + +#define physid_mask_of_physid(physid) \ + ({ \ + physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ + physid_set(physid, __physid_mask); \ + __physid_mask; \ + }) + +#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } +#define PHYSID_MASK_NONE { {[0 ... 
PHYSID_ARRAY_SIZE-1] = 0UL} } + +extern physid_mask_t phys_cpu_present_map; + #endif diff --git a/include/asm-x86/mpspec_32.h b/include/asm-x86/mpspec_32.h deleted file mode 100644 index f21349399d14..000000000000 --- a/include/asm-x86/mpspec_32.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef __ASM_MPSPEC_H -#define __ASM_MPSPEC_H - -#include <linux/cpumask.h> -#include <asm/mpspec_def.h> -#include <mach_mpspec.h> - -extern int mp_bus_id_to_type [MAX_MP_BUSSES]; -extern int mp_bus_id_to_node [MAX_MP_BUSSES]; -extern int mp_bus_id_to_local [MAX_MP_BUSSES]; -extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; -extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; - -extern unsigned int def_to_bigsmp; -extern unsigned int boot_cpu_physical_apicid; -extern int smp_found_config; -extern void find_smp_config (void); -extern void get_smp_config (void); -extern int nr_ioapics; -extern int apic_version [MAX_APICS]; -extern int mp_irq_entries; -extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; -extern int mpc_default_type; -extern unsigned long mp_lapic_addr; -extern int pic_mode; - -#ifdef CONFIG_ACPI -extern void mp_register_lapic (u8 id, u8 enabled); -extern void mp_register_lapic_address (u64 address); -extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); -extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); -extern void mp_config_acpi_legacy_irqs (void); -extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); -#endif /* CONFIG_ACPI */ - -#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) - -struct physid_mask -{ - unsigned long mask[PHYSID_ARRAY_SIZE]; -}; - -typedef struct physid_mask physid_mask_t; - -#define physid_set(physid, map) set_bit(physid, (map).mask) -#define physid_clear(physid, map) clear_bit(physid, (map).mask) -#define physid_isset(physid, map) test_bit(physid, (map).mask) -#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) - -#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) -#define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) -#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) -#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) -#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) -#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) -#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) -#define physids_coerce(map) ((map).mask[0]) - -#define physids_promote(physids) \ - ({ \ - physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ - __physid_mask.mask[0] = physids; \ - __physid_mask; \ - }) - -#define physid_mask_of_physid(physid) \ - ({ \ - physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ - physid_set(physid, __physid_mask); \ - __physid_mask; \ - }) - -#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } -#define PHYSID_MASK_NONE { {[0 ... 
PHYSID_ARRAY_SIZE-1] = 0UL} } - -extern physid_mask_t phys_cpu_present_map; - -#endif - diff --git a/include/asm-x86/mpspec_64.h b/include/asm-x86/mpspec_64.h deleted file mode 100644 index 017fddb61dc5..000000000000 --- a/include/asm-x86/mpspec_64.h +++ /dev/null @@ -1,233 +0,0 @@ -#ifndef __ASM_MPSPEC_H -#define __ASM_MPSPEC_H - -/* - * Structure definitions for SMP machines following the - * Intel Multiprocessing Specification 1.1 and 1.4. - */ - -/* - * This tag identifies where the SMP configuration - * information is. - */ - -#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') - -/* - * A maximum of 255 APICs with the current APIC ID architecture. - */ -#define MAX_APICS 255 - -struct intel_mp_floating -{ - char mpf_signature[4]; /* "_MP_" */ - unsigned int mpf_physptr; /* Configuration table address */ - unsigned char mpf_length; /* Our length (paragraphs) */ - unsigned char mpf_specification;/* Specification version */ - unsigned char mpf_checksum; /* Checksum (makes sum 0) */ - unsigned char mpf_feature1; /* Standard or configuration ? */ - unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ - unsigned char mpf_feature3; /* Unused (0) */ - unsigned char mpf_feature4; /* Unused (0) */ - unsigned char mpf_feature5; /* Unused (0) */ -}; - -struct mp_config_table -{ - char mpc_signature[4]; -#define MPC_SIGNATURE "PCMP" - unsigned short mpc_length; /* Size of table */ - char mpc_spec; /* 0x01 */ - char mpc_checksum; - char mpc_oem[8]; - char mpc_productid[12]; - unsigned int mpc_oemptr; /* 0 if not present */ - unsigned short mpc_oemsize; /* 0 if not present */ - unsigned short mpc_oemcount; - unsigned int mpc_lapic; /* APIC address */ - unsigned int reserved; -}; - -/* Followed by entries */ - -#define MP_PROCESSOR 0 -#define MP_BUS 1 -#define MP_IOAPIC 2 -#define MP_INTSRC 3 -#define MP_LINTSRC 4 - -struct mpc_config_processor -{ - unsigned char mpc_type; - unsigned char mpc_apicid; /* Local APIC number */ - unsigned char mpc_apicver; /* Its versions */ - unsigned char mpc_cpuflag; -#define CPU_ENABLED 1 /* Processor is available */ -#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ - unsigned int mpc_cpufeature; -#define CPU_STEPPING_MASK 0x0F -#define CPU_MODEL_MASK 0xF0 -#define CPU_FAMILY_MASK 0xF00 - unsigned int mpc_featureflag; /* CPUID feature value */ - unsigned int mpc_reserved[2]; -}; - -struct mpc_config_bus -{ - unsigned char mpc_type; - unsigned char mpc_busid; - unsigned char mpc_bustype[6]; -}; - -/* List of Bus Type string values, Intel MP Spec. 
*/ -#define BUSTYPE_EISA "EISA" -#define BUSTYPE_ISA "ISA" -#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ -#define BUSTYPE_MCA "MCA" -#define BUSTYPE_VL "VL" /* Local bus */ -#define BUSTYPE_PCI "PCI" -#define BUSTYPE_PCMCIA "PCMCIA" -#define BUSTYPE_CBUS "CBUS" -#define BUSTYPE_CBUSII "CBUSII" -#define BUSTYPE_FUTURE "FUTURE" -#define BUSTYPE_MBI "MBI" -#define BUSTYPE_MBII "MBII" -#define BUSTYPE_MPI "MPI" -#define BUSTYPE_MPSA "MPSA" -#define BUSTYPE_NUBUS "NUBUS" -#define BUSTYPE_TC "TC" -#define BUSTYPE_VME "VME" -#define BUSTYPE_XPRESS "XPRESS" - -struct mpc_config_ioapic -{ - unsigned char mpc_type; - unsigned char mpc_apicid; - unsigned char mpc_apicver; - unsigned char mpc_flags; -#define MPC_APIC_USABLE 0x01 - unsigned int mpc_apicaddr; -}; - -struct mpc_config_intsrc -{ - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbus; - unsigned char mpc_srcbusirq; - unsigned char mpc_dstapic; - unsigned char mpc_dstirq; -}; - -enum mp_irq_source_types { - mp_INT = 0, - mp_NMI = 1, - mp_SMI = 2, - mp_ExtINT = 3 -}; - -#define MP_IRQDIR_DEFAULT 0 -#define MP_IRQDIR_HIGH 1 -#define MP_IRQDIR_LOW 3 - - -struct mpc_config_lintsrc -{ - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbusid; - unsigned char mpc_srcbusirq; - unsigned char mpc_destapic; -#define MP_APIC_ALL 0xFF - unsigned char mpc_destapiclint; -}; - -/* - * Default configurations - * - * 1 2 CPU ISA 82489DX - * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining - * 3 2 CPU EISA 82489DX - * 4 2 CPU MCA 82489DX - * 5 2 CPU ISA+PCI - * 6 2 CPU EISA+PCI - * 7 2 CPU MCA+PCI - */ - -#define MAX_MP_BUSSES 256 -/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. 
*/ -#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) -extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); -extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; - -extern unsigned int boot_cpu_physical_apicid; -extern int smp_found_config; -extern void find_smp_config (void); -extern void get_smp_config (void); -extern int nr_ioapics; -extern unsigned char apic_version [MAX_APICS]; -extern int mp_irq_entries; -extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; -extern int mpc_default_type; -extern unsigned long mp_lapic_addr; - -#ifdef CONFIG_ACPI -extern void mp_register_lapic (u8 id, u8 enabled); -extern void mp_register_lapic_address (u64 address); - -extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); -extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); -extern void mp_config_acpi_legacy_irqs (void); -extern int mp_register_gsi (u32 gsi, int triggering, int polarity); -#endif - -extern int using_apic_timer; - -#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) - -struct physid_mask -{ - unsigned long mask[PHYSID_ARRAY_SIZE]; -}; - -typedef struct physid_mask physid_mask_t; - -#define physid_set(physid, map) set_bit(physid, (map).mask) -#define physid_clear(physid, map) clear_bit(physid, (map).mask) -#define physid_isset(physid, map) test_bit(physid, (map).mask) -#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) - -#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) -#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS) -#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) -#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) -#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) -#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) -#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) -#define physids_coerce(map) ((map).mask[0]) - -#define physids_promote(physids) \ - ({ \ - physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ - __physid_mask.mask[0] = physids; \ - __physid_mask; \ - }) - -#define physid_mask_of_physid(physid) \ - ({ \ - physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ - physid_set(physid, __physid_mask); \ - __physid_mask; \ - }) - -#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } -#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } - -extern physid_mask_t phys_cpu_present_map; - -#endif - diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 13bafb16e7af..3504617fe648 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h @@ -8,52 +8,68 @@ /* * This tag identifies where the SMP configuration - * information is. + * information is. */ - + #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') -#define MAX_MPC_ENTRY 1024 -#define MAX_APICS 256 +#ifdef CONFIG_X86_32 +# define MAX_MPC_ENTRY 1024 +# define MAX_APICS 256 +#else +/* + * A maximum of 255 APICs with the current APIC ID architecture. 
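The "_MP_" tag mentioned above is what the kernel looks for when it scans the conventional BIOS areas for the MP floating pointer structure, and SMP_MAGIC_IDENT packs those four signature bytes into one integer so the match can be a single 32-bit compare on a little-endian x86. A stand-alone sketch of that equivalence; check_mp_signature() is an invented helper for illustration, not a kernel function.

/*
 * On little-endian x86 the bytes '_','M','P','_' in memory read
 * back as the packed SMP_MAGIC_IDENT constant, so one integer
 * compare replaces a four-byte memcmp.
 */
#include <stdio.h>
#include <string.h>

#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')

static int check_mp_signature(const char sig[4])
{
	unsigned int word;

	memcpy(&word, sig, 4);		/* avoid strict-aliasing trouble */
	return word == SMP_MAGIC_IDENT;	/* holds on little-endian hosts */
}

int main(void)
{
	printf("match: %d\n", check_mp_signature("_MP_"));	/* prints 1 on x86 */
	return 0;
}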
+ */ +# define MAX_APICS 255 +#endif struct intel_mp_floating { - char mpf_signature[4]; /* "_MP_" */ - unsigned long mpf_physptr; /* Configuration table address */ + char mpf_signature[4]; /* "_MP_" */ + unsigned int mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ unsigned char mpf_specification;/* Specification version */ unsigned char mpf_checksum; /* Checksum (makes sum 0) */ - unsigned char mpf_feature1; /* Standard or configuration ? */ + unsigned char mpf_feature1; /* Standard or configuration ? */ unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ unsigned char mpf_feature3; /* Unused (0) */ unsigned char mpf_feature4; /* Unused (0) */ unsigned char mpf_feature5; /* Unused (0) */ }; +#define MPC_SIGNATURE "PCMP" + struct mp_config_table { char mpc_signature[4]; -#define MPC_SIGNATURE "PCMP" unsigned short mpc_length; /* Size of table */ char mpc_spec; /* 0x01 */ char mpc_checksum; char mpc_oem[8]; char mpc_productid[12]; - unsigned long mpc_oemptr; /* 0 if not present */ + unsigned int mpc_oemptr; /* 0 if not present */ unsigned short mpc_oemsize; /* 0 if not present */ unsigned short mpc_oemcount; - unsigned long mpc_lapic; /* APIC address */ - unsigned long reserved; + unsigned int mpc_lapic; /* APIC address */ + unsigned int reserved; }; /* Followed by entries */ -#define MP_PROCESSOR 0 -#define MP_BUS 1 -#define MP_IOAPIC 2 -#define MP_INTSRC 3 -#define MP_LINTSRC 4 -#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ +#define MP_PROCESSOR 0 +#define MP_BUS 1 +#define MP_IOAPIC 2 +#define MP_INTSRC 3 +#define MP_LINTSRC 4 +/* Used by IBM NUMA-Q to describe node locality */ +#define MP_TRANSLATION 192 + +#define CPU_ENABLED 1 /* Processor is available */ +#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ + +#define CPU_STEPPING_MASK 0x000F +#define CPU_MODEL_MASK 0x00F0 +#define CPU_FAMILY_MASK 0x0F00 struct mpc_config_processor { @@ -61,14 +77,9 @@ struct mpc_config_processor unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ unsigned char mpc_cpuflag; -#define CPU_ENABLED 1 /* Processor is available */ -#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ - unsigned long mpc_cpufeature; -#define CPU_STEPPING_MASK 0x0F -#define CPU_MODEL_MASK 0xF0 -#define CPU_FAMILY_MASK 0xF00 - unsigned long mpc_featureflag; /* CPUID feature value */ - unsigned long mpc_reserved[2]; + unsigned int mpc_cpufeature; + unsigned int mpc_featureflag; /* CPUID feature value */ + unsigned int mpc_reserved[2]; }; struct mpc_config_bus @@ -98,14 +109,15 @@ struct mpc_config_bus #define BUSTYPE_VME "VME" #define BUSTYPE_XPRESS "XPRESS" +#define MPC_APIC_USABLE 0x01 + struct mpc_config_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; unsigned char mpc_flags; -#define MPC_APIC_USABLE 0x01 - unsigned long mpc_apicaddr; + unsigned int mpc_apicaddr; }; struct mpc_config_intsrc @@ -130,6 +142,7 @@ enum mp_irq_source_types { #define MP_IRQDIR_HIGH 1 #define MP_IRQDIR_LOW 3 +#define MP_APIC_ALL 0xFF struct mpc_config_lintsrc { @@ -138,15 +151,15 @@ struct mpc_config_lintsrc unsigned short mpc_irqflag; unsigned char mpc_srcbusid; unsigned char mpc_srcbusirq; - unsigned char mpc_destapic; -#define MP_APIC_ALL 0xFF + unsigned char mpc_destapic; unsigned char mpc_destapiclint; }; +#define MPC_OEM_SIGNATURE "_OEM" + struct mp_config_oemtable { char oem_signature[4]; -#define MPC_OEM_SIGNATURE "_OEM" unsigned short oem_length; /* Size of table */ 
char oem_rev; /* 0x01 */ char oem_checksum; @@ -155,13 +168,13 @@ struct mp_config_oemtable struct mpc_config_translation { - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; }; /* diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index a4944732be04..fae118a25278 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -63,6 +63,13 @@ #define MSR_IA32_LASTINTFROMIP 0x000001dd #define MSR_IA32_LASTINTTOIP 0x000001de +/* DEBUGCTLMSR bits (others vary by model): */ +#define _DEBUGCTLMSR_LBR 0 /* last branch recording */ +#define _DEBUGCTLMSR_BTF 1 /* single-step on branches */ + +#define DEBUGCTLMSR_LBR (1UL << _DEBUGCTLMSR_LBR) +#define DEBUGCTLMSR_BTF (1UL << _DEBUGCTLMSR_BTF) + #define MSR_IA32_MC0_CTL 0x00000400 #define MSR_IA32_MC0_STATUS 0x00000401 #define MSR_IA32_MC0_ADDR 0x00000402 @@ -88,6 +95,14 @@ #define MSR_AMD64_IBSDCPHYSAD 0xc0011039 #define MSR_AMD64_IBSCTL 0xc001103a +/* Fam 10h MSRs */ +#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 +#define FAM10H_MMIO_CONF_ENABLE (1<<0) +#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf +#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 +#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff +#define FAM10H_MMIO_CONF_BASE_SHIFT 20 + /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a #define MSR_K8_TOP_MEM2 0xc001001d diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 80b027081b3c..204a8a30fecf 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h @@ -7,77 +7,109 @@ # include <linux/types.h> #endif -#ifdef __i386__ - #ifdef __KERNEL__ #ifndef __ASSEMBLY__ +#include <asm/asm.h> #include <asm/errno.h> +static inline unsigned long long native_read_tscp(unsigned int *aux) +{ + unsigned long low, high; + asm volatile (".byte 0x0f,0x01,0xf9" + : "=a" (low), "=d" (high), "=c" (*aux)); + return low | ((u64)high >> 32); +} + +/* + * i386 calling convention returns 64-bit value in edx:eax, while + * x86_64 returns at rax. 
Also, the "A" constraint does not really + * mean rdx:rax in x86_64, so we need specialized behaviour for each + * architecture + */ +#ifdef CONFIG_X86_64 +#define DECLARE_ARGS(val, low, high) unsigned low, high +#define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32)) +#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) +#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) +#else +#define DECLARE_ARGS(val, low, high) unsigned long long val +#define EAX_EDX_VAL(val, low, high) (val) +#define EAX_EDX_ARGS(val, low, high) "A" (val) +#define EAX_EDX_RET(val, low, high) "=A" (val) +#endif + static inline unsigned long long native_read_msr(unsigned int msr) { - unsigned long long val; + DECLARE_ARGS(val, low, high); - asm volatile("rdmsr" : "=A" (val) : "c" (msr)); - return val; + asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr)); + return EAX_EDX_VAL(val, low, high); } static inline unsigned long long native_read_msr_safe(unsigned int msr, int *err) { - unsigned long long val; + DECLARE_ARGS(val, low, high); - asm volatile("2: rdmsr ; xorl %0,%0\n" + asm volatile("2: rdmsr ; xor %0,%0\n" "1:\n\t" ".section .fixup,\"ax\"\n\t" - "3: movl %3,%0 ; jmp 1b\n\t" + "3: mov %3,%0 ; jmp 1b\n\t" ".previous\n\t" ".section __ex_table,\"a\"\n" - " .align 4\n\t" - " .long 2b,3b\n\t" + _ASM_ALIGN "\n\t" + _ASM_PTR " 2b,3b\n\t" ".previous" - : "=r" (*err), "=A" (val) + : "=r" (*err), EAX_EDX_RET(val, low, high) : "c" (msr), "i" (-EFAULT)); - - return val; + return EAX_EDX_VAL(val, low, high); } -static inline void native_write_msr(unsigned int msr, unsigned long long val) +static inline void native_write_msr(unsigned int msr, + unsigned low, unsigned high) { - asm volatile("wrmsr" : : "c" (msr), "A"(val)); + asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high)); } static inline int native_write_msr_safe(unsigned int msr, - unsigned long long val) + unsigned low, unsigned high) { int err; - asm volatile("2: wrmsr ; xorl %0,%0\n" + asm volatile("2: wrmsr ; xor %0,%0\n" "1:\n\t" ".section .fixup,\"ax\"\n\t" - "3: movl %4,%0 ; jmp 1b\n\t" + "3: mov %4,%0 ; jmp 1b\n\t" ".previous\n\t" ".section __ex_table,\"a\"\n" - " .align 4\n\t" - " .long 2b,3b\n\t" + _ASM_ALIGN "\n\t" + _ASM_PTR " 2b,3b\n\t" ".previous" : "=a" (err) - : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)), + : "c" (msr), "0" (low), "d" (high), "i" (-EFAULT)); return err; } -static inline unsigned long long native_read_tsc(void) +extern unsigned long long native_read_tsc(void); + +static __always_inline unsigned long long __native_read_tsc(void) { - unsigned long long val; - asm volatile("rdtsc" : "=A" (val)); - return val; + DECLARE_ARGS(val, low, high); + + rdtsc_barrier(); + asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); + rdtsc_barrier(); + + return EAX_EDX_VAL(val, low, high); } -static inline unsigned long long native_read_pmc(void) +static inline unsigned long long native_read_pmc(int counter) { - unsigned long long val; - asm volatile("rdpmc" : "=A" (val)); - return val; + DECLARE_ARGS(val, low, high); + + asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); + return EAX_EDX_VAL(val, low, high); } #ifdef CONFIG_PARAVIRT @@ -97,20 +129,21 @@ static inline unsigned long long native_read_pmc(void) (val2) = (u32)(__val >> 32); \ } while(0) -static inline void wrmsr(u32 __msr, u32 __low, u32 __high) +static inline void wrmsr(unsigned msr, unsigned low, unsigned high) { - native_write_msr(__msr, ((u64)__high << 32) | __low); + native_write_msr(msr, low, high); } #define rdmsrl(msr,val) \ 
((val) = native_read_msr(msr)) -#define wrmsrl(msr,val) native_write_msr(msr, val) +#define wrmsrl(msr, val) \ + native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32)) /* wrmsr with exception handling */ -static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) +static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) { - return native_write_msr_safe(__msr, ((u64)__high << 32) | __low); + return native_write_msr_safe(msr, low, high); } /* rdmsr with exception handling */ @@ -129,204 +162,31 @@ static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) #define rdtscll(val) \ ((val) = native_read_tsc()) -#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) - #define rdpmc(counter,low,high) \ do { \ - u64 _l = native_read_pmc(); \ + u64 _l = native_read_pmc(counter); \ (low) = (u32)_l; \ (high) = (u32)(_l >> 32); \ } while(0) -#endif /* !CONFIG_PARAVIRT */ - -#ifdef CONFIG_SMP -void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); -void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); -int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -#else /* CONFIG_SMP */ -static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) -{ - rdmsr(msr_no, *l, *h); -} -static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) -{ - wrmsr(msr_no, l, h); -} -static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) -{ - return rdmsr_safe(msr_no, l, h); -} -static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) -{ - return wrmsr_safe(msr_no, l, h); -} -#endif /* CONFIG_SMP */ -#endif /* ! __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - -#else /* __i386__ */ - -#ifndef __ASSEMBLY__ -#include <linux/errno.h> -/* - * Access to machine-specific registers (available on 586 and better only) - * Note: the rd* operations modify the parameters directly (without using - * pointer indirection), this allows gcc to optimize better - */ - -#define rdmsr(msr,val1,val2) \ - __asm__ __volatile__("rdmsr" \ - : "=a" (val1), "=d" (val2) \ - : "c" (msr)) - - -#define rdmsrl(msr,val) do { unsigned long a__,b__; \ - __asm__ __volatile__("rdmsr" \ - : "=a" (a__), "=d" (b__) \ - : "c" (msr)); \ - val = a__ | (b__<<32); \ -} while(0) - -#define wrmsr(msr,val1,val2) \ - __asm__ __volatile__("wrmsr" \ - : /* no outputs */ \ - : "c" (msr), "a" (val1), "d" (val2)) - -#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) -#define rdtsc(low,high) \ - __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) +#define rdtscp(low, high, aux) \ + do { \ + unsigned long long _val = native_read_tscp(&(aux)); \ + (low) = (u32)_val; \ + (high) = (u32)(_val >> 32); \ + } while (0) -#define rdtscl(low) \ - __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") +#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) -#define rdtscp(low,high,aux) \ - __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux)) +#endif /* !CONFIG_PARAVIRT */ -#define rdtscll(val) do { \ - unsigned int __a,__d; \ - __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \ - (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ -} while(0) -#define rdtscpll(val, aux) do { \ - unsigned long __a, __d; \ - __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \ - (val) = (__d << 32) | __a; \ -} while (0) +#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) 
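The calling-convention note above reduces to one invariant: an MSR value is 64 bits carried as a low/high pair in EAX and EDX, and DECLARE_ARGS/EAX_EDX_VAL plus the new wrmsrl() only differ in how they split and rejoin those halves. A plain-C sketch of that arithmetic with no inline assembly; the helper names are invented for the example.

/*
 * Split a 64-bit MSR value into the EAX/EDX halves and put it back
 * together, mirroring wrmsrl() and EAX_EDX_VAL() above.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t combine_eax_edx(uint32_t low, uint32_t high)
{
	/* what EAX_EDX_VAL() does on 64-bit: low | ((u64)high << 32) */
	return (uint64_t)low | ((uint64_t)high << 32);
}

static void split_for_wrmsr(uint64_t val, uint32_t *low, uint32_t *high)
{
	/* what wrmsrl() does before calling native_write_msr() */
	*low  = (uint32_t)val;
	*high = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t msr_val = 0x1122334455667788ULL;	/* arbitrary test value */

	split_for_wrmsr(msr_val, &lo, &hi);
	printf("low=%#x high=%#x roundtrip ok=%d\n", lo, hi,
	       combine_eax_edx(lo, hi) == msr_val);
	return 0;
}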
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) -#define rdpmc(counter,low,high) \ - __asm__ __volatile__("rdpmc" \ - : "=a" (low), "=d" (high) \ - : "c" (counter)) - - -static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - __asm__("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (op)); -} - -/* Some CPUID calls want 'count' to be placed in ecx */ -static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, - int *edx) -{ - __asm__("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (op), "c" (count)); -} - -/* - * CPUID functions returning a single datum - */ -static inline unsigned int cpuid_eax(unsigned int op) -{ - unsigned int eax; - - __asm__("cpuid" - : "=a" (eax) - : "0" (op) - : "bx", "cx", "dx"); - return eax; -} -static inline unsigned int cpuid_ebx(unsigned int op) -{ - unsigned int eax, ebx; - - __asm__("cpuid" - : "=a" (eax), "=b" (ebx) - : "0" (op) - : "cx", "dx" ); - return ebx; -} -static inline unsigned int cpuid_ecx(unsigned int op) -{ - unsigned int eax, ecx; - - __asm__("cpuid" - : "=a" (eax), "=c" (ecx) - : "0" (op) - : "bx", "dx" ); - return ecx; -} -static inline unsigned int cpuid_edx(unsigned int op) -{ - unsigned int eax, edx; - - __asm__("cpuid" - : "=a" (eax), "=d" (edx) - : "0" (op) - : "bx", "cx"); - return edx; -} - -#ifdef __KERNEL__ - -/* wrmsr with exception handling */ -#define wrmsr_safe(msr,a,b) ({ int ret__; \ - asm volatile("2: wrmsr ; xorl %0,%0\n" \ - "1:\n\t" \ - ".section .fixup,\"ax\"\n\t" \ - "3: movl %4,%0 ; jmp 1b\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n" \ - " .align 8\n\t" \ - " .quad 2b,3b\n\t" \ - ".previous" \ - : "=a" (ret__) \ - : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ - ret__; }) - -#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) - -#define rdmsr_safe(msr,a,b) \ - ({ int ret__; \ - asm volatile ("1: rdmsr\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %4,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 8\n" \ - " .quad 1b,3b\n" \ - ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \ - :"c"(msr), "i"(-EIO), "0"(0)); \ - ret__; }) - #ifdef CONFIG_SMP void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); @@ -350,9 +210,8 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) return wrmsr_safe(msr_no, l, h); } #endif /* CONFIG_SMP */ -#endif /* __KERNEL__ */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ -#endif /* !__i386__ */ #endif diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index e8320e4e6ca2..319d065800be 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -89,24 +89,25 @@ struct mtrr_gentry extern void mtrr_save_fixed_ranges(void *); extern void mtrr_save_state(void); extern int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, char increment); + unsigned int type, bool increment); extern int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, char increment); + unsigned int type, bool increment); extern int mtrr_del (int reg, unsigned long base, unsigned long size); extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); extern void 
mtrr_ap_init(void); extern void mtrr_bp_init(void); +extern int mtrr_trim_uncached_memory(unsigned long end_pfn); # else #define mtrr_save_fixed_ranges(arg) do {} while (0) #define mtrr_save_state() do {} while (0) static __inline__ int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, char increment) + unsigned int type, bool increment) { return -ENODEV; } static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, char increment) + unsigned int type, bool increment) { return -ENODEV; } @@ -120,7 +121,10 @@ static __inline__ int mtrr_del_page (int reg, unsigned long base, { return -ENODEV; } - +static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) +{ + return 0; +} static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} #define mtrr_ap_init() do {} while (0) diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index 7a17d9e58ad6..bbeefb96ddfd 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h @@ -26,7 +26,7 @@ do { \ unsigned int dummy; \ \ typecheck(atomic_t *, count); \ - typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ + typecheck_fn(void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK_PREFIX " decl (%%eax) \n" \ @@ -51,8 +51,7 @@ do { \ * or anything the slow path function returns */ static inline int -__mutex_fastpath_lock_retval(atomic_t *count, - int fastcall (*fail_fn)(atomic_t *)) +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); @@ -78,7 +77,7 @@ do { \ unsigned int dummy; \ \ typecheck(atomic_t *, count); \ - typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ + typecheck_fn(void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK_PREFIX " incl (%%eax) \n" \ diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h index 70a958a8e381..7206c7e8a388 100644 --- a/include/asm-x86/nmi_32.h +++ b/include/asm-x86/nmi_32.h @@ -1,6 +1,3 @@ -/* - * linux/include/asm-i386/nmi.h - */ #ifndef ASM_NMI_H #define ASM_NMI_H diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h index 65b6acf3bb59..2eeb74e5f3ff 100644 --- a/include/asm-x86/nmi_64.h +++ b/include/asm-x86/nmi_64.h @@ -1,6 +1,3 @@ -/* - * linux/include/asm-i386/nmi.h - */ #ifndef ASM_NMI_H #define ASM_NMI_H @@ -41,7 +38,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); #define get_nmi_reason() inb(0x61) -extern int panic_on_timeout; extern int unknown_nmi_panic; extern int nmi_watchdog_enabled; @@ -60,7 +56,6 @@ extern void enable_timer_nmi_watchdog(void); extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); extern void nmi_watchdog_default(void); -extern int setup_nmi_watchdog(char *); extern atomic_t nmi_active; extern unsigned int nmi_watchdog; diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h new file mode 100644 index 000000000000..fec025c7f58c --- /dev/null +++ b/include/asm-x86/nops.h @@ -0,0 +1,90 @@ +#ifndef _ASM_NOPS_H +#define _ASM_NOPS_H 1 + +/* Define nops for use with alternative() */ + +/* generic versions from gas */ +#define GENERIC_NOP1 ".byte 0x90\n" +#define GENERIC_NOP2 ".byte 0x89,0xf6\n" +#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" +#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" +#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 +#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" +#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" +#define GENERIC_NOP8 
GENERIC_NOP1 GENERIC_NOP7 + +/* Opteron 64bit nops */ +#define K8_NOP1 GENERIC_NOP1 +#define K8_NOP2 ".byte 0x66,0x90\n" +#define K8_NOP3 ".byte 0x66,0x66,0x90\n" +#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" +#define K8_NOP5 K8_NOP3 K8_NOP2 +#define K8_NOP6 K8_NOP3 K8_NOP3 +#define K8_NOP7 K8_NOP4 K8_NOP3 +#define K8_NOP8 K8_NOP4 K8_NOP4 + +/* K7 nops */ +/* uses eax dependencies (arbitary choice) */ +#define K7_NOP1 GENERIC_NOP1 +#define K7_NOP2 ".byte 0x8b,0xc0\n" +#define K7_NOP3 ".byte 0x8d,0x04,0x20\n" +#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" +#define K7_NOP5 K7_NOP4 ASM_NOP1 +#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" +#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" +#define K7_NOP8 K7_NOP7 ASM_NOP1 + +/* P6 nops */ +/* uses eax dependencies (Intel-recommended choice) */ +#define P6_NOP1 GENERIC_NOP1 +#define P6_NOP2 ".byte 0x66,0x90\n" +#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" +#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" +#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" +#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" +#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" +#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" + +#if defined(CONFIG_MK8) +#define ASM_NOP1 K8_NOP1 +#define ASM_NOP2 K8_NOP2 +#define ASM_NOP3 K8_NOP3 +#define ASM_NOP4 K8_NOP4 +#define ASM_NOP5 K8_NOP5 +#define ASM_NOP6 K8_NOP6 +#define ASM_NOP7 K8_NOP7 +#define ASM_NOP8 K8_NOP8 +#elif defined(CONFIG_MK7) +#define ASM_NOP1 K7_NOP1 +#define ASM_NOP2 K7_NOP2 +#define ASM_NOP3 K7_NOP3 +#define ASM_NOP4 K7_NOP4 +#define ASM_NOP5 K7_NOP5 +#define ASM_NOP6 K7_NOP6 +#define ASM_NOP7 K7_NOP7 +#define ASM_NOP8 K7_NOP8 +#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ + defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ + defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) +#define ASM_NOP1 P6_NOP1 +#define ASM_NOP2 P6_NOP2 +#define ASM_NOP3 P6_NOP3 +#define ASM_NOP4 P6_NOP4 +#define ASM_NOP5 P6_NOP5 +#define ASM_NOP6 P6_NOP6 +#define ASM_NOP7 P6_NOP7 +#define ASM_NOP8 P6_NOP8 +#else +#define ASM_NOP1 GENERIC_NOP1 +#define ASM_NOP2 GENERIC_NOP2 +#define ASM_NOP3 GENERIC_NOP3 +#define ASM_NOP4 GENERIC_NOP4 +#define ASM_NOP5 GENERIC_NOP5 +#define ASM_NOP6 GENERIC_NOP6 +#define ASM_NOP7 GENERIC_NOP7 +#define ASM_NOP8 GENERIC_NOP8 +#endif + +#define ASM_NOP_MAX 8 + +#endif diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h index 96fcb157db1d..03d0f7a9bf02 100644 --- a/include/asm-x86/numa_32.h +++ b/include/asm-x86/numa_32.h @@ -1,3 +1,15 @@ +#ifndef _ASM_X86_32_NUMA_H +#define _ASM_X86_32_NUMA_H 1 -int pxm_to_nid(int pxm); +extern int pxm_to_nid(int pxm); +#ifdef CONFIG_NUMA +extern void __init remap_numa_kva(void); +extern void set_highmem_pages_init(int); +#else +static inline void remap_numa_kva(void) +{ +} +#endif + +#endif /* _ASM_X86_32_NUMA_H */ diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 0cc5c97a7fc9..15fe07cde586 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h @@ -20,13 +20,19 @@ extern void numa_set_node(int cpu, int node); extern void srat_reserve_add_area(int nodeid); extern int hotadd_percent; -extern unsigned char apicid_to_node[MAX_LOCAL_APIC]; +extern s16 apicid_to_node[MAX_LOCAL_APIC]; + +extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); +extern unsigned long numa_free_all_bootmem(void); +extern void setup_node_bootmem(int nodeid, unsigned long start, + unsigned long end); + #ifdef CONFIG_NUMA extern void __init init_cpu_to_node(void); static inline void 
clear_node_cpumask(int cpu) { - clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); + clear_bit(cpu, (unsigned long *)&node_to_cpumask_map[cpu_to_node(cpu)]); } #else @@ -34,6 +40,4 @@ static inline void clear_node_cpumask(int cpu) #define clear_node_cpumask(cpu) do {} while (0) #endif -#define NUMA_NO_NODE 0xff - #endif diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index a757eb26141d..c8b30efeed85 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -1,13 +1,183 @@ +#ifndef _ASM_X86_PAGE_H +#define _ASM_X86_PAGE_H + +#include <linux/const.h> + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + #ifdef __KERNEL__ -# ifdef CONFIG_X86_32 -# include "page_32.h" -# else -# include "page_64.h" -# endif + +#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) +#define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK)) + +#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT) +#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) + +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + +#define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1) +#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1) + +#ifndef __ASSEMBLY__ +#include <linux/types.h> +#endif + +#ifdef CONFIG_X86_64 +#include <asm/page_64.h> +#define max_pfn_mapped end_pfn_map #else -# ifdef __i386__ -# include "page_32.h" -# else -# include "page_64.h" -# endif +#include <asm/page_32.h> +#define max_pfn_mapped max_low_pfn +#endif /* CONFIG_X86_64 */ + +#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) + +#define VM_DATA_DEFAULT_FLAGS \ + (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0 ) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + +#ifndef __ASSEMBLY__ + +extern int page_is_ram(unsigned long pagenr); + +struct page; + +static void inline clear_user_page(void *page, unsigned long vaddr, + struct page *pg) +{ + clear_page(page); +} + +static void inline copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *topage) +{ + copy_page(to, from); +} + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pgprotval_t pgprot; } pgprot_t; + +static inline pgd_t native_make_pgd(pgdval_t val) +{ + return (pgd_t) { val }; +} + +static inline pgdval_t native_pgd_val(pgd_t pgd) +{ + return pgd.pgd; +} + +#if PAGETABLE_LEVELS >= 3 +#if PAGETABLE_LEVELS == 4 +typedef struct { pudval_t pud; } pud_t; + +static inline pud_t native_make_pud(pmdval_t val) +{ + return (pud_t) { val }; +} + +static inline pudval_t native_pud_val(pud_t pud) +{ + return pud.pud; +} +#else /* PAGETABLE_LEVELS == 3 */ +#include <asm-generic/pgtable-nopud.h> + +static inline pudval_t native_pud_val(pud_t pud) +{ + return native_pgd_val(pud.pgd); +} +#endif /* PAGETABLE_LEVELS == 4 */ + +typedef struct { pmdval_t pmd; } pmd_t; + +static inline pmd_t native_make_pmd(pmdval_t val) +{ + return (pmd_t) { val }; +} + +static inline pmdval_t native_pmd_val(pmd_t pmd) +{ + return pmd.pmd; +} +#else /* PAGETABLE_LEVELS == 2 */ +#include <asm-generic/pgtable-nopmd.h> + +static inline pmdval_t native_pmd_val(pmd_t pmd) +{ + return native_pgd_val(pmd.pud.pgd); +} +#endif /* PAGETABLE_LEVELS >= 3 */ + +static inline pte_t native_make_pte(pteval_t val) +{ + return (pte_t) { .pte = val }; +} + +static inline pteval_t native_pte_val(pte_t pte) +{ + return pte.pte; +} + +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else /* !CONFIG_PARAVIRT */ + +#define pgd_val(x) native_pgd_val(x) +#define __pgd(x) native_make_pgd(x) + +#ifndef __PAGETABLE_PUD_FOLDED +#define pud_val(x) native_pud_val(x) +#define __pud(x) native_make_pud(x) +#endif + +#ifndef __PAGETABLE_PMD_FOLDED +#define pmd_val(x) native_pmd_val(x) +#define __pmd(x) native_make_pmd(x) #endif + +#define pte_val(x) native_pte_val(x) +#define __pte(x) native_make_pte(x) + +#endif /* CONFIG_PARAVIRT */ + +#define __pa(x) __phys_addr((unsigned long)(x)) +/* __pa_symbol should be used for C visible symbols. + This seems to be the official gcc blessed way to do such arithmetic. 
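The native_make_*/native_*_val block above exists for the reason the old i386 header gives just below ("to make use of C type-checking"): wrapping each table-entry word in a one-member struct makes pte_t, pmd_t, pud_t and pgd_t distinct types, so mixing them up is a compile error while __pte()/pte_val() remain the explicit conversions. A compilable miniature of the pattern; the helper names below are invented for the sketch.

/*
 * pte_t and pmd_t wrap the same scalar width but are distinct C
 * types, so the compiler rejects passing one where the other is
 * expected; conversions go through explicit make/val helpers.
 */
#include <stdio.h>

typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;

static pte_t make_pte(pteval_t val)  { return (pte_t){ val }; }
static pteval_t pte_val_of(pte_t p)  { return p.pte; }

static void set_demo_pte(pte_t *slot, pte_t val)
{
	*slot = val;
}

int main(void)
{
	pte_t slot;
	pmd_t pmd = { 0 };

	set_demo_pte(&slot, make_pte(0x1000 | 0x1));	/* frame address plus a demo "present" bit */
	/* set_demo_pte(&slot, pmd); would not compile: pmd_t is not pte_t */
	printf("pte value: %#lx\n", pte_val_of(slot));
	(void)pmd;
	return 0;
}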
*/ +#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x))) + +#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) + +#define __boot_va(x) __va(x) +#define __boot_pa(x) __pa(x) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#endif /* __ASSEMBLY__ */ + +#include <asm-generic/memory_model.h> +#include <asm-generic/page.h> + +#define __HAVE_ARCH_GATE_AREA 1 + +#endif /* __KERNEL__ */ +#endif /* _ASM_X86_PAGE_H */ diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 80ecc66b6d86..a6fd10f230d2 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h @@ -1,206 +1,107 @@ -#ifndef _I386_PAGE_H -#define _I386_PAGE_H - -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) -#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -#ifdef CONFIG_X86_USE_3DNOW - -#include <asm/mmx.h> - -#define clear_page(page) mmx_clear_page((void *)(page)) -#define copy_page(to,from) mmx_copy_page(to,from) - -#else +#ifndef _ASM_X86_PAGE_32_H +#define _ASM_X86_PAGE_32_H /* - * On older X86 processors it's not a win to use MMX here it seems. - * Maybe the K6-III ? - */ - -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) - -#endif - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE - -/* - * These are used to make use of C type-checking.. + * This handles the memory map. + * + * A __PAGE_OFFSET of 0xC0000000 means that the kernel has + * a virtual address space of one gigabyte, which limits the + * amount of physical memory you can use to about 950MB. + * + * If you want more physical memory than this then see the CONFIG_HIGHMEM4G + * and CONFIG_HIGHMEM64G options in the kernel configuration. 
*/ -extern int nx_enabled; +#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) #ifdef CONFIG_X86_PAE -typedef struct { unsigned long pte_low, pte_high; } pte_t; -typedef struct { unsigned long long pmd; } pmd_t; -typedef struct { unsigned long long pgd; } pgd_t; -typedef struct { unsigned long long pgprot; } pgprot_t; +#define __PHYSICAL_MASK_SHIFT 36 +#define __VIRTUAL_MASK_SHIFT 32 +#define PAGETABLE_LEVELS 3 -static inline unsigned long long native_pgd_val(pgd_t pgd) -{ - return pgd.pgd; -} - -static inline unsigned long long native_pmd_val(pmd_t pmd) -{ - return pmd.pmd; -} - -static inline unsigned long long native_pte_val(pte_t pte) -{ - return pte.pte_low | ((unsigned long long)pte.pte_high << 32); -} - -static inline pgd_t native_make_pgd(unsigned long long val) -{ - return (pgd_t) { val }; -} - -static inline pmd_t native_make_pmd(unsigned long long val) -{ - return (pmd_t) { val }; -} - -static inline pte_t native_make_pte(unsigned long long val) -{ - return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ; -} - -#ifndef CONFIG_PARAVIRT -#define pmd_val(x) native_pmd_val(x) -#define __pmd(x) native_make_pmd(x) -#endif - -#define HPAGE_SHIFT 21 -#include <asm-generic/pgtable-nopud.h> +#ifndef __ASSEMBLY__ +typedef u64 pteval_t; +typedef u64 pmdval_t; +typedef u64 pudval_t; +typedef u64 pgdval_t; +typedef u64 pgprotval_t; +typedef u64 phys_addr_t; + +typedef union { + struct { + unsigned long pte_low, pte_high; + }; + pteval_t pte; +} pte_t; +#endif /* __ASSEMBLY__ + */ #else /* !CONFIG_X86_PAE */ -typedef struct { unsigned long pte_low; } pte_t; -typedef struct { unsigned long pgd; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; -#define boot_pte_t pte_t /* or would you rather have a typedef */ - -static inline unsigned long native_pgd_val(pgd_t pgd) -{ - return pgd.pgd; -} +#define __PHYSICAL_MASK_SHIFT 32 +#define __VIRTUAL_MASK_SHIFT 32 +#define PAGETABLE_LEVELS 2 -static inline unsigned long native_pte_val(pte_t pte) -{ - return pte.pte_low; -} - -static inline pgd_t native_make_pgd(unsigned long val) -{ - return (pgd_t) { val }; -} +#ifndef __ASSEMBLY__ +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; +typedef unsigned long phys_addr_t; -static inline pte_t native_make_pte(unsigned long val) -{ - return (pte_t) { .pte_low = val }; -} +typedef union { pteval_t pte, pte_low; } pte_t; +typedef pte_t boot_pte_t; -#define HPAGE_SHIFT 22 -#include <asm-generic/pgtable-nopmd.h> +#endif /* __ASSEMBLY__ */ #endif /* CONFIG_X86_PAE */ -#define PTE_MASK PAGE_MASK - #ifdef CONFIG_HUGETLB_PAGE -#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE - 1)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif -#define pgprot_val(x) ((x).pgprot) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#ifndef CONFIG_PARAVIRT -#define pgd_val(x) native_pgd_val(x) -#define __pgd(x) native_make_pgd(x) -#define pte_val(x) native_pte_val(x) -#define __pte(x) native_make_pte(x) -#endif - -#endif /* !__ASSEMBLY__ */ - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - -/* - * This handles the memory map.. We could make this a config - * option, but too many people screw it up, and too few need - * it. 
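The PAE pte_t above is a union so one entry can be viewed either as a single 64-bit value or as the pte_low/pte_high halves updated separately; a standalone sketch (GNU/C11 anonymous struct, little-endian x86 layout assumed, uint32_t standing in for the 32-bit unsigned long):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

typedef union {
        struct {
                uint32_t pte_low, pte_high;
        };
        pteval_t pte;
} pte_t;

int main(void)
{
        pte_t pte = { .pte = 0x123456789abcdef0ULL };

        /* on little-endian x86 the two halves overlay the 64-bit view */
        printf("pte_low  = %#x\n", pte.pte_low);        /* 0x9abcdef0 */
        printf("pte_high = %#x\n", pte.pte_high);       /* 0x12345678 */
        return 0;
}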
- * - * A __PAGE_OFFSET of 0xC0000000 means that the kernel has - * a virtual address space of one gigabyte, which limits the - * amount of physical memory you can use to about 950MB. - * - * If you want more physical memory than this then see the CONFIG_HIGHMEM4G - * and CONFIG_HIGHMEM64G options in the kernel configuration. - */ - #ifndef __ASSEMBLY__ +#define __phys_addr(x) ((x)-PAGE_OFFSET) +#define __phys_reloc_hide(x) RELOC_HIDE((x), 0) + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#endif /* CONFIG_FLATMEM */ -struct vm_area_struct; +extern int nx_enabled; /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. */ extern unsigned int __VMALLOC_RESERVE; - extern int sysctl_legacy_va_layout; -extern int page_is_ram(unsigned long pagenr); - -#endif /* __ASSEMBLY__ */ - -#ifdef __ASSEMBLY__ -#define __PAGE_OFFSET CONFIG_PAGE_OFFSET -#else -#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) -#endif - - -#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) -#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) -/* __pa_symbol should be used for C visible symbols. - This seems to be the official gcc blessed way to do such arithmetic. */ -#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) -#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) ((pfn) < max_mapnr) -#endif /* CONFIG_FLATMEM */ -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#ifdef CONFIG_X86_USE_3DNOW +#include <asm/mmx.h> + +static inline void clear_page(void *page) +{ + mmx_clear_page(page); +} -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? 
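With __phys_addr() reduced to a fixed offset on 32-bit, __pa()/__va() are simple additions; a standalone sketch assuming the default PAGE_OFFSET of 0xC0000000 quoted earlier in this header (real kernels take it from CONFIG_PAGE_OFFSET):

#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))

int main(void)
{
        unsigned long phys = 0x01000000UL;      /* 16 MB physical */
        void *virt = __va(phys);

        printf("phys %#lx -> virt %p\n", phys, virt);
        printf("virt %p  -> phys %#lx\n", virt, __pa(virt));
        return 0;
}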
VM_EXEC : 0 ) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +static inline void copy_page(void *to, void *from) +{ + mmx_copy_page(to, from); +} +#else /* !CONFIG_X86_USE_3DNOW */ +#include <linux/string.h> -#include <asm-generic/memory_model.h> -#include <asm-generic/page.h> +static inline void clear_page(void *page) +{ + memset(page, 0, PAGE_SIZE); +} -#define __HAVE_ARCH_GATE_AREA 1 -#endif /* __KERNEL__ */ +static inline void copy_page(void *to, void *from) +{ + memcpy(to, from, PAGE_SIZE); +} +#endif /* CONFIG_X86_3DNOW */ +#endif /* !__ASSEMBLY__ */ -#endif /* _I386_PAGE_H */ +#endif /* _ASM_X86_PAGE_32_H */ diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index c3b52bcb171e..c1ac42d8707f 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -1,15 +1,9 @@ #ifndef _X86_64_PAGE_H #define _X86_64_PAGE_H -#include <linux/const.h> +#define PAGETABLE_LEVELS 4 -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) -#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) - -#define THREAD_ORDER 1 +#define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define CURRENT_MASK (~(THREAD_SIZE-1)) @@ -29,54 +23,7 @@ #define MCE_STACK 5 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ -#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) -#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT) - -#define HPAGE_SHIFT PMD_SHIFT -#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE - 1)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -extern unsigned long end_pfn; - -void clear_page(void *); -void copy_page(void *, void *); - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE -/* - * These are used to make use of C type-checking.. - */ -typedef struct { unsigned long pte; } pte_t; -typedef struct { unsigned long pmd; } pmd_t; -typedef struct { unsigned long pud; } pud_t; -typedef struct { unsigned long pgd; } pgd_t; -#define PTE_MASK PHYSICAL_PAGE_MASK - -typedef struct { unsigned long pgprot; } pgprot_t; - -extern unsigned long phys_base; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pud_val(x) ((x).pud) -#define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pud(x) ((pud_t) { (x) } ) -#define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#endif /* !__ASSEMBLY__ */ +#define __PAGE_OFFSET _AC(0xffff810000000000, UL) #define __PHYSICAL_START CONFIG_PHYSICAL_START #define __KERNEL_ALIGN 0x200000 @@ -92,53 +39,44 @@ extern unsigned long phys_base; #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map _AC(0xffffffff80000000, UL) -#define __PAGE_OFFSET _AC(0xffff810000000000, UL) - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* See Documentation/x86_64/mm.txt for a description of the memory map. 
*/ #define __PHYSICAL_MASK_SHIFT 46 -#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1) #define __VIRTUAL_MASK_SHIFT 48 -#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1) #define KERNEL_TEXT_SIZE (40*1024*1024) #define KERNEL_TEXT_START _AC(0xffffffff80000000, UL) -#define PAGE_OFFSET __PAGE_OFFSET #ifndef __ASSEMBLY__ +void clear_page(void *page); +void copy_page(void *to, void *from); -#include <asm/bug.h> +extern unsigned long end_pfn; +extern unsigned long end_pfn_map; +extern unsigned long phys_base; extern unsigned long __phys_addr(unsigned long); +#define __phys_reloc_hide(x) (x) -#endif /* __ASSEMBLY__ */ - -#define __pa(x) __phys_addr((unsigned long)(x)) -#define __pa_symbol(x) __phys_addr((unsigned long)(x)) - -#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#define __boot_va(x) __va(x) -#define __boot_pa(x) __pa(x) -#ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) ((pfn) < end_pfn) -#endif - -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +/* + * These are used to make use of C type-checking.. + */ +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; +typedef unsigned long phys_addr_t; -#define VM_DATA_DEFAULT_FLAGS \ - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +typedef struct { pteval_t pte; } pte_t; -#define __HAVE_ARCH_GATE_AREA 1 #define vmemmap ((struct page *)VMEMMAP_START) -#include <asm-generic/memory_model.h> -#include <asm-generic/page.h> +#endif /* !__ASSEMBLY__ */ + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < end_pfn) +#endif -#endif /* __KERNEL__ */ #endif /* _X86_64_PAGE_H */ diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index f59d370c5df4..d6236eb46466 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -5,22 +5,37 @@ #ifdef CONFIG_PARAVIRT #include <asm/page.h> +#include <asm/asm.h> /* Bitmask of what can be clobbered: usually at least eax. */ -#define CLBR_NONE 0x0 -#define CLBR_EAX 0x1 -#define CLBR_ECX 0x2 -#define CLBR_EDX 0x4 -#define CLBR_ANY 0x7 +#define CLBR_NONE 0 +#define CLBR_EAX (1 << 0) +#define CLBR_ECX (1 << 1) +#define CLBR_EDX (1 << 2) + +#ifdef CONFIG_X86_64 +#define CLBR_RSI (1 << 3) +#define CLBR_RDI (1 << 4) +#define CLBR_R8 (1 << 5) +#define CLBR_R9 (1 << 6) +#define CLBR_R10 (1 << 7) +#define CLBR_R11 (1 << 8) +#define CLBR_ANY ((1 << 9) - 1) +#include <asm/desc_defs.h> +#else +/* CLBR_ANY should match all regs platform has. 
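The reworked CLBR_* constants above form a plain bitmask, so the 64-bit CLBR_ANY of ((1 << 9) - 1) is simply all nine register bits set; a standalone sketch confirming the arithmetic:

#include <stdio.h>

#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)
#define CLBR_RSI (1 << 3)
#define CLBR_RDI (1 << 4)
#define CLBR_R8  (1 << 5)
#define CLBR_R9  (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)
#define CLBR_ANY ((1 << 9) - 1)

int main(void)
{
        unsigned int all = CLBR_EAX | CLBR_ECX | CLBR_EDX | CLBR_RSI |
                           CLBR_RDI | CLBR_R8 | CLBR_R9 | CLBR_R10 | CLBR_R11;

        /* both print 0x1ff */
        printf("CLBR_ANY = %#x, OR of all nine = %#x\n",
               (unsigned int)CLBR_ANY, all);
        return 0;
}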
For i386, that's just it */ +#define CLBR_ANY ((1 << 3) - 1) +#endif /* X86_64 */ #ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/cpumask.h> #include <asm/kmap_types.h> +#include <asm/desc_defs.h> struct page; struct thread_struct; -struct Xgt_desc_struct; +struct desc_ptr; struct tss_struct; struct mm_struct; struct desc_struct; @@ -86,22 +101,27 @@ struct pv_cpu_ops { unsigned long (*read_cr4)(void); void (*write_cr4)(unsigned long); +#ifdef CONFIG_X86_64 + unsigned long (*read_cr8)(void); + void (*write_cr8)(unsigned long); +#endif + /* Segment descriptor handling */ void (*load_tr_desc)(void); - void (*load_gdt)(const struct Xgt_desc_struct *); - void (*load_idt)(const struct Xgt_desc_struct *); - void (*store_gdt)(struct Xgt_desc_struct *); - void (*store_idt)(struct Xgt_desc_struct *); + void (*load_gdt)(const struct desc_ptr *); + void (*load_idt)(const struct desc_ptr *); + void (*store_gdt)(struct desc_ptr *); + void (*store_idt)(struct desc_ptr *); void (*set_ldt)(const void *desc, unsigned entries); unsigned long (*store_tr)(void); void (*load_tls)(struct thread_struct *t, unsigned int cpu); - void (*write_ldt_entry)(struct desc_struct *, - int entrynum, u32 low, u32 high); + void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, + const void *desc); void (*write_gdt_entry)(struct desc_struct *, - int entrynum, u32 low, u32 high); - void (*write_idt_entry)(struct desc_struct *, - int entrynum, u32 low, u32 high); - void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); + int entrynum, const void *desc, int size); + void (*write_idt_entry)(gate_desc *, + int entrynum, const gate_desc *gate); + void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); void (*set_iopl_mask)(unsigned mask); @@ -115,15 +135,18 @@ struct pv_cpu_ops { /* MSR, PMC and TSR operations. err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ u64 (*read_msr)(unsigned int msr, int *err); - int (*write_msr)(unsigned int msr, u64 val); + int (*write_msr)(unsigned int msr, unsigned low, unsigned high); u64 (*read_tsc)(void); - u64 (*read_pmc)(void); + u64 (*read_pmc)(int counter); + unsigned long long (*read_tscp)(unsigned int *aux); /* These two are jmp to, not actually called. */ - void (*irq_enable_sysexit)(void); + void (*irq_enable_syscall_ret)(void); void (*iret)(void); + void (*swapgs)(void); + struct pv_lazy_ops lazy_mode; }; @@ -150,9 +173,9 @@ struct pv_apic_ops { * Direct APIC operations, principally for VMI. Ideally * these shouldn't be in this interface. 
*/ - void (*apic_write)(unsigned long reg, unsigned long v); - void (*apic_write_atomic)(unsigned long reg, unsigned long v); - unsigned long (*apic_read)(unsigned long reg); + void (*apic_write)(unsigned long reg, u32 v); + void (*apic_write_atomic)(unsigned long reg, u32 v); + u32 (*apic_read)(unsigned long reg); void (*setup_boot_clock)(void); void (*setup_secondary_clock)(void); @@ -198,7 +221,7 @@ struct pv_mmu_ops { /* Hooks for allocating/releasing pagetable pages */ void (*alloc_pt)(struct mm_struct *mm, u32 pfn); - void (*alloc_pd)(u32 pfn); + void (*alloc_pd)(struct mm_struct *mm, u32 pfn); void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); void (*release_pt)(u32 pfn); void (*release_pd)(u32 pfn); @@ -212,28 +235,34 @@ struct pv_mmu_ops { void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + pteval_t (*pte_val)(pte_t); + pte_t (*make_pte)(pteval_t pte); + + pgdval_t (*pgd_val)(pgd_t); + pgd_t (*make_pgd)(pgdval_t pgd); + +#if PAGETABLE_LEVELS >= 3 #ifdef CONFIG_X86_PAE void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - void (*set_pud)(pud_t *pudp, pud_t pudval); void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); void (*pmd_clear)(pmd_t *pmdp); - unsigned long long (*pte_val)(pte_t); - unsigned long long (*pmd_val)(pmd_t); - unsigned long long (*pgd_val)(pgd_t); +#endif /* CONFIG_X86_PAE */ - pte_t (*make_pte)(unsigned long long pte); - pmd_t (*make_pmd)(unsigned long long pmd); - pgd_t (*make_pgd)(unsigned long long pgd); -#else - unsigned long (*pte_val)(pte_t); - unsigned long (*pgd_val)(pgd_t); + void (*set_pud)(pud_t *pudp, pud_t pudval); - pte_t (*make_pte)(unsigned long pte); - pgd_t (*make_pgd)(unsigned long pgd); -#endif + pmdval_t (*pmd_val)(pmd_t); + pmd_t (*make_pmd)(pmdval_t pmd); + +#if PAGETABLE_LEVELS == 4 + pudval_t (*pud_val)(pud_t); + pud_t (*make_pud)(pudval_t pud); + + void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); +#endif /* PAGETABLE_LEVELS == 4 */ +#endif /* PAGETABLE_LEVELS >= 3 */ #ifdef CONFIG_HIGHPTE void *(*kmap_atomic_pte)(struct page *page, enum km_type type); @@ -279,7 +308,8 @@ extern struct pv_mmu_ops pv_mmu_ops; #define _paravirt_alt(insn_string, type, clobber) \ "771:\n\t" insn_string "\n" "772:\n" \ ".pushsection .parainstructions,\"a\"\n" \ - " .long 771b\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR " 771b\n" \ " .byte " type "\n" \ " .byte 772b-771b\n" \ " .short " clobber "\n" \ @@ -289,6 +319,11 @@ extern struct pv_mmu_ops pv_mmu_ops; #define paravirt_alt(insn_string) \ _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") +/* Simple instruction patching code. */ +#define DEF_NATIVE(ops, name, code) \ + extern const char start_##ops##_##name[], end_##ops##_##name[]; \ + asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") + unsigned paravirt_patch_nop(void); unsigned paravirt_patch_ignore(unsigned len); unsigned paravirt_patch_call(void *insnbuf, @@ -303,6 +338,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, unsigned paravirt_patch_insns(void *insnbuf, unsigned len, const char *start, const char *end); +unsigned native_patch(u8 type, u16 clobbers, void *ibuf, + unsigned long addr, unsigned len); + int paravirt_disable_iospace(void); /* @@ -319,7 +357,7 @@ int paravirt_disable_iospace(void); * runtime. * * Normally, a call to a pv_op function is a simple indirect call: - * (paravirt_ops.operations)(args...). 
+ * (pv_op_struct.operations)(args...). * * Unfortunately, this is a relatively slow operation for modern CPUs, * because it cannot necessarily determine what the destination @@ -329,11 +367,17 @@ int paravirt_disable_iospace(void); * calls are essentially free, because the call and return addresses * are completely predictable.) * - * These macros rely on the standard gcc "regparm(3)" calling + * For i386, these macros rely on the standard gcc "regparm(3)" calling * convention, in which the first three arguments are placed in %eax, * %edx, %ecx (in that order), and the remaining arguments are placed * on the stack. All caller-save registers (eax,edx,ecx) are expected * to be modified (either clobbered or used for return values). + * X86_64, on the other hand, already specifies a register-based calling + * conventions, returning at %rax, with parameteres going on %rdi, %rsi, + * %rdx, and %rcx. Note that for this reason, x86_64 does not need any + * special handling for dealing with 4 arguments, unlike i386. + * However, x86_64 also have to clobber all caller saved registers, which + * unfortunately, are quite a bit (r8 - r11) * * The call instruction itself is marked by placing its start address * and size into the .parainstructions section, so that @@ -356,10 +400,12 @@ int paravirt_disable_iospace(void); * the return type. The macro then uses sizeof() on that type to * determine whether its a 32 or 64 bit value, and places the return * in the right register(s) (just %eax for 32-bit, and %edx:%eax for - * 64-bit). + * 64-bit). For x86_64 machines, it just returns at %rax regardless of + * the return value size. * * 64-bit arguments are passed as a pair of adjacent 32-bit arguments - * in low,high order. + * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments + * in low,high order * * Small structures are passed and returned in registers. The macro * calling convention can't directly deal with this, so the wrapper @@ -369,46 +415,67 @@ int paravirt_disable_iospace(void); * means that all uses must be wrapped in inline functions. This also * makes sure the incoming and outgoing types are always correct. */ +#ifdef CONFIG_X86_32 +#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx +#define PVOP_CALL_ARGS PVOP_VCALL_ARGS +#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ + "=c" (__ecx) +#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS +#define EXTRA_CLOBBERS +#define VEXTRA_CLOBBERS +#else +#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx +#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax +#define PVOP_VCALL_CLOBBERS "=D" (__edi), \ + "=S" (__esi), "=d" (__edx), \ + "=c" (__ecx) + +#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) + +#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" +#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" +#endif + #define __PVOP_CALL(rettype, op, pre, post, ...) 
\ ({ \ rettype __ret; \ - unsigned long __eax, __edx, __ecx; \ + PVOP_CALL_ARGS; \ + /* This is 32-bit specific, but is okay in 64-bit */ \ + /* since this condition will never hold */ \ if (sizeof(rettype) > sizeof(unsigned long)) { \ asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : "=a" (__eax), "=d" (__edx), \ - "=c" (__ecx) \ + : PVOP_CALL_CLOBBERS \ : paravirt_type(op), \ paravirt_clobber(CLBR_ANY), \ ##__VA_ARGS__ \ - : "memory", "cc"); \ + : "memory", "cc" EXTRA_CLOBBERS); \ __ret = (rettype)((((u64)__edx) << 32) | __eax); \ } else { \ asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : "=a" (__eax), "=d" (__edx), \ - "=c" (__ecx) \ + : PVOP_CALL_CLOBBERS \ : paravirt_type(op), \ paravirt_clobber(CLBR_ANY), \ ##__VA_ARGS__ \ - : "memory", "cc"); \ + : "memory", "cc" EXTRA_CLOBBERS); \ __ret = (rettype)__eax; \ } \ __ret; \ }) #define __PVOP_VCALL(op, pre, post, ...) \ ({ \ - unsigned long __eax, __edx, __ecx; \ + PVOP_VCALL_ARGS; \ asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \ + : PVOP_VCALL_CLOBBERS \ : paravirt_type(op), \ paravirt_clobber(CLBR_ANY), \ ##__VA_ARGS__ \ - : "memory", "cc"); \ + : "memory", "cc" VEXTRA_CLOBBERS); \ }) #define PVOP_CALL0(rettype, op) \ @@ -417,22 +484,26 @@ int paravirt_disable_iospace(void); __PVOP_VCALL(op, "", "") #define PVOP_CALL1(rettype, op, arg1) \ - __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1))) + __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) #define PVOP_VCALL1(op, arg1) \ - __PVOP_VCALL(op, "", "", "0" ((u32)(arg1))) + __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) #define PVOP_CALL2(rettype, op, arg1, arg2) \ - __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2))) + __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ + "1" ((unsigned long)(arg2))) #define PVOP_VCALL2(op, arg1, arg2) \ - __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2))) + __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ + "1" ((unsigned long)(arg2))) #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ - __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), \ - "1"((u32)(arg2)), "2"((u32)(arg3))) + __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ + "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) #define PVOP_VCALL3(op, arg1, arg2, arg3) \ - __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)), \ - "2"((u32)(arg3))) + __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ + "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) +/* This is the only difference in x86_64. 
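On the 32-bit path, __PVOP_CALL above rebuilds a 64-bit return value from the %edx:%eax register pair; the reassembly is ordinary shift-and-OR arithmetic, sketched standalone here:

#include <stdio.h>
#include <stdint.h>

/* mimic a pv_op that returned its 64-bit result in two 32-bit halves */
static uint64_t reassemble(uint32_t eax, uint32_t edx)
{
        return (((uint64_t)edx) << 32) | eax;
}

int main(void)
{
        uint32_t eax = 0x9abcdef0, edx = 0x12345678;

        /* prints 0x123456789abcdef0 */
        printf("edx:eax -> %#llx\n",
               (unsigned long long)reassemble(eax, edx));
        return 0;
}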
We can make it much simpler */ +#ifdef CONFIG_X86_32 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ __PVOP_CALL(rettype, op, \ "push %[_arg4];", "lea 4(%%esp),%%esp;", \ @@ -443,16 +514,26 @@ int paravirt_disable_iospace(void); "push %[_arg4];", "lea 4(%%esp),%%esp;", \ "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) +#else +#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ + __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ + "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ + "3"((unsigned long)(arg4))) +#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ + __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ + "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ + "3"((unsigned long)(arg4))) +#endif static inline int paravirt_enabled(void) { return pv_info.paravirt_enabled; } -static inline void load_esp0(struct tss_struct *tss, +static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread); + PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); } #define ARCH_SETUP pv_init_ops.arch_setup(); @@ -540,6 +621,18 @@ static inline void write_cr4(unsigned long x) PVOP_VCALL1(pv_cpu_ops.write_cr4, x); } +#ifdef CONFIG_X86_64 +static inline unsigned long read_cr8(void) +{ + return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8); +} + +static inline void write_cr8(unsigned long x) +{ + PVOP_VCALL1(pv_cpu_ops.write_cr8, x); +} +#endif + static inline void raw_safe_halt(void) { PVOP_VCALL0(pv_irq_ops.safe_halt); @@ -613,8 +706,6 @@ static inline unsigned long long paravirt_sched_clock(void) } #define calculate_cpu_khz() (pv_time_ops.get_cpu_khz()) -#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) - static inline unsigned long long paravirt_read_pmc(int counter) { return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); @@ -626,15 +717,36 @@ static inline unsigned long long paravirt_read_pmc(int counter) high = _l >> 32; \ } while(0) +static inline unsigned long long paravirt_rdtscp(unsigned int *aux) +{ + return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); +} + +#define rdtscp(low, high, aux) \ +do { \ + int __aux; \ + unsigned long __val = paravirt_rdtscp(&__aux); \ + (low) = (u32)__val; \ + (high) = (u32)(__val >> 32); \ + (aux) = __aux; \ +} while (0) + +#define rdtscpll(val, aux) \ +do { \ + unsigned long __aux; \ + val = paravirt_rdtscp(&__aux); \ + (aux) = __aux; \ +} while (0) + static inline void load_TR_desc(void) { PVOP_VCALL0(pv_cpu_ops.load_tr_desc); } -static inline void load_gdt(const struct Xgt_desc_struct *dtr) +static inline void load_gdt(const struct desc_ptr *dtr) { PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr); } -static inline void load_idt(const struct Xgt_desc_struct *dtr) +static inline void load_idt(const struct desc_ptr *dtr) { PVOP_VCALL1(pv_cpu_ops.load_idt, dtr); } @@ -642,11 +754,11 @@ static inline void set_ldt(const void *addr, unsigned entries) { PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); } -static inline void store_gdt(struct Xgt_desc_struct *dtr) +static inline void store_gdt(struct desc_ptr *dtr) { PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); } -static inline void store_idt(struct Xgt_desc_struct *dtr) +static inline void store_idt(struct desc_ptr *dtr) { PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); } @@ -659,17 +771,22 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu) { PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); } -static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high) + +static inline 
void write_ldt_entry(struct desc_struct *dt, int entry, + const void *desc) { - PVOP_VCALL4(pv_cpu_ops.write_ldt_entry, dt, entry, low, high); + PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc); } -static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high) + +static inline void write_gdt_entry(struct desc_struct *dt, int entry, + void *desc, int type) { - PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, low, high); + PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type); } -static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high) + +static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) { - PVOP_VCALL4(pv_cpu_ops.write_idt_entry, dt, entry, low, high); + PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g); } static inline void set_iopl_mask(unsigned mask) { @@ -690,17 +807,17 @@ static inline void slow_down_io(void) { /* * Basic functions accessing APICs. */ -static inline void apic_write(unsigned long reg, unsigned long v) +static inline void apic_write(unsigned long reg, u32 v) { PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); } -static inline void apic_write_atomic(unsigned long reg, unsigned long v) +static inline void apic_write_atomic(unsigned long reg, u32 v) { PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v); } -static inline unsigned long apic_read(unsigned long reg) +static inline u32 apic_read(unsigned long reg) { return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); } @@ -786,9 +903,9 @@ static inline void paravirt_release_pt(unsigned pfn) PVOP_VCALL1(pv_mmu_ops.release_pt, pfn); } -static inline void paravirt_alloc_pd(unsigned pfn) +static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn) { - PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn); + PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn); } static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, @@ -822,128 +939,236 @@ static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); } -#ifdef CONFIG_X86_PAE -static inline pte_t __pte(unsigned long long val) +static inline pte_t __pte(pteval_t val) { - unsigned long long ret = PVOP_CALL2(unsigned long long, - pv_mmu_ops.make_pte, - val, val >> 32); - return (pte_t) { ret, ret >> 32 }; + pteval_t ret; + + if (sizeof(pteval_t) > sizeof(long)) + ret = PVOP_CALL2(pteval_t, + pv_mmu_ops.make_pte, + val, (u64)val >> 32); + else + ret = PVOP_CALL1(pteval_t, + pv_mmu_ops.make_pte, + val); + + return (pte_t) { .pte = ret }; } -static inline pmd_t __pmd(unsigned long long val) +static inline pteval_t pte_val(pte_t pte) { - return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd, - val, val >> 32) }; + pteval_t ret; + + if (sizeof(pteval_t) > sizeof(long)) + ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, + pte.pte, (u64)pte.pte >> 32); + else + ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, + pte.pte); + + return ret; } -static inline pgd_t __pgd(unsigned long long val) +static inline pgd_t __pgd(pgdval_t val) { - return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd, - val, val >> 32) }; + pgdval_t ret; + + if (sizeof(pgdval_t) > sizeof(long)) + ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, + val, (u64)val >> 32); + else + ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, + val); + + return (pgd_t) { ret }; } -static inline unsigned long long pte_val(pte_t x) +static inline pgdval_t pgd_val(pgd_t pgd) { - return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val, - x.pte_low, 
x.pte_high); + pgdval_t ret; + + if (sizeof(pgdval_t) > sizeof(long)) + ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, + pgd.pgd, (u64)pgd.pgd >> 32); + else + ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, + pgd.pgd); + + return ret; } -static inline unsigned long long pmd_val(pmd_t x) +static inline void set_pte(pte_t *ptep, pte_t pte) { - return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val, - x.pmd, x.pmd >> 32); + if (sizeof(pteval_t) > sizeof(long)) + PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, + pte.pte, (u64)pte.pte >> 32); + else + PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, + pte.pte); } -static inline unsigned long long pgd_val(pgd_t x) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { - return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val, - x.pgd, x.pgd >> 32); + if (sizeof(pteval_t) > sizeof(long)) + /* 5 arg words */ + pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); + else + PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); } -static inline void set_pte(pte_t *ptep, pte_t pteval) +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { - PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high); + pmdval_t val = native_pmd_val(pmd); + + if (sizeof(pmdval_t) > sizeof(long)) + PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32); + else + PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val); } -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) +#if PAGETABLE_LEVELS >= 3 +static inline pmd_t __pmd(pmdval_t val) { - /* 5 arg words */ - pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval); + pmdval_t ret; + + if (sizeof(pmdval_t) > sizeof(long)) + ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, + val, (u64)val >> 32); + else + ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, + val); + + return (pmd_t) { ret }; } -static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) +static inline pmdval_t pmd_val(pmd_t pmd) { - PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, - pteval.pte_low, pteval.pte_high); + pmdval_t ret; + + if (sizeof(pmdval_t) > sizeof(long)) + ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, + pmd.pmd, (u64)pmd.pmd >> 32); + else + ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, + pmd.pmd); + + return ret; } -static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +static inline void set_pud(pud_t *pudp, pud_t pud) { - /* 5 arg words */ - pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); + pudval_t val = native_pud_val(pud); + + if (sizeof(pudval_t) > sizeof(long)) + PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, + val, (u64)val >> 32); + else + PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, + val); +} +#if PAGETABLE_LEVELS == 4 +static inline pud_t __pud(pudval_t val) +{ + pudval_t ret; + + if (sizeof(pudval_t) > sizeof(long)) + ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, + val, (u64)val >> 32); + else + ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, + val); + + return (pud_t) { ret }; } -static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) +static inline pudval_t pud_val(pud_t pud) { - PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, - pmdval.pmd, pmdval.pmd >> 32); + pudval_t ret; + + if (sizeof(pudval_t) > sizeof(long)) + ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, + pud.pud, (u64)pud.pud >> 32); + else + ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, + pud.pud); + + return ret; } -static inline void set_pud(pud_t *pudp, pud_t pudval) +static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { - PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, - 
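The __pte()/pte_val()/set_pte() wrappers above all use the same compile-time dispatch: when pteval_t is wider than a long (the 32-bit PAE case), the value is split into two 32-bit arguments, low word first; otherwise it is passed whole. A standalone sketch of that dispatch (call1/call2 are placeholders for the one- and two-argument PVOP calls):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;      /* PAE: wider than a 32-bit long */

void call1(unsigned long lo)
{
        printf("one arg:  %#lx\n", lo);
}

void call2(unsigned long lo, unsigned long hi)
{
        printf("two args: lo=%#lx hi=%#lx\n", lo, hi);
}

void set_pte_sketch(pteval_t val)
{
        /* the same compile-time size test the wrappers use */
        if (sizeof(pteval_t) > sizeof(long))
                call2((unsigned long)val, (unsigned long)(val >> 32));
        else
                call1((unsigned long)val);      /* LP64 hosts land here, like x86_64 */
}

int main(void)
{
        set_pte_sketch(0x123456789abcdef0ULL);
        return 0;
}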
pudval.pgd.pgd, pudval.pgd.pgd >> 32); + pgdval_t val = native_pgd_val(pgd); + + if (sizeof(pgdval_t) > sizeof(long)) + PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp, + val, (u64)val >> 32); + else + PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, + val); } -static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static inline void pgd_clear(pgd_t *pgdp) { - PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); + set_pgd(pgdp, __pgd(0)); } -static inline void pmd_clear(pmd_t *pmdp) +static inline void pud_clear(pud_t *pudp) { - PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); + set_pud(pudp, __pud(0)); } -#else /* !CONFIG_X86_PAE */ +#endif /* PAGETABLE_LEVELS == 4 */ -static inline pte_t __pte(unsigned long val) +#endif /* PAGETABLE_LEVELS >= 3 */ + +#ifdef CONFIG_X86_PAE +/* Special-case pte-setting operations for PAE, which can't update a + 64-bit pte atomically */ +static inline void set_pte_atomic(pte_t *ptep, pte_t pte) { - return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) }; + PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, + pte.pte, pte.pte >> 32); } -static inline pgd_t __pgd(unsigned long val) +static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { - return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) }; + /* 5 arg words */ + pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); } -static inline unsigned long pte_val(pte_t x) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { - return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low); + PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); } -static inline unsigned long pgd_val(pgd_t x) +static inline void pmd_clear(pmd_t *pmdp) +{ + PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); +} +#else /* !CONFIG_X86_PAE */ +static inline void set_pte_atomic(pte_t *ptep, pte_t pte) { - return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd); + set_pte(ptep, pte); } -static inline void set_pte(pte_t *ptep, pte_t pteval) +static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { - PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low); + set_pte(ptep, pte); } -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { - PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low); + set_pte_at(mm, addr, ptep, __pte(0)); } -static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) +static inline void pmd_clear(pmd_t *pmdp) { - PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd); + set_pmd(pmdp, __pmd(0)); } #endif /* CONFIG_X86_PAE */ @@ -1014,52 +1239,68 @@ struct paravirt_patch_site { extern struct paravirt_patch_site __parainstructions[], __parainstructions_end[]; +#ifdef CONFIG_X86_32 +#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" +#define PV_RESTORE_REGS "popl %%edx; popl %%ecx" +#define PV_FLAGS_ARG "0" +#define PV_EXTRA_CLOBBERS +#define PV_VEXTRA_CLOBBERS +#else +/* We save some registers, but all of them, that's too much. 
We clobber all + * caller saved registers but the argument parameter */ +#define PV_SAVE_REGS "pushq %%rdi;" +#define PV_RESTORE_REGS "popq %%rdi;" +#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx" +#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx" +#define PV_FLAGS_ARG "D" +#endif + static inline unsigned long __raw_local_save_flags(void) { unsigned long f; - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" + asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL - "popl %%edx; popl %%ecx") + PV_RESTORE_REGS) : "=a"(f) : paravirt_type(pv_irq_ops.save_fl), paravirt_clobber(CLBR_EAX) - : "memory", "cc"); + : "memory", "cc" PV_VEXTRA_CLOBBERS); return f; } static inline void raw_local_irq_restore(unsigned long f) { - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" + asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL - "popl %%edx; popl %%ecx") + PV_RESTORE_REGS) : "=a"(f) - : "0"(f), + : PV_FLAGS_ARG(f), paravirt_type(pv_irq_ops.restore_fl), paravirt_clobber(CLBR_EAX) - : "memory", "cc"); + : "memory", "cc" PV_EXTRA_CLOBBERS); } static inline void raw_local_irq_disable(void) { - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" + asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL - "popl %%edx; popl %%ecx") + PV_RESTORE_REGS) : : paravirt_type(pv_irq_ops.irq_disable), paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc"); + : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); } static inline void raw_local_irq_enable(void) { - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" + asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL - "popl %%edx; popl %%ecx") + PV_RESTORE_REGS) : : paravirt_type(pv_irq_ops.irq_enable), paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc"); + : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); } static inline unsigned long __raw_local_irq_save(void) @@ -1071,27 +1312,6 @@ static inline unsigned long __raw_local_irq_save(void) return f; } -#define CLI_STRING \ - _paravirt_alt("pushl %%ecx; pushl %%edx;" \ - "call *%[paravirt_cli_opptr];" \ - "popl %%edx; popl %%ecx", \ - "%c[paravirt_cli_type]", "%c[paravirt_clobber]") - -#define STI_STRING \ - _paravirt_alt("pushl %%ecx; pushl %%edx;" \ - "call *%[paravirt_sti_opptr];" \ - "popl %%edx; popl %%ecx", \ - "%c[paravirt_sti_type]", "%c[paravirt_clobber]") - -#define CLI_STI_CLOBBERS , "%eax" -#define CLI_STI_INPUT_ARGS \ - , \ - [paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)), \ - [paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable), \ - [paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)), \ - [paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable), \ - paravirt_clobber(CLBR_EAX) - /* Make sure as little as possible of this mess escapes. 
*/ #undef PARAVIRT_CALL #undef __PVOP_CALL @@ -1109,43 +1329,72 @@ static inline unsigned long __raw_local_irq_save(void) #else /* __ASSEMBLY__ */ -#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) - -#define PARA_SITE(ptype, clobbers, ops) \ +#define _PVSITE(ptype, clobbers, ops, word, algn) \ 771:; \ ops; \ 772:; \ .pushsection .parainstructions,"a"; \ - .long 771b; \ + .align algn; \ + word 771b; \ .byte ptype; \ .byte 772b-771b; \ .short clobbers; \ .popsection + +#ifdef CONFIG_X86_64 +#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx +#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax +#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) +#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) +#else +#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx +#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax +#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) +#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) +#endif + #define INTERRUPT_RETURN \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ jmp *%cs:pv_cpu_ops+PV_CPU_iret) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ - pushl %eax; pushl %ecx; pushl %edx; \ + PV_SAVE_REGS; \ call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \ - popl %edx; popl %ecx; popl %eax) \ + PV_RESTORE_REGS;) \ #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ - pushl %eax; pushl %ecx; pushl %edx; \ + PV_SAVE_REGS; \ call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \ - popl %edx; popl %ecx; popl %eax) + PV_RESTORE_REGS;) + +#define ENABLE_INTERRUPTS_SYSCALL_RET \ + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\ + CLBR_NONE, \ + jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret) -#define ENABLE_INTERRUPTS_SYSEXIT \ - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\ - jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit) +#ifdef CONFIG_X86_32 #define GET_CR0_INTO_EAX \ push %ecx; push %edx; \ call *pv_cpu_ops+PV_CPU_read_cr0; \ pop %edx; pop %ecx +#else +#define SWAPGS \ + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ + PV_SAVE_REGS; \ + call *pv_cpu_ops+PV_CPU_swapgs; \ + PV_RESTORE_REGS \ + ) + +#define GET_CR2_INTO_RCX \ + call *pv_mmu_ops+PV_MMU_read_cr2; \ + movq %rax, %rcx; \ + xorq %rax, %rax; + +#endif #endif /* __ASSEMBLY__ */ #endif /* CONFIG_PARAVIRT */ diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index e88361966347..c61190cb9e12 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h @@ -66,6 +66,7 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, #ifdef CONFIG_PCI +extern void early_quirks(void); static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) @@ -73,9 +74,10 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } +#else +static inline void early_quirks(void) { } #endif - #endif /* __KERNEL__ */ #ifdef CONFIG_X86_32 @@ -90,6 +92,19 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, /* generic pci stuff */ #include <asm-generic/pci.h> +#ifdef CONFIG_NUMA +/* Returns the node based on pci bus */ +static inline int __pcibus_to_node(struct pci_bus *bus) +{ + struct pci_sysdata *sd = bus->sysdata; + 
return sd->node; +} + +static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) +{ + return node_to_cpumask(__pcibus_to_node(bus)); +} +#endif #endif diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h index ef54226a9325..374690314539 100644 --- a/include/asm-x86/pci_64.h +++ b/include/asm-x86/pci_64.h @@ -26,7 +26,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int l extern void pci_iommu_alloc(void); -extern int iommu_setup(char *opt); /* The PCI address space does equal the physical memory * address space. The networking and block device layers use diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index 35962bbe5e72..c0305bff0f19 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h @@ -7,22 +7,22 @@ #include <linux/cache.h> #include <asm/page.h> -/* Per processor datastructure. %gs points to it while the kernel runs */ +/* Per processor datastructure. %gs points to it while the kernel runs */ struct x8664_pda { struct task_struct *pcurrent; /* 0 Current process */ unsigned long data_offset; /* 8 Per cpu data offset from linker address */ - unsigned long kernelstack; /* 16 top of kernel stack for current */ - unsigned long oldrsp; /* 24 user rsp for system call */ - int irqcount; /* 32 Irq nesting counter. Starts with -1 */ - int cpunumber; /* 36 Logical CPU number */ + unsigned long kernelstack; /* 16 top of kernel stack for current */ + unsigned long oldrsp; /* 24 user rsp for system call */ + int irqcount; /* 32 Irq nesting counter. Starts -1 */ + unsigned int cpunumber; /* 36 Logical CPU number */ #ifdef CONFIG_CC_STACKPROTECTOR unsigned long stack_canary; /* 40 stack canary value */ /* gcc-ABI: this canary MUST be at offset 40!!! */ #endif char *irqstackptr; - int nodenumber; /* number of current node */ + unsigned int nodenumber; /* number of current node */ unsigned int __softirq_pending; unsigned int __nmi_count; /* number of NMI on this CPUs */ short mmu_state; @@ -40,13 +40,14 @@ struct x8664_pda { extern struct x8664_pda *_cpu_pda[]; extern struct x8664_pda boot_cpu_pda[]; +extern void pda_init(int); #define cpu_pda(i) (_cpu_pda[i]) -/* +/* * There is no fast way to get the base address of the PDA, all the accesses * have to mention %fs/%gs. So it needs to be done this Torvaldian way. 
- */ + */ extern void __bad_pda_field(void) __attribute__((noreturn)); /* @@ -57,70 +58,70 @@ extern struct x8664_pda _proxy_pda; #define pda_offset(field) offsetof(struct x8664_pda, field) -#define pda_to_op(op,field,val) do { \ +#define pda_to_op(op, field, val) do { \ typedef typeof(_proxy_pda.field) T__; \ if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ switch (sizeof(_proxy_pda.field)) { \ case 2: \ - asm(op "w %1,%%gs:%c2" : \ + asm(op "w %1,%%gs:%c2" : \ "+m" (_proxy_pda.field) : \ "ri" ((T__)val), \ - "i"(pda_offset(field))); \ - break; \ + "i"(pda_offset(field))); \ + break; \ case 4: \ - asm(op "l %1,%%gs:%c2" : \ + asm(op "l %1,%%gs:%c2" : \ "+m" (_proxy_pda.field) : \ "ri" ((T__)val), \ - "i" (pda_offset(field))); \ + "i" (pda_offset(field))); \ break; \ case 8: \ - asm(op "q %1,%%gs:%c2": \ + asm(op "q %1,%%gs:%c2": \ "+m" (_proxy_pda.field) : \ "ri" ((T__)val), \ - "i"(pda_offset(field))); \ + "i"(pda_offset(field))); \ break; \ - default: \ + default: \ __bad_pda_field(); \ - } \ - } while (0) + } \ + } while (0) #define pda_from_op(op,field) ({ \ typeof(_proxy_pda.field) ret__; \ switch (sizeof(_proxy_pda.field)) { \ - case 2: \ - asm(op "w %%gs:%c1,%0" : \ + case 2: \ + asm(op "w %%gs:%c1,%0" : \ "=r" (ret__) : \ - "i" (pda_offset(field)), \ - "m" (_proxy_pda.field)); \ + "i" (pda_offset(field)), \ + "m" (_proxy_pda.field)); \ break; \ case 4: \ asm(op "l %%gs:%c1,%0": \ "=r" (ret__): \ - "i" (pda_offset(field)), \ - "m" (_proxy_pda.field)); \ + "i" (pda_offset(field)), \ + "m" (_proxy_pda.field)); \ break; \ - case 8: \ + case 8: \ asm(op "q %%gs:%c1,%0": \ "=r" (ret__) : \ - "i" (pda_offset(field)), \ - "m" (_proxy_pda.field)); \ + "i" (pda_offset(field)), \ + "m" (_proxy_pda.field)); \ break; \ - default: \ + default: \ __bad_pda_field(); \ } \ ret__; }) -#define read_pda(field) pda_from_op("mov",field) -#define write_pda(field,val) pda_to_op("mov",field,val) -#define add_pda(field,val) pda_to_op("add",field,val) -#define sub_pda(field,val) pda_to_op("sub",field,val) -#define or_pda(field,val) pda_to_op("or",field,val) +#define read_pda(field) pda_from_op("mov", field) +#define write_pda(field, val) pda_to_op("mov", field, val) +#define add_pda(field, val) pda_to_op("add", field, val) +#define sub_pda(field, val) pda_to_op("sub", field, val) +#define or_pda(field, val) pda_to_op("or", field, val) /* This is not atomic against other CPUs -- CPU preemption needs to be off */ -#define test_and_clear_bit_pda(bit,field) ({ \ +#define test_and_clear_bit_pda(bit, field) ({ \ int old__; \ asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ - : "=r" (old__), "+m" (_proxy_pda.field) \ + : "=r" (old__), "+m" (_proxy_pda.field) \ : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ old__; \ }) diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index a1aaad274cca..0dec00f27eb4 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h @@ -1,5 +1,142 @@ -#ifdef CONFIG_X86_32 -# include "percpu_32.h" -#else -# include "percpu_64.h" +#ifndef _ASM_X86_PERCPU_H_ +#define _ASM_X86_PERCPU_H_ + +#ifdef CONFIG_X86_64 +#include <linux/compiler.h> + +/* Same as asm-generic/percpu.h, except that we store the per cpu offset + in the PDA. 
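The pda_to_op()/pda_from_op() accessors above address each field as %gs:offset, with the offset coming from offsetof(struct x8664_pda, field); a standalone mock of the layout (LP64 assumed, pointer fields simplified) showing the offsets match the numbers in the struct's comments:

#include <stdio.h>
#include <stddef.h>

struct mock_pda {
        void          *pcurrent;        /*  0 */
        unsigned long  data_offset;     /*  8 */
        unsigned long  kernelstack;     /* 16 */
        unsigned long  oldrsp;          /* 24 */
        int            irqcount;        /* 32 */
        unsigned int   cpunumber;       /* 36 */
};

int main(void)
{
        /* prints 16 and 36 on an LP64 host */
        printf("kernelstack at %zu, cpunumber at %zu\n",
               offsetof(struct mock_pda, kernelstack),
               offsetof(struct mock_pda, cpunumber));
        return 0;
}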
Longer term the PDA and every per cpu variable + should be just put into a single section and referenced directly + from %gs */ + +#ifdef CONFIG_SMP +#include <asm/pda.h> + +#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) +#define __my_cpu_offset read_pda(data_offset) + +#define per_cpu_offset(x) (__per_cpu_offset(x)) + #endif +#include <asm-generic/percpu.h> + +DECLARE_PER_CPU(struct x8664_pda, pda); + +#else /* CONFIG_X86_64 */ + +#ifdef __ASSEMBLY__ + +/* + * PER_CPU finds an address of a per-cpu variable. + * + * Args: + * var - variable name + * reg - 32bit register + * + * The resulting address is stored in the "reg" argument. + * + * Example: + * PER_CPU(cpu_gdt_descr, %ebx) + */ +#ifdef CONFIG_SMP +#define PER_CPU(var, reg) \ + movl %fs:per_cpu__##this_cpu_off, reg; \ + lea per_cpu__##var(reg), reg +#define PER_CPU_VAR(var) %fs:per_cpu__##var +#else /* ! SMP */ +#define PER_CPU(var, reg) \ + movl $per_cpu__##var, reg +#define PER_CPU_VAR(var) per_cpu__##var +#endif /* SMP */ + +#else /* ...!ASSEMBLY */ + +/* + * PER_CPU finds an address of a per-cpu variable. + * + * Args: + * var - variable name + * cpu - 32bit register containing the current CPU number + * + * The resulting address is stored in the "cpu" argument. + * + * Example: + * PER_CPU(cpu_gdt_descr, %ebx) + */ +#ifdef CONFIG_SMP + +#define __my_cpu_offset x86_read_percpu(this_cpu_off) + +/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ +#define __percpu_seg "%%fs:" + +#else /* !SMP */ + +#define __percpu_seg "" + +#endif /* SMP */ + +#include <asm-generic/percpu.h> + +/* We can use this directly for local CPU (faster). */ +DECLARE_PER_CPU(unsigned long, this_cpu_off); + +/* For arch-specific code, we can use direct single-insn ops (they + * don't give an lvalue though). */ +extern void __bad_percpu_size(void); + +#define percpu_to_op(op,var,val) \ + do { \ + typedef typeof(var) T__; \ + if (0) { T__ tmp__; tmp__ = (val); } \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b %1,"__percpu_seg"%0" \ + : "+m" (var) \ + :"ri" ((T__)val)); \ + break; \ + case 2: \ + asm(op "w %1,"__percpu_seg"%0" \ + : "+m" (var) \ + :"ri" ((T__)val)); \ + break; \ + case 4: \ + asm(op "l %1,"__percpu_seg"%0" \ + : "+m" (var) \ + :"ri" ((T__)val)); \ + break; \ + default: __bad_percpu_size(); \ + } \ + } while (0) + +#define percpu_from_op(op,var) \ + ({ \ + typeof(var) ret__; \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b "__percpu_seg"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + case 2: \ + asm(op "w "__percpu_seg"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + case 4: \ + asm(op "l "__percpu_seg"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + default: __bad_percpu_size(); \ + } \ + ret__; }) + +#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) +#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) +#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) +#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) +#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) +#endif /* !__ASSEMBLY__ */ +#endif /* !CONFIG_X86_64 */ +#endif /* _ASM_X86_PERCPU_H_ */ diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h deleted file mode 100644 index a7ebd436f3cc..000000000000 --- a/include/asm-x86/percpu_32.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef __ARCH_I386_PERCPU__ -#define __ARCH_I386_PERCPU__ - -#ifdef __ASSEMBLY__ - -/* - * PER_CPU finds an address of a per-cpu variable. 
- * - * Args: - * var - variable name - * reg - 32bit register - * - * The resulting address is stored in the "reg" argument. - * - * Example: - * PER_CPU(cpu_gdt_descr, %ebx) - */ -#ifdef CONFIG_SMP -#define PER_CPU(var, reg) \ - movl %fs:per_cpu__##this_cpu_off, reg; \ - lea per_cpu__##var(reg), reg -#define PER_CPU_VAR(var) %fs:per_cpu__##var -#else /* ! SMP */ -#define PER_CPU(var, reg) \ - movl $per_cpu__##var, reg -#define PER_CPU_VAR(var) per_cpu__##var -#endif /* SMP */ - -#else /* ...!ASSEMBLY */ - -/* - * PER_CPU finds an address of a per-cpu variable. - * - * Args: - * var - variable name - * cpu - 32bit register containing the current CPU number - * - * The resulting address is stored in the "cpu" argument. - * - * Example: - * PER_CPU(cpu_gdt_descr, %ebx) - */ -#ifdef CONFIG_SMP -/* Same as generic implementation except for optimized local access. */ -#define __GENERIC_PER_CPU - -/* This is used for other cpus to find our section. */ -extern unsigned long __per_cpu_offset[]; - -#define per_cpu_offset(x) (__per_cpu_offset[x]) - -/* Separate out the type, so (int[3], foo) works. */ -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - -/* We can use this directly for local CPU (faster). */ -DECLARE_PER_CPU(unsigned long, this_cpu_off); - -/* var is in discarded region: offset to particular copy we want */ -#define per_cpu(var, cpu) (*({ \ - extern int simple_indentifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); })) - -#define __raw_get_cpu_var(var) (*({ \ - extern int simple_indentifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \ -})) - -#define __get_cpu_var(var) __raw_get_cpu_var(var) - -/* A macro to avoid #include hell... */ -#define percpu_modcopy(pcpudst, src, size) \ -do { \ - unsigned int __i; \ - for_each_possible_cpu(__i) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ - (src), (size)); \ -} while (0) - -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - -/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ -#define __percpu_seg "%%fs:" -#else /* !SMP */ -#include <asm-generic/percpu.h> -#define __percpu_seg "" -#endif /* SMP */ - -/* For arch-specific code, we can use direct single-insn ops (they - * don't give an lvalue though). 
*/ -extern void __bad_percpu_size(void); - -#define percpu_to_op(op,var,val) \ - do { \ - typedef typeof(var) T__; \ - if (0) { T__ tmp__; tmp__ = (val); } \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - case 2: \ - asm(op "w %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - case 4: \ - asm(op "l %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - } while (0) - -#define percpu_from_op(op,var) \ - ({ \ - typeof(var) ret__; \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - case 2: \ - asm(op "w "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - case 4: \ - asm(op "l "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - ret__; }) - -#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) -#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) -#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) -#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) -#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) -#endif /* !__ASSEMBLY__ */ - -#endif /* __ARCH_I386_PERCPU__ */ diff --git a/include/asm-x86/percpu_64.h b/include/asm-x86/percpu_64.h deleted file mode 100644 index 5abd48270101..000000000000 --- a/include/asm-x86/percpu_64.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _ASM_X8664_PERCPU_H_ -#define _ASM_X8664_PERCPU_H_ -#include <linux/compiler.h> - -/* Same as asm-generic/percpu.h, except that we store the per cpu offset - in the PDA. Longer term the PDA and every per cpu variable - should be just put into a single section and referenced directly - from %gs */ - -#ifdef CONFIG_SMP - -#include <asm/pda.h> - -#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) -#define __my_cpu_offset() read_pda(data_offset) - -#define per_cpu_offset(x) (__per_cpu_offset(x)) - -/* Separate out the type, so (int[3], foo) works. */ -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_internodealigned_in_smp - -/* var is in discarded region: offset to particular copy we want */ -#define per_cpu(var, cpu) (*({ \ - extern int simple_identifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)); })) -#define __get_cpu_var(var) (*({ \ - extern int simple_identifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); })) -#define __raw_get_cpu_var(var) (*({ \ - extern int simple_identifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); })) - -/* A macro to avoid #include hell... */ -#define percpu_modcopy(pcpudst, src, size) \ -do { \ - unsigned int __i; \ - for_each_possible_cpu(__i) \ - memcpy((pcpudst)+__per_cpu_offset(__i), \ - (src), (size)); \ -} while (0) - -extern void setup_per_cpu_areas(void); - -#else /* ! 
SMP */ - -#define DEFINE_PER_CPU(type, name) \ - __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) -#define __get_cpu_var(var) per_cpu__##var -#define __raw_get_cpu_var(var) per_cpu__##var - -#endif /* SMP */ - -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name - -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - -#endif /* _ASM_X8664_PERCPU_H_ */ diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h index f2fc33ceb9f2..10c2b452e64c 100644 --- a/include/asm-x86/pgalloc_32.h +++ b/include/asm-x86/pgalloc_32.h @@ -3,31 +3,33 @@ #include <linux/threads.h> #include <linux/mm.h> /* for struct page */ +#include <asm/tlb.h> +#include <asm-generic/tlb.h> #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> #else #define paravirt_alloc_pt(mm, pfn) do { } while (0) -#define paravirt_alloc_pd(pfn) do { } while (0) -#define paravirt_alloc_pd(pfn) do { } while (0) +#define paravirt_alloc_pd(mm, pfn) do { } while (0) #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0) #define paravirt_release_pt(pfn) do { } while (0) #define paravirt_release_pd(pfn) do { } while (0) #endif -#define pmd_populate_kernel(mm, pmd, pte) \ -do { \ - paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \ - set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \ -} while (0) +static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) +{ + unsigned long pfn = page_to_pfn(pte); -#define pmd_populate(mm, pmd, pte) \ -do { \ - paravirt_alloc_pt(mm, page_to_pfn(pte)); \ - set_pmd(pmd, __pmd(_PAGE_TABLE + \ - ((unsigned long long)page_to_pfn(pte) << \ - (unsigned long long) PAGE_SHIFT))); \ -} while (0) + paravirt_alloc_pt(mm, pfn); + set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); +} /* * Allocate and free page tables. @@ -49,20 +51,55 @@ static inline void pte_free(struct page *pte) } -#define __pte_free_tlb(tlb,pte) \ -do { \ - paravirt_release_pt(page_to_pfn(pte)); \ - tlb_remove_page((tlb),(pte)); \ -} while (0) +static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) +{ + paravirt_release_pt(page_to_pfn(pte)); + tlb_remove_page(tlb, pte); +} #ifdef CONFIG_X86_PAE /* * In the PAE case we free the pmds as part of the pgd. */ -#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) -#define pmd_free(x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) -#define pud_populate(mm, pmd, pte) BUG() -#endif +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); +} + +static inline void pmd_free(pmd_t *pmd) +{ + BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); + free_page((unsigned long)pmd); +} + +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) +{ + /* This is called just after the pmd has been detached from + the pgd, which requires a full tlb flush to be recognized + by the CPU. Rather than incurring multiple tlb flushes + while the address space is being pulled down, make the tlb + gathering machinery do a full flush when we're done. 
*/ + tlb->fullmm = 1; + + paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT); + tlb_remove_page(tlb, virt_to_page(pmd)); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) +{ + paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT); + + /* Note: almost everything apart from _PAGE_PRESENT is + reserved at the pmd (PDPT) level. */ + set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); + + /* + * Pentium-II erratum A13: in PAE mode we explicitly have to flush + * the TLB via cr3 if the top-level pgd is changed... + */ + if (mm == current->active_mm) + write_cr3(read_cr3()); +} +#endif /* CONFIG_X86_PAE */ #endif /* _I386_PGALLOC_H */ diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h index 84b03cf56a79..701404fab308 100644 --- a/include/asm-x86/pgtable-2level.h +++ b/include/asm-x86/pgtable-2level.h @@ -15,30 +15,31 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) { *ptep = pte; } -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep , pte_t pte) -{ - native_set_pte(ptep, pte); -} + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; } -#ifndef CONFIG_PARAVIRT -#define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval) -#define set_pte_at(mm,addr,ptep,pteval) native_set_pte_at(mm, addr, ptep, pteval) -#define set_pmd(pmdptr, pmdval) native_set_pmd(pmdptr, pmdval) -#endif -#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) -#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) +static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) +{ + native_set_pte(ptep, pte); +} -#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) -#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) +static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + native_set_pte(ptep, pte); +} + +static inline void native_pmd_clear(pmd_t *pmdp) +{ + native_set_pmd(pmdp, __pmd(0)); +} static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) { - *xp = __pte(0); + *xp = native_make_pte(0); } #ifdef CONFIG_SMP @@ -53,16 +54,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!(x).pte_low) #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) -#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) -#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) - -/* - * All present pages are kernel-executable: - */ -static inline int pte_exec_kernel(pte_t pte) -{ - return 1; -} /* * Bits 0, 6 and 7 are taken, split up the 29 bits of offset @@ -74,13 +65,13 @@ static inline int pte_exec_kernel(pte_t pte) ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) #define pgoff_to_pte(off) \ - ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) + ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x1f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) #endif /* _I386_PGTABLE_2LEVEL_H */ diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h 
index 948a33414118..a195c3e757b9 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h @@ -15,16 +15,18 @@ #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) -#define pud_none(pud) 0 -#define pud_bad(pud) 0 -#define pud_present(pud) 1 -/* - * All present pages with !NX bit are kernel-executable: - */ -static inline int pte_exec_kernel(pte_t pte) +static inline int pud_none(pud_t pud) +{ + return pud_val(pud) == 0; +} +static inline int pud_bad(pud_t pud) +{ + return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; +} +static inline int pud_present(pud_t pud) { - return !(pte_val(pte) & _PAGE_NX); + return pud_val(pud) & _PAGE_PRESENT; } /* Rules for using set_pte: the pte being assigned *must* be @@ -39,11 +41,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) smp_wmb(); ptep->pte_low = pte.pte_low; } -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep , pte_t pte) -{ - native_set_pte(ptep, pte); -} /* * Since this is only called on user PTEs, and the page fault handler @@ -71,7 +68,7 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) } static inline void native_set_pud(pud_t *pudp, pud_t pud) { - *pudp = pud; + set_64bit((unsigned long long *)(pudp),native_pud_val(pud)); } /* @@ -94,24 +91,29 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } -#ifndef CONFIG_PARAVIRT -#define set_pte(ptep, pte) native_set_pte(ptep, pte) -#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) -#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte) -#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte) -#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) -#define set_pud(pudp, pud) native_set_pud(pudp, pud) -#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) -#define pmd_clear(pmd) native_pmd_clear(pmd) -#endif - -/* - * Pentium-II erratum A13: in PAE mode we explicitly have to flush - * the TLB via cr3 if the top-level pgd is changed... - * We do not let the generic code free and clear pgd entries due to - * this erratum. - */ -static inline void pud_clear (pud_t * pud) { } +static inline void pud_clear(pud_t *pudp) +{ + set_pud(pudp, __pud(0)); + + /* + * In principle we need to do a cr3 reload here to make sure + * the processor recognizes the changed pgd. In practice, all + * the places where pud_clear() gets called are followed by + * full tlb flushes anyway, so we can defer the cost here. + * + * Specifically: + * + * mm/memory.c:free_pmd_range() - immediately after the + * pud_clear() it does a pmd_free_tlb(). We change the + * mmu_gather structure to do a full tlb flush (which has the + * effect of reloading cr3) when the pagetable free is + * complete. + * + * arch/x86/mm/hugetlbpage.c:huge_pmd_unshare() - the call to + * this is followed by a flush_tlb_range, which on x86 does a + * full tlb flush. 
+ */ +} #define pud_page(pud) \ ((struct page *) __va(pud_val(pud) & PAGE_MASK)) @@ -155,21 +157,7 @@ static inline int pte_none(pte_t pte) static inline unsigned long pte_pfn(pte_t pte) { - return pte_val(pte) >> PAGE_SHIFT; -} - -extern unsigned long long __supported_pte_mask; - -static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) -{ - return __pte((((unsigned long long)page_nr << PAGE_SHIFT) | - pgprot_val(pgprot)) & __supported_pte_mask); -} - -static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) -{ - return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | - pgprot_val(pgprot)) & __supported_pte_mask); + return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT; } /* @@ -177,7 +165,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) * put the 32 bits of offset into the high part. */ #define pte_to_pgoff(pte) ((pte).pte_high) -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) +#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ @@ -185,8 +173,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) -#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) - -#define __pmd_free_tlb(tlb, x) do { } while (0) +#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) #endif /* _I386_PGTABLE_3LEVEL_H */ diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 1039140652af..cd2524f07452 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -1,5 +1,364 @@ +#ifndef _ASM_X86_PGTABLE_H +#define _ASM_X86_PGTABLE_H + +#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) +#define FIRST_USER_ADDRESS 0 + +#define _PAGE_BIT_PRESENT 0 +#define _PAGE_BIT_RW 1 +#define _PAGE_BIT_USER 2 +#define _PAGE_BIT_PWT 3 +#define _PAGE_BIT_PCD 4 +#define _PAGE_BIT_ACCESSED 5 +#define _PAGE_BIT_DIRTY 6 +#define _PAGE_BIT_FILE 6 +#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ +#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ +#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ +#define _PAGE_BIT_UNUSED2 10 +#define _PAGE_BIT_UNUSED3 11 +#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + +/* + * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a + * sign-extended value on 32-bit with all 1's in the upper word, + * which preserves the upper pte values on 64-bit ptes: + */ +#define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT) +#define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW) +#define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER) +#define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT) +#define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD) +#define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED) +#define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY) +#define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */ +#define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */ +#define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) +#define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) +#define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) + +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) +#define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX) +#else +#define _PAGE_NX 0 +#endif + +/* If _PAGE_PRESENT is clear, we use these: */ +#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */ +#define 
_PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; + pte_present gives true */ + +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) + +#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) + +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) + +#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) +#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_COPY PAGE_COPY_NOEXEC +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) +#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) + +#ifdef CONFIG_X86_32 +#define _PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) +#define _PAGE_KERNEL (_PAGE_KERNEL_EXEC | _PAGE_NX) + +#ifndef __ASSEMBLY__ +extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; +#endif /* __ASSEMBLY__ */ +#else +#define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) +#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) +#endif + +#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) +#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) +#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) +#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) + +#ifdef CONFIG_X86_32 +# define MAKE_GLOBAL(x) __pgprot((x)) +#else +# define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL) +#endif + +#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) +#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) +#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) +#define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) +#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) +#define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) +#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE) +#define PAGE_KERNEL_LARGE_EXEC MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC) +#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL) +#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE) + +/* xwr */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_EXEC +#define __P101 PAGE_READONLY_EXEC +#define __P110 PAGE_COPY_EXEC +#define __P111 PAGE_COPY_EXEC + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_EXEC +#define __S101 PAGE_READONLY_EXEC +#define __S110 PAGE_SHARED_EXEC +#define __S111 PAGE_SHARED_EXEC + +#ifndef __ASSEMBLY__ + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. 
+ */ +extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +extern spinlock_t pgd_lock; +extern struct list_head pgd_list; + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } +static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; } +static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } + +static inline int pmd_large(pmd_t pte) { + return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == + (_PAGE_PSE|_PAGE_PRESENT); +} + +static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); } +static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); } +static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); } +static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); } +static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); } +static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); } +static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } +static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); } +static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); } +static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); } +static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); } + +extern pteval_t __supported_pte_mask; + +static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) +{ + return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) | + pgprot_val(pgprot)) & __supported_pte_mask); +} + +static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) +{ + return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) | + pgprot_val(pgprot)) & __supported_pte_mask); +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pteval_t val = pte_val(pte); + + /* + * Chop off the NX bit (if present), and add the NX portion of + * the newprot (if present): + */ + val &= _PAGE_CHG_MASK & ~_PAGE_NX; + val |= pgprot_val(newprot) & __supported_pte_mask; + + return __pte(val); +} + +#define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX)) + +#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else /* !CONFIG_PARAVIRT */ +#define set_pte(ptep, pte) native_set_pte(ptep, pte) +#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) + +#define set_pte_present(mm, addr, ptep, pte) \ + native_set_pte_present(mm, addr, ptep, pte) +#define set_pte_atomic(ptep, pte) \ + native_set_pte_atomic(ptep, pte) + +#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) + +#ifndef __PAGETABLE_PUD_FOLDED +#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) +#define pgd_clear(pgd) native_pgd_clear(pgd) +#endif + +#ifndef set_pud +# define set_pud(pudp, pud) native_set_pud(pudp, pud) 
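(Editorial sketch, not part of the patch: a minimal user-space model of the pte_modify() helper added in the unified asm-x86/pgtable.h above, which keeps the pfn plus accessed/dirty bits and swaps in new protection bits. The constants are simplified stand-ins for a non-PAE 32-bit layout where _PAGE_NX is 0; PTE_MASK and the __supported_pte_mask stand-in are assumptions for illustration only.)

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pteval_t;                 /* stand-in for the kernel's pteval_t */

#define _PAGE_PRESENT   0x001u
#define _PAGE_RW        0x002u
#define _PAGE_USER      0x004u
#define _PAGE_ACCESSED  0x020u
#define _PAGE_DIRTY     0x040u
#define PTE_MASK        0xfffff000u        /* assumed pfn bits for this sketch */
#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

static const pteval_t supported_pte_mask = 0xffffffffu;  /* stand-in for __supported_pte_mask */

/* Keep the pfn and the accessed/dirty state, replace the protection bits. */
static pteval_t pte_modify(pteval_t pte, pteval_t newprot)
{
	pteval_t val = pte & _PAGE_CHG_MASK;
	val |= newprot & supported_pte_mask;
	return val;
}

int main(void)
{
	pteval_t pte = 0x12345000u | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
		       _PAGE_ACCESSED | _PAGE_DIRTY;
	/* Drop write permission, roughly what an mprotect(PROT_READ) ends up doing. */
	pteval_t ro = pte_modify(pte, _PAGE_PRESENT | _PAGE_USER);

	printf("old pte %#x -> read-only pte %#x\n", (unsigned)pte, (unsigned)ro);
	return 0;
}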
+#endif + +#ifndef __PAGETABLE_PMD_FOLDED +#define pud_clear(pud) native_pud_clear(pud) +#endif + +#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) +#define pmd_clear(pmd) native_pmd_clear(pmd) + +#define pte_update(mm, addr, ptep) do { } while (0) +#define pte_update_defer(mm, addr, ptep) do { } while (0) +#endif /* CONFIG_PARAVIRT */ + +#endif /* __ASSEMBLY__ */ + #ifdef CONFIG_X86_32 # include "pgtable_32.h" #else # include "pgtable_64.h" #endif + +#ifndef __ASSEMBLY__ + +enum { + PG_LEVEL_NONE, + PG_LEVEL_4K, + PG_LEVEL_2M, + PG_LEVEL_1G, +}; + +/* + * Helper function that returns the kernel pagetable entry controlling + * the virtual address 'address'. NULL means no pagetable entry present. + * NOTE: the return type is pte_t but if the pmd is PSE then we return it + * as a pte too. + */ +extern pte_t *lookup_address(unsigned long address, int *level); + +/* local pte updates need not use xchg for locking */ +static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) +{ + pte_t res = *ptep; + + /* Pure native function needs no input for mm, addr */ + native_pte_clear(NULL, 0, ptep); + return res; +} + +static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep , pte_t pte) +{ + native_set_pte(ptep, pte); +} + +#ifndef CONFIG_PARAVIRT +/* + * Rules for using pte_update - it must be called after any PTE update which + * has not been done using the set_pte / clear_pte interfaces. It is used by + * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE + * updates should either be sets, clears, or set_pte_atomic for P->P + * transitions, which means this hook should only be called for user PTEs. + * This hook implies a P->P protection or access change has taken place, which + * requires a subsequent TLB flush. The notification can optionally be delayed + * until the TLB flush event by using the pte_update_defer form of the + * interface, but care must be taken to assure that the flush happens while + * still holding the same page table lock so that the shadow and primary pages + * do not become out of sync on SMP. + */ +#define pte_update(mm, addr, ptep) do { } while (0) +#define pte_update_defer(mm, addr, ptep) do { } while (0) +#endif + +/* + * We only update the dirty/accessed state if we set + * the dirty bit by hand in the kernel, since the hardware + * will do the accessed bit for us, and we don't want to + * race with other CPU's that might be updating the dirty + * bit at the same time. 
+ */ +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ +({ \ + int __changed = !pte_same(*(ptep), entry); \ + if (__changed && dirty) { \ + *ptep = entry; \ + pte_update_defer((vma)->vm_mm, (address), (ptep)); \ + flush_tlb_page(vma, address); \ + } \ + __changed; \ +}) + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young(vma, addr, ptep) ({ \ + int __ret = 0; \ + if (pte_young(*(ptep))) \ + __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \ + &(ptep)->pte); \ + if (__ret) \ + pte_update((vma)->vm_mm, addr, ptep); \ + __ret; \ +}) + +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young(vma, address, ptep) \ +({ \ + int __young; \ + __young = ptep_test_and_clear_young((vma), (address), (ptep)); \ + if (__young) \ + flush_tlb_page(vma, address); \ + __young; \ +}) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_t pte = native_ptep_get_and_clear(ptep); + pte_update(mm, addr, ptep); + return pte; +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) +{ + pte_t pte; + if (full) { + /* + * Full address destruction in progress; paravirt does not + * care about updates and native needs no locking + */ + pte = native_local_ptep_get_and_clear(ptep); + } else { + pte = ptep_get_and_clear(mm, addr, ptep); + } + return pte; +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); + pte_update(mm, addr, ptep); +} + +#include <asm-generic/pgtable.h> +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_X86_PGTABLE_H */ diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index ed3e70d8d04b..21e70fbf1dae 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -25,20 +25,11 @@ struct mm_struct; struct vm_area_struct; -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) -extern unsigned long empty_zero_page[1024]; extern pgd_t swapper_pg_dir[1024]; extern struct kmem_cache *pmd_cache; -extern spinlock_t pgd_lock; -extern struct page *pgd_list; void check_pgt_cache(void); -void pmd_ctor(struct kmem_cache *, void *); -void pgtable_cache_init(void); +static inline void pgtable_cache_init(void) {} void paging_init(void); @@ -58,9 +49,6 @@ void paging_init(void); #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) -#define FIRST_USER_ADDRESS 0 - #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) @@ -85,113 +73,6 @@ void paging_init(void); #endif /* - * _PAGE_PSE set in the page directory entry just means that - * the page directory entry points directly to a 4MB-aligned block of - * memory. - */ -#define _PAGE_BIT_PRESENT 0 -#define _PAGE_BIT_RW 1 -#define _PAGE_BIT_USER 2 -#define _PAGE_BIT_PWT 3 -#define _PAGE_BIT_PCD 4 -#define _PAGE_BIT_ACCESSED 5 -#define _PAGE_BIT_DIRTY 6 -#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. 
*/ -#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ -#define _PAGE_BIT_UNUSED2 10 -#define _PAGE_BIT_UNUSED3 11 -#define _PAGE_BIT_NX 63 - -#define _PAGE_PRESENT 0x001 -#define _PAGE_RW 0x002 -#define _PAGE_USER 0x004 -#define _PAGE_PWT 0x008 -#define _PAGE_PCD 0x010 -#define _PAGE_ACCESSED 0x020 -#define _PAGE_DIRTY 0x040 -#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ -#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ -#define _PAGE_UNUSED1 0x200 /* available for programmer */ -#define _PAGE_UNUSED2 0x400 -#define _PAGE_UNUSED3 0x800 - -/* If _PAGE_PRESENT is clear, we use these: */ -#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ -#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; - pte_present gives true */ -#ifdef CONFIG_X86_PAE -#define _PAGE_NX (1ULL<<_PAGE_BIT_NX) -#else -#define _PAGE_NX 0 -#endif - -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) - -#define PAGE_NONE \ - __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) -#define PAGE_SHARED \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) - -#define PAGE_SHARED_EXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY_NOEXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_COPY_EXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY \ - PAGE_COPY_NOEXEC -#define PAGE_READONLY \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_READONLY_EXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) - -#define _PAGE_KERNEL \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) -#define _PAGE_KERNEL_EXEC \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) - -extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; -#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) -#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) -#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) -#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) -#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) - -#define PAGE_KERNEL __pgprot(__PAGE_KERNEL) -#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) -#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) -#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) -#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) -#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) - -/* - * The i386 can't do page protection for execute, and considers that - * the same are read. Also, write permissions imply read permissions. - * This is the closest we can get.. 
- */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - -/* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are * done without a 'access_ok(VERIFY_WRITE,..)' @@ -211,133 +92,12 @@ extern unsigned long pg0[]; #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } -static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } -static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } - -/* - * The following only works if pte_present() is not true. - */ -static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } - -static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } -static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } -static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } - #ifdef CONFIG_X86_PAE # include <asm/pgtable-3level.h> #else # include <asm/pgtable-2level.h> #endif -#ifndef CONFIG_PARAVIRT -/* - * Rules for using pte_update - it must be called after any PTE update which - * has not been done using the set_pte / clear_pte interfaces. It is used by - * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE - * updates should either be sets, clears, or set_pte_atomic for P->P - * transitions, which means this hook should only be called for user PTEs. - * This hook implies a P->P protection or access change has taken place, which - * requires a subsequent TLB flush. The notification can optionally be delayed - * until the TLB flush event by using the pte_update_defer form of the - * interface, but care must be taken to assure that the flush happens while - * still holding the same page table lock so that the shadow and primary pages - * do not become out of sync on SMP. 
- */ -#define pte_update(mm, addr, ptep) do { } while (0) -#define pte_update_defer(mm, addr, ptep) do { } while (0) -#endif - -/* local pte updates need not use xchg for locking */ -static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) -{ - pte_t res = *ptep; - - /* Pure native function needs no input for mm, addr */ - native_pte_clear(NULL, 0, ptep); - return res; -} - -/* - * We only update the dirty/accessed state if we set - * the dirty bit by hand in the kernel, since the hardware - * will do the accessed bit for us, and we don't want to - * race with other CPU's that might be updating the dirty - * bit at the same time. - */ -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ -({ \ - int __changed = !pte_same(*(ptep), entry); \ - if (__changed && dirty) { \ - (ptep)->pte_low = (entry).pte_low; \ - pte_update_defer((vma)->vm_mm, (address), (ptep)); \ - flush_tlb_page(vma, address); \ - } \ - __changed; \ -}) - -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define ptep_test_and_clear_young(vma, addr, ptep) ({ \ - int __ret = 0; \ - if (pte_young(*(ptep))) \ - __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \ - &(ptep)->pte_low); \ - if (__ret) \ - pte_update((vma)->vm_mm, addr, ptep); \ - __ret; \ -}) - -#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -#define ptep_clear_flush_young(vma, address, ptep) \ -({ \ - int __young; \ - __young = ptep_test_and_clear_young((vma), (address), (ptep)); \ - if (__young) \ - flush_tlb_page(vma, address); \ - __young; \ -}) - -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t pte = native_ptep_get_and_clear(ptep); - pte_update(mm, addr, ptep); - return pte; -} - -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL -static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) -{ - pte_t pte; - if (full) { - /* - * Full address destruction in progress; paravirt does not - * care about updates and native needs no locking - */ - pte = native_local_ptep_get_and_clear(ptep); - } else { - pte = ptep_get_and_clear(mm, addr, ptep); - } - return pte; -} - -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - clear_bit(_PAGE_BIT_RW, &ptep->pte_low); - pte_update(mm, addr, ptep); -} - /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * @@ -367,25 +127,6 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - pte.pte_low &= _PAGE_CHG_MASK; - pte.pte_low |= pgprot_val(newprot); -#ifdef CONFIG_X86_PAE - /* - * Chop off the NX bit (if present), and add the NX portion of - * the newprot (if present): - */ - pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); - pte.pte_high |= (pgprot_val(newprot) >> 32) & \ - (__supported_pte_mask >> 32); -#endif - return pte; -} - -#define pmd_large(pmd) \ -((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) - /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] * @@ -432,26 +173,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define pmd_page_vaddr(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) -/* - * Helper function that returns the kernel pagetable entry controlling - * the virtual address 'address'. 
NULL means no pagetable entry present. - * NOTE: the return type is pte_t but if the pmd is PSE then we return it - * as a pte too. - */ -extern pte_t *lookup_address(unsigned long address); - -/* - * Make a given kernel text page executable/non-executable. - * Returns the previous executability setting of that page (which - * is used to restore the previous state). Used by the SMP bootup code. - * NOTE: this is an __init function for security reasons. - */ -#ifdef CONFIG_X86_PAE - extern int set_kernel_exec(unsigned long vaddr, int enable); -#else - static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} -#endif - #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) @@ -497,13 +218,17 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base) #endif /* !__ASSEMBLY__ */ +/* + * kern_addr_valid() is (1) for FLATMEM and (0) for + * SPARSEMEM and DISCONTIGMEM + */ #ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) -#endif /* CONFIG_FLATMEM */ +#else +#define kern_addr_valid(kaddr) (0) +#endif #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) -#include <asm-generic/pgtable.h> - #endif /* _I386_PGTABLE_H */ diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 9b0ff477b39e..6e615a103c2f 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -17,22 +17,16 @@ extern pud_t level3_kernel_pgt[512]; extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pgd_t init_level4_pgt[]; -extern unsigned long __supported_pte_mask; #define swapper_pg_dir init_level4_pgt extern void paging_init(void); extern void clear_kernel_mapping(unsigned long addr, unsigned long size); -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. 
- */ -extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) - #endif /* !__ASSEMBLY__ */ +#define SHARED_KERNEL_PMD 1 + /* * PGDIR_SHIFT determines what a top-level page table entry can map */ @@ -71,57 +65,68 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; #define pgd_none(x) (!pgd_val(x)) #define pud_none(x) (!pud_val(x)) -static inline void set_pte(pte_t *dst, pte_t val) +struct mm_struct; + +static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + *ptep = native_make_pte(0); +} + +static inline void native_set_pte(pte_t *ptep, pte_t pte) { - pte_val(*dst) = pte_val(val); -} -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + *ptep = pte; +} -static inline void set_pmd(pmd_t *dst, pmd_t val) +static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) { - pmd_val(*dst) = pmd_val(val); -} + native_set_pte(ptep, pte); +} -static inline void set_pud(pud_t *dst, pud_t val) +static inline pte_t native_ptep_get_and_clear(pte_t *xp) { - pud_val(*dst) = pud_val(val); +#ifdef CONFIG_SMP + return native_make_pte(xchg(&xp->pte, 0)); +#else + /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */ + pte_t ret = *xp; + native_pte_clear(NULL, 0, xp); + return ret; +#endif } -static inline void pud_clear (pud_t *pud) +static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { - set_pud(pud, __pud(0)); + *pmdp = pmd; } -static inline void set_pgd(pgd_t *dst, pgd_t val) +static inline void native_pmd_clear(pmd_t *pmd) { - pgd_val(*dst) = pgd_val(val); -} + native_set_pmd(pmd, native_make_pmd(0)); +} -static inline void pgd_clear (pgd_t * pgd) +static inline void native_set_pud(pud_t *pudp, pud_t pud) { - set_pgd(pgd, __pgd(0)); + *pudp = pud; } -#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) +static inline void native_pud_clear(pud_t *pud) +{ + native_set_pud(pud, native_make_pud(0)); +} -struct mm_struct; +static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) +{ + *pgdp = pgd; +} -static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) +static inline void native_pgd_clear(pgd_t * pgd) { - pte_t pte; - if (full) { - pte = *ptep; - *ptep = __pte(0); - } else { - pte = ptep_get_and_clear(mm, addr, ptep); - } - return pte; + native_set_pgd(pgd, native_make_pgd(0)); } #define pte_same(a, b) ((a).pte == (b).pte) -#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) - #endif /* !__ASSEMBLY__ */ #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) @@ -131,8 +136,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) -#define FIRST_USER_ADDRESS 0 #define MAXMEM _AC(0x3fffffffffff, UL) #define VMALLOC_START _AC(0xffffc20000000000, UL) @@ -142,91 +145,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long #define MODULES_END _AC(0xfffffffffff00000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) -#define _PAGE_BIT_PRESENT 0 -#define _PAGE_BIT_RW 1 -#define _PAGE_BIT_USER 2 -#define _PAGE_BIT_PWT 3 -#define _PAGE_BIT_PCD 4 -#define _PAGE_BIT_ACCESSED 5 -#define _PAGE_BIT_DIRTY 6 -#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ -#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ 
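(Editorial sketch, not part of the patch: the unified asm-x86/pgtable.h above defines most _PAGE_* masks with _AC(1, L) precisely so that, on 32-bit PAE, complementing a mask sign-extends to all 1's in the upper word of the 64-bit pte value and the NX bit survives. The stand-alone program below models that behaviour with int32_t/uint32_t standing in for i386's long/unsigned long; the pte value used is made up for illustration.)

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;                        /* 64-bit pte value, as under PAE */

int main(void)
{
	pteval_t pte = (1ULL << 63) | 0x1000 | 0x67;  /* NX | pfn bits | low flags */

	/* _PAGE_DIRTY is bit 6; model it with the two 32-bit "long" flavours. */
	int32_t  dirty_signed   = (int32_t)1 << 6;    /* like _AC(1, L)  */
	uint32_t dirty_unsigned = (uint32_t)1 << 6;   /* like _AC(1, UL) */

	/* Signed complement sign-extends to 0xffffffffffffffbf: NX is kept. */
	pteval_t keep_nx = pte & ~dirty_signed;
	/* Unsigned complement zero-extends to 0x00000000ffffffbf: NX is lost. */
	pteval_t lose_nx = pte & ~dirty_unsigned;

	printf("sign-extended mask keeps NX: %#llx\n", (unsigned long long)keep_nx);
	printf("zero-extended mask drops NX: %#llx\n", (unsigned long long)lose_nx);
	return 0;
}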
- -#define _PAGE_PRESENT 0x001 -#define _PAGE_RW 0x002 -#define _PAGE_USER 0x004 -#define _PAGE_PWT 0x008 -#define _PAGE_PCD 0x010 -#define _PAGE_ACCESSED 0x020 -#define _PAGE_DIRTY 0x040 -#define _PAGE_PSE 0x080 /* 2MB page */ -#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ -#define _PAGE_GLOBAL 0x100 /* Global TLB entry */ - -#define _PAGE_PROTNONE 0x080 /* If not present */ -#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX) - -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) - -#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) - -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_COPY PAGE_COPY_NOEXEC -#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define __PAGE_KERNEL \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) -#define __PAGE_KERNEL_EXEC \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) -#define __PAGE_KERNEL_NOCACHE \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX) -#define __PAGE_KERNEL_RO \ - (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) -#define __PAGE_KERNEL_VSYSCALL \ - (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define __PAGE_KERNEL_VSYSCALL_NOCACHE \ - (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD) -#define __PAGE_KERNEL_LARGE \ - (__PAGE_KERNEL | _PAGE_PSE) -#define __PAGE_KERNEL_LARGE_EXEC \ - (__PAGE_KERNEL_EXEC | _PAGE_PSE) - -#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL) - -#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) -#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) -#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) -#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) -#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL) -#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL) -#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE) -#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE) - -/* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - #ifndef __ASSEMBLY__ static inline unsigned long pgd_bad(pgd_t pgd) @@ -246,66 +164,16 @@ static inline unsigned long pmd_bad(pmd_t pmd) #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) -#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this - right? 
*/ +#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) -static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) -{ - pte_t pte; - pte_val(pte) = (page_nr << PAGE_SHIFT); - pte_val(pte) |= pgprot_val(pgprot); - pte_val(pte) &= __supported_pte_mask; - return pte; -} - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) -static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } -static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } - -static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } -static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } -static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } -static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; } -static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; } - -struct vm_area_struct; - -static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) -{ - if (!pte_young(*ptep)) - return 0; - return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte); -} - -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - clear_bit(_PAGE_BIT_RW, &ptep->pte); -} - /* * Macro to mark a page protection value as "uncacheable". */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) -static inline int pmd_large(pmd_t pte) { - return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; -} - /* * Conversion functions: convert a page and protection to a page entry, @@ -340,29 +208,18 @@ static inline int pmd_large(pmd_t pte) { pmd_index(address)) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) -#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE }) +#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE }) #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT /* PTE - Level 1 access. 
*/ /* page, protection -> pte */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE) -/* Change flags of a PTE */ -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - pte_val(pte) &= _PAGE_CHG_MASK; - pte_val(pte) |= pgprot_val(newprot); - pte_val(pte) &= __supported_pte_mask; - return pte; -} - #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ @@ -376,40 +233,20 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define update_mmu_cache(vma,address,pte) do { } while (0) -/* We only update the dirty/accessed state if we set - * the dirty bit by hand in the kernel, since the hardware - * will do the accessed bit for us, and we don't want to - * race with other CPU's that might be updating the dirty - * bit at the same time. */ -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ -({ \ - int __changed = !pte_same(*(__ptep), __entry); \ - if (__changed && __dirty) { \ - set_pte(__ptep, __entry); \ - flush_tlb_page(__vma, __address); \ - } \ - __changed; \ -}) - /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -extern spinlock_t pgd_lock; -extern struct list_head pgd_list; +#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) extern int kern_addr_valid(unsigned long addr); -pte_t *lookup_address(unsigned long addr); - #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) #define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN #define pgtable_cache_init() do { } while (0) #define check_pgt_cache() do { } while (0) @@ -422,12 +259,7 @@ pte_t *lookup_address(unsigned long addr); #define kc_offset_to_vaddr(o) \ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? 
((o) | (~__VIRTUAL_MASK)) : (o)) -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL -#define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME -#include <asm-generic/pgtable.h> #endif /* !__ASSEMBLY__ */ #endif /* _X86_64_PGTABLE_H */ diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 46e1c04e309c..ab4d0c2a3f8f 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -1,5 +1,842 @@ +#ifndef __ASM_X86_PROCESSOR_H +#define __ASM_X86_PROCESSOR_H + +#include <asm/processor-flags.h> + +/* migration helpers, for KVM - will be removed in 2.6.25: */ +#include <asm/vm86.h> +#define Xgt_desc_struct desc_ptr + +/* Forward declaration, a strange C thing */ +struct task_struct; +struct mm_struct; + +#include <asm/vm86.h> +#include <asm/math_emu.h> +#include <asm/segment.h> +#include <asm/types.h> +#include <asm/sigcontext.h> +#include <asm/current.h> +#include <asm/cpufeature.h> +#include <asm/system.h> +#include <asm/page.h> +#include <asm/percpu.h> +#include <asm/msr.h> +#include <asm/desc_defs.h> +#include <asm/nops.h> +#include <linux/personality.h> +#include <linux/cpumask.h> +#include <linux/cache.h> +#include <linux/threads.h> +#include <linux/init.h> + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +static inline void *current_text_addr(void) +{ + void *pc; + asm volatile("mov $1f,%0\n1:":"=r" (pc)); + return pc; +} + +#ifdef CONFIG_X86_VSMP +#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) +#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) +#else +#define ARCH_MIN_TASKALIGN 16 +#define ARCH_MIN_MMSTRUCT_ALIGN 0 +#endif + +/* + * CPU type and hardware bug flags. Kept separately for each CPU. + * Members of this structure are referenced in head.S, so think twice + * before touching them. [mj] + */ + +struct cpuinfo_x86 { + __u8 x86; /* CPU family */ + __u8 x86_vendor; /* CPU vendor */ + __u8 x86_model; + __u8 x86_mask; +#ifdef CONFIG_X86_32 + char wp_works_ok; /* It doesn't on 386's */ + char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ + char hard_math; + char rfu; + char fdiv_bug; + char f00f_bug; + char coma_bug; + char pad0; +#else + /* number of 4K pages in DTLB/ITLB combined(in pages)*/ + int x86_tlbsize; + __u8 x86_virt_bits, x86_phys_bits; + /* cpuid returned core id bits */ + __u8 x86_coreid_bits; + /* Max extended CPUID function supported */ + __u32 extended_cpuid_level; +#endif + int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ + __u32 x86_capability[NCAPINTS]; + char x86_vendor_id[16]; + char x86_model_id[64]; + int x86_cache_size; /* in KB - valid for CPUS which support this + call */ + int x86_cache_alignment; /* In bytes */ + int x86_power; + unsigned long loops_per_jiffy; +#ifdef CONFIG_SMP + cpumask_t llc_shared_map; /* cpus sharing the last level cache */ +#endif + u16 x86_max_cores; /* cpuid returned max cores value */ + u16 apicid; + u16 x86_clflush_size; +#ifdef CONFIG_SMP + u16 booted_cores; /* number of cores as seen by OS */ + u16 phys_proc_id; /* Physical processor id. 
*/ + u16 cpu_core_id; /* Core id */ + u16 cpu_index; /* index into per_cpu list */ +#endif +} __attribute__((__aligned__(SMP_CACHE_BYTES))); + +#define X86_VENDOR_INTEL 0 +#define X86_VENDOR_CYRIX 1 +#define X86_VENDOR_AMD 2 +#define X86_VENDOR_UMC 3 +#define X86_VENDOR_NEXGEN 4 +#define X86_VENDOR_CENTAUR 5 +#define X86_VENDOR_TRANSMETA 7 +#define X86_VENDOR_NSC 8 +#define X86_VENDOR_NUM 9 +#define X86_VENDOR_UNKNOWN 0xff + +/* + * capabilities of CPUs + */ +extern struct cpuinfo_x86 boot_cpu_data; +extern struct cpuinfo_x86 new_cpu_data; +extern struct tss_struct doublefault_tss; +extern __u32 cleared_cpu_caps[NCAPINTS]; + +#ifdef CONFIG_SMP +DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); +#define cpu_data(cpu) per_cpu(cpu_info, cpu) +#define current_cpu_data cpu_data(smp_processor_id()) +#else +#define cpu_data(cpu) boot_cpu_data +#define current_cpu_data boot_cpu_data +#endif + +void cpu_detect(struct cpuinfo_x86 *c); + +extern void identify_cpu(struct cpuinfo_x86 *); +extern void identify_boot_cpu(void); +extern void identify_secondary_cpu(struct cpuinfo_x86 *); +extern void print_cpu_info(struct cpuinfo_x86 *); +extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); +extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; + +#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) +extern void detect_ht(struct cpuinfo_x86 *c); +#else +static inline void detect_ht(struct cpuinfo_x86 *c) {} +#endif + +static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + /* ecx is often an input as well as an output. */ + __asm__("cpuid" + : "=a" (*eax), + "=b" (*ebx), + "=c" (*ecx), + "=d" (*edx) + : "0" (*eax), "2" (*ecx)); +} + +static inline void load_cr3(pgd_t *pgdir) +{ + write_cr3(__pa(pgdir)); +} + +#ifdef CONFIG_X86_32 +/* This is the TSS defined by the hardware. */ +struct x86_hw_tss { + unsigned short back_link, __blh; + unsigned long sp0; + unsigned short ss0, __ss0h; + unsigned long sp1; + unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ + unsigned long sp2; + unsigned short ss2, __ss2h; + unsigned long __cr3; + unsigned long ip; + unsigned long flags; + unsigned long ax, cx, dx, bx; + unsigned long sp, bp, si, di; + unsigned short es, __esh; + unsigned short cs, __csh; + unsigned short ss, __ssh; + unsigned short ds, __dsh; + unsigned short fs, __fsh; + unsigned short gs, __gsh; + unsigned short ldt, __ldth; + unsigned short trace, io_bitmap_base; +} __attribute__((packed)); +#else +struct x86_hw_tss { + u32 reserved1; + u64 sp0; + u64 sp1; + u64 sp2; + u64 reserved2; + u64 ist[7]; + u32 reserved3; + u32 reserved4; + u16 reserved5; + u16 io_bitmap_base; +} __attribute__((packed)) ____cacheline_aligned; +#endif + +/* + * Size of io_bitmap. + */ +#define IO_BITMAP_BITS 65536 +#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) +#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) +#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) +#define INVALID_IO_BITMAP_OFFSET 0x8000 +#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 + +struct tss_struct { + struct x86_hw_tss x86_tss; + + /* + * The extra 1 is there because the CPU will access an + * additional byte beyond the end of the IO permission + * bitmap. The extra byte must be all 1 bits, and must + * be within the limit. 
+ */ + unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; + /* + * Cache the current maximum and the last task that used the bitmap: + */ + unsigned long io_bitmap_max; + struct thread_struct *io_bitmap_owner; + /* + * pads the TSS to be cacheline-aligned (size is 0x100) + */ + unsigned long __cacheline_filler[35]; + /* + * .. and then another 0x100 bytes for emergency kernel stack + */ + unsigned long stack[64]; +} __attribute__((packed)); + +DECLARE_PER_CPU(struct tss_struct, init_tss); + +/* Save the original ist values for checking stack pointers during debugging */ +struct orig_ist { + unsigned long ist[7]; +}; + +#define MXCSR_DEFAULT 0x1f80 + +struct i387_fsave_struct { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ + u32 status; /* software status information */ +}; + +struct i387_fxsave_struct { + u16 cwd; + u16 swd; + u16 twd; + u16 fop; + union { + struct { + u64 rip; + u64 rdp; + }; + struct { + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + }; + }; + u32 mxcsr; + u32 mxcsr_mask; + u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ + u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ + u32 padding[24]; +} __attribute__((aligned(16))); + +struct i387_soft_struct { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ + u8 ftop, changed, lookahead, no_update, rm, alimit; + struct info *info; + u32 entry_eip; +}; + +union i387_union { + struct i387_fsave_struct fsave; + struct i387_fxsave_struct fxsave; + struct i387_soft_struct soft; +}; + +#ifdef CONFIG_X86_32 +/* + * the following now lives in the per cpu area: + * extern int cpu_llc_id[NR_CPUS]; + */ +DECLARE_PER_CPU(u8, cpu_llc_id); +#else +DECLARE_PER_CPU(struct orig_ist, orig_ist); +#endif + +extern void print_cpu_info(struct cpuinfo_x86 *); +extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); +extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; + +struct thread_struct { +/* cached TLS descriptors. */ + struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; + unsigned long sp0; + unsigned long sp; +#ifdef CONFIG_X86_32 + unsigned long sysenter_cs; +#else + unsigned long usersp; /* Copy from PDA */ + unsigned short es, ds, fsindex, gsindex; +#endif + unsigned long ip; + unsigned long fs; + unsigned long gs; +/* Hardware debugging registers */ + unsigned long debugreg0; + unsigned long debugreg1; + unsigned long debugreg2; + unsigned long debugreg3; + unsigned long debugreg6; + unsigned long debugreg7; +/* fault info */ + unsigned long cr2, trap_no, error_code; +/* floating point info */ + union i387_union i387 __attribute__((aligned(16)));; +#ifdef CONFIG_X86_32 +/* virtual 86 mode info */ + struct vm86_struct __user *vm86_info; + unsigned long screen_bitmap; + unsigned long v86flags, v86mask, saved_sp0; + unsigned int saved_fs, saved_gs; +#endif +/* IO permissions */ + unsigned long *io_bitmap_ptr; + unsigned long iopl; +/* max allowed port in the bitmap, in bytes: */ + unsigned io_bitmap_max; +/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ + unsigned long debugctlmsr; +/* Debug Store - if not 0 points to a DS Save Area configuration; + * goes into MSR_IA32_DS_AREA */ + unsigned long ds_area_msr; +}; + +static inline unsigned long native_get_debugreg(int regno) +{ + unsigned long val = 0; /* Damn you, gcc! 
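The MXCSR_DEFAULT value above (0x1f80: all SSE exceptions masked, round-to-nearest) can be compared against the live MXCSR from user space. A small sketch, assuming SSE and the <xmmintrin.h> intrinsic _mm_getcsr():

/* mxcsr.c - compare the running MXCSR with the kernel's MXCSR_DEFAULT. */
#include <xmmintrin.h>
#include <stdio.h>

#define MXCSR_DEFAULT 0x1f80    /* same value the header above uses */

int main(void)
{
    unsigned int csr = _mm_getcsr();

    printf("MXCSR = %#x (%s default %#x)\n", csr,
           csr == MXCSR_DEFAULT ? "matches" : "differs from",
           MXCSR_DEFAULT);
    return 0;
}
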
*/ + + switch (regno) { + case 0: + asm("mov %%db0, %0" :"=r" (val)); break; + case 1: + asm("mov %%db1, %0" :"=r" (val)); break; + case 2: + asm("mov %%db2, %0" :"=r" (val)); break; + case 3: + asm("mov %%db3, %0" :"=r" (val)); break; + case 6: + asm("mov %%db6, %0" :"=r" (val)); break; + case 7: + asm("mov %%db7, %0" :"=r" (val)); break; + default: + BUG(); + } + return val; +} + +static inline void native_set_debugreg(int regno, unsigned long value) +{ + switch (regno) { + case 0: + asm("mov %0,%%db0" : /* no output */ :"r" (value)); + break; + case 1: + asm("mov %0,%%db1" : /* no output */ :"r" (value)); + break; + case 2: + asm("mov %0,%%db2" : /* no output */ :"r" (value)); + break; + case 3: + asm("mov %0,%%db3" : /* no output */ :"r" (value)); + break; + case 6: + asm("mov %0,%%db6" : /* no output */ :"r" (value)); + break; + case 7: + asm("mov %0,%%db7" : /* no output */ :"r" (value)); + break; + default: + BUG(); + } +} + +/* + * Set IOPL bits in EFLAGS from given mask + */ +static inline void native_set_iopl_mask(unsigned mask) +{ +#ifdef CONFIG_X86_32 + unsigned int reg; + __asm__ __volatile__ ("pushfl;" + "popl %0;" + "andl %1, %0;" + "orl %2, %0;" + "pushl %0;" + "popfl" + : "=&r" (reg) + : "i" (~X86_EFLAGS_IOPL), "r" (mask)); +#endif +} + +static inline void native_load_sp0(struct tss_struct *tss, + struct thread_struct *thread) +{ + tss->x86_tss.sp0 = thread->sp0; +#ifdef CONFIG_X86_32 + /* Only happens when SEP is enabled, no need to test "SEP"arately */ + if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { + tss->x86_tss.ss1 = thread->sysenter_cs; + wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); + } +#endif +} + +static inline void native_swapgs(void) +{ +#ifdef CONFIG_X86_64 + asm volatile("swapgs" ::: "memory"); +#endif +} + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#define __cpuid native_cpuid +#define paravirt_enabled() 0 + +/* + * These special macros can be used to get or set a debugging register + */ +#define get_debugreg(var, register) \ + (var) = native_get_debugreg(register) +#define set_debugreg(value, register) \ + native_set_debugreg(register, value) + +static inline void load_sp0(struct tss_struct *tss, + struct thread_struct *thread) +{ + native_load_sp0(tss, thread); +} + +#define set_iopl_mask native_set_iopl_mask +#define SWAPGS swapgs +#endif /* CONFIG_PARAVIRT */ + +/* + * Save the cr4 feature set we're using (ie + * Pentium 4MB enable and PPro Global page + * enable), so that any CPU's that boot up + * after us can get the correct flags. 
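The IOPL and I/O-bitmap plumbing above is what ultimately backs the iopl(2)/ioperm(2) system calls. A sketch of the user-visible side, assuming glibc's <sys/io.h> and root privileges (port 0x80 is the traditional POST-code debug port):

/* port80.c - request access to I/O port 0x80 and write a byte to it.
 * Must run as root; exercises the per-task I/O permission bitmap.
 */
#include <sys/io.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
    /* Ask the kernel to set one bit in this task's io_bitmap. */
    if (ioperm(0x80, 1, 1) != 0) {
        fprintf(stderr, "ioperm: %s\n", strerror(errno));
        return 1;
    }
    outb(0x42, 0x80);       /* harmless write to the POST-code port */
    ioperm(0x80, 1, 0);     /* drop the permission again */
    return 0;
}
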
+ */ +extern unsigned long mmu_cr4_features; + +static inline void set_in_cr4(unsigned long mask) +{ + unsigned cr4; + mmu_cr4_features |= mask; + cr4 = read_cr4(); + cr4 |= mask; + write_cr4(cr4); +} + +static inline void clear_in_cr4(unsigned long mask) +{ + unsigned cr4; + mmu_cr4_features &= ~mask; + cr4 = read_cr4(); + cr4 &= ~mask; + write_cr4(cr4); +} + +struct microcode_header { + unsigned int hdrver; + unsigned int rev; + unsigned int date; + unsigned int sig; + unsigned int cksum; + unsigned int ldrver; + unsigned int pf; + unsigned int datasize; + unsigned int totalsize; + unsigned int reserved[3]; +}; + +struct microcode { + struct microcode_header hdr; + unsigned int bits[0]; +}; + +typedef struct microcode microcode_t; +typedef struct microcode_header microcode_header_t; + +/* microcode format is extended from prescott processors */ +struct extended_signature { + unsigned int sig; + unsigned int pf; + unsigned int cksum; +}; + +struct extended_sigtable { + unsigned int count; + unsigned int cksum; + unsigned int reserved[3]; + struct extended_signature sigs[0]; +}; + +typedef struct { + unsigned long seg; +} mm_segment_t; + + +/* + * create a kernel thread without removing it from tasklists + */ +extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *); + +/* Prepare to copy thread state - unlazy all lazy status */ +extern void prepare_to_copy(struct task_struct *tsk); + +unsigned long get_wchan(struct task_struct *p); + +/* + * Generic CPUID function + * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx + * resulting in stale register contents being returned. + */ +static inline void cpuid(unsigned int op, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + *eax = op; + *ecx = 0; + __cpuid(eax, ebx, ecx, edx); +} + +/* Some CPUID calls want 'count' to be placed in ecx */ +static inline void cpuid_count(unsigned int op, int count, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + *eax = op; + *ecx = count; + __cpuid(eax, ebx, ecx, edx); +} + +/* + * CPUID functions returning a single datum + */ +static inline unsigned int cpuid_eax(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + return eax; +} +static inline unsigned int cpuid_ebx(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + return ebx; +} +static inline unsigned int cpuid_ecx(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + return ecx; +} +static inline unsigned int cpuid_edx(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; +} + +/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
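The rep_nop()/cpu_relax() pair that follows plays the same role in user-space spin loops. A minimal sketch, assuming GCC/Clang's __builtin_ia32_pause() and C11 atomics (build with -pthread):

/* spinwait.c - user-space analogue of cpu_relax() in a busy-wait loop. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int ready;

static void *producer(void *arg)
{
    (void)arg;
    atomic_store_explicit(&ready, 1, memory_order_release);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, producer, NULL);

    /* Spin until the flag flips; PAUSE ("rep; nop") tells the CPU this
     * is a spin-wait loop, saving power and easing SMT contention. */
    while (!atomic_load_explicit(&ready, memory_order_acquire))
        __builtin_ia32_pause();

    pthread_join(t, NULL);
    puts("flag observed");
    return 0;
}
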
*/ +static inline void rep_nop(void) +{ + __asm__ __volatile__("rep;nop": : :"memory"); +} + +/* Stop speculative execution */ +static inline void sync_core(void) +{ + int tmp; + asm volatile("cpuid" : "=a" (tmp) : "0" (1) + : "ebx", "ecx", "edx", "memory"); +} + +#define cpu_relax() rep_nop() + +static inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitor %eax,%ecx,%edx;" */ + asm volatile( + ".byte 0x0f,0x01,0xc8;" + : :"a" (eax), "c" (ecx), "d"(edx)); +} + +static inline void __mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax,%ecx;" */ + asm volatile( + ".byte 0x0f,0x01,0xc9;" + : :"a" (eax), "c" (ecx)); +} + +static inline void __sti_mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax,%ecx;" */ + asm volatile( + "sti; .byte 0x0f,0x01,0xc9;" + : :"a" (eax), "c" (ecx)); +} + +extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); + +extern int force_mwait; + +extern void select_idle_routine(const struct cpuinfo_x86 *c); + +extern unsigned long boot_option_idle_override; + +extern void enable_sep_cpu(void); +extern int sysenter_setup(void); + +/* Defined in head.S */ +extern struct desc_ptr early_gdt_descr; + +extern void cpu_set_gdt(int); +extern void switch_to_new_gdt(void); +extern void cpu_init(void); +extern void init_gdt(int cpu); + +/* from system description table in BIOS. Mostly for MCA use, but + * others may find it useful. */ +extern unsigned int machine_id; +extern unsigned int machine_submodel_id; +extern unsigned int BIOS_revision; +extern unsigned int mca_pentium_flag; + +/* Boot loader type from the setup header */ +extern int bootloader_type; + +extern char ignore_fpu_irq; +#define cache_line_size() (boot_cpu_data.x86_cache_alignment) + +#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +#ifdef CONFIG_X86_32 +#define BASE_PREFETCH ASM_NOP4 +#define ARCH_HAS_PREFETCH +#else +#define BASE_PREFETCH "prefetcht0 (%1)" +#endif + +/* Prefetch instructions for Pentium III and AMD Athlon */ +/* It's not worth to care about 3dnow! prefetches for the K6 + because they are microcoded there and very slow. + However we don't do prefetches for pre XP Athlons currently + That should be fixed. */ +static inline void prefetch(const void *x) +{ + alternative_input(BASE_PREFETCH, + "prefetchnta (%1)", + X86_FEATURE_XMM, + "r" (x)); +} + +/* 3dnow! prefetch to get an exclusive cache line. Useful for + spinlocks to avoid one state transition in the cache coherency protocol. */ +static inline void prefetchw(const void *x) +{ + alternative_input(BASE_PREFETCH, + "prefetchw (%1)", + X86_FEATURE_3DNOW, + "r" (x)); +} + +#define spin_lock_prefetch(x) prefetchw(x) #ifdef CONFIG_X86_32 -# include "processor_32.h" +/* + * User space process size: 3GB (default). + */ +#define TASK_SIZE (PAGE_OFFSET) + +#define INIT_THREAD { \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .vm86_info = NULL, \ + .sysenter_cs = __KERNEL_CS, \ + .io_bitmap_ptr = NULL, \ + .fs = __KERNEL_PERCPU, \ +} + +/* + * Note that the .io_bitmap member must be extra-big. This is because + * the CPU will access an additional byte beyond the end of the IO + * permission bitmap. The extra byte must be all 1 bits, and must + * be within the limit. + */ +#define INIT_TSS { \ + .x86_tss = { \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ + }, \ + .io_bitmap = { [0 ... 
IO_BITMAP_LONGS] = ~0 }, \ +} + +#define start_thread(regs, new_eip, new_esp) do { \ + __asm__("movl %0,%%gs": :"r" (0)); \ + regs->fs = 0; \ + set_fs(USER_DS); \ + regs->ds = __USER_DS; \ + regs->es = __USER_DS; \ + regs->ss = __USER_DS; \ + regs->cs = __USER_CS; \ + regs->ip = new_eip; \ + regs->sp = new_esp; \ +} while (0) + + +extern unsigned long thread_saved_pc(struct task_struct *tsk); + +#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) +#define KSTK_TOP(info) \ +({ \ + unsigned long *__ptr = (unsigned long *)(info); \ + (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ +}) + +/* + * The below -8 is to reserve 8 bytes on top of the ring0 stack. + * This is necessary to guarantee that the entire "struct pt_regs" + * is accessable even if the CPU haven't stored the SS/ESP registers + * on the stack (interrupt gate does not save these registers + * when switching to the same priv ring). + * Therefore beware: accessing the ss/esp fields of the + * "struct pt_regs" is possible, but they may contain the + * completely wrong values. + */ +#define task_pt_regs(task) \ +({ \ + struct pt_regs *__regs__; \ + __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ + __regs__ - 1; \ +}) + +#define KSTK_ESP(task) (task_pt_regs(task)->sp) + #else -# include "processor_64.h" +/* + * User space process size. 47bits minus one guard page. + */ +#define TASK_SIZE64 (0x800000000000UL - 4096) + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ + 0xc0000000 : 0xFFFFe000) + +#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE64) +#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ + IA32_PAGE_OFFSET : TASK_SIZE64) + +#define INIT_THREAD { \ + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ +} + +#define INIT_TSS { \ + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ +} + +#define start_thread(regs, new_rip, new_rsp) do { \ + asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ + load_gs_index(0); \ + (regs)->ip = (new_rip); \ + (regs)->sp = (new_rsp); \ + write_pda(oldrsp, (new_rsp)); \ + (regs)->cs = __USER_CS; \ + (regs)->ss = __USER_DS; \ + (regs)->flags = 0x200; \ + set_fs(USER_DS); \ +} while (0) + +/* + * Return saved PC of a blocked thread. + * What is this good for? it will be always the scheduler or ret_from_fork. + */ +#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) + +#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) +#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ +#endif /* CONFIG_X86_64 */ + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
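The address-space constants above are easy to sanity-check numerically. A throwaway sketch, assuming a 4 KiB page size, that just evaluates TASK_SIZE64 and the TASK_UNMAPPED_BASE formula used below:

/* layout.c - evaluate the user-VA constants from the header above,
 * assuming PAGE_SIZE = 4096 (values are illustrative only).
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

#define TASK_SIZE64     (0x800000000000UL - 4096)   /* 47 bits minus a guard page */

int main(void)
{
    printf("TASK_SIZE64        = %#lx\n", TASK_SIZE64);
    printf("TASK_UNMAPPED_BASE = %#lx\n", PAGE_ALIGN(TASK_SIZE64 / 3));
    printf("IA32_PAGE_OFFSET   = %#lx with ADDR_LIMIT_3GB, else %#lx\n",
           0xc0000000UL, 0xFFFFe000UL);
    return 0;
}
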
+ */ +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) + +#define KSTK_EIP(task) (task_pt_regs(task)->ip) + #endif diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h deleted file mode 100644 index 13976b086837..000000000000 --- a/include/asm-x86/processor_32.h +++ /dev/null @@ -1,786 +0,0 @@ -/* - * include/asm-i386/processor.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -#ifndef __ASM_I386_PROCESSOR_H -#define __ASM_I386_PROCESSOR_H - -#include <asm/vm86.h> -#include <asm/math_emu.h> -#include <asm/segment.h> -#include <asm/page.h> -#include <asm/types.h> -#include <asm/sigcontext.h> -#include <asm/cpufeature.h> -#include <asm/msr.h> -#include <asm/system.h> -#include <linux/cache.h> -#include <linux/threads.h> -#include <asm/percpu.h> -#include <linux/cpumask.h> -#include <linux/init.h> -#include <asm/processor-flags.h> - -/* flag for disabling the tsc */ -extern int tsc_disable; - -struct desc_struct { - unsigned long a,b; -}; - -#define desc_empty(desc) \ - (!((desc)->a | (desc)->b)) - -#define desc_equal(desc1, desc2) \ - (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) -/* - * Default implementation of macro that returns current - * instruction pointer ("program counter"). - */ -#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) - -/* - * CPU type and hardware bug flags. Kept separately for each CPU. - * Members of this structure are referenced in head.S, so think twice - * before touching them. [mj] - */ - -struct cpuinfo_x86 { - __u8 x86; /* CPU family */ - __u8 x86_vendor; /* CPU vendor */ - __u8 x86_model; - __u8 x86_mask; - char wp_works_ok; /* It doesn't on 386's */ - char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ - char hard_math; - char rfu; - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - unsigned long x86_capability[NCAPINTS]; - char x86_vendor_id[16]; - char x86_model_id[64]; - int x86_cache_size; /* in KB - valid for CPUS which support this - call */ - int x86_cache_alignment; /* In bytes */ - char fdiv_bug; - char f00f_bug; - char coma_bug; - char pad0; - int x86_power; - unsigned long loops_per_jiffy; -#ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ -#endif - unsigned char x86_max_cores; /* cpuid returned max cores value */ - unsigned char apicid; - unsigned short x86_clflush_size; -#ifdef CONFIG_SMP - unsigned char booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical processor id. 
*/ - __u8 cpu_core_id; /* Core id */ - __u8 cpu_index; /* index into per_cpu list */ -#endif -} __attribute__((__aligned__(SMP_CACHE_BYTES))); - -#define X86_VENDOR_INTEL 0 -#define X86_VENDOR_CYRIX 1 -#define X86_VENDOR_AMD 2 -#define X86_VENDOR_UMC 3 -#define X86_VENDOR_NEXGEN 4 -#define X86_VENDOR_CENTAUR 5 -#define X86_VENDOR_TRANSMETA 7 -#define X86_VENDOR_NSC 8 -#define X86_VENDOR_NUM 9 -#define X86_VENDOR_UNKNOWN 0xff - -/* - * capabilities of CPUs - */ - -extern struct cpuinfo_x86 boot_cpu_data; -extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -DECLARE_PER_CPU(struct tss_struct, init_tss); - -#ifdef CONFIG_SMP -DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); -#define cpu_data(cpu) per_cpu(cpu_info, cpu) -#define current_cpu_data cpu_data(smp_processor_id()) -#else -#define cpu_data(cpu) boot_cpu_data -#define current_cpu_data boot_cpu_data -#endif - -/* - * the following now lives in the per cpu area: - * extern int cpu_llc_id[NR_CPUS]; - */ -DECLARE_PER_CPU(u8, cpu_llc_id); -extern char ignore_fpu_irq; - -void __init cpu_detect(struct cpuinfo_x86 *c); - -extern void identify_boot_cpu(void); -extern void identify_secondary_cpu(struct cpuinfo_x86 *); -extern void print_cpu_info(struct cpuinfo_x86 *); -extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); -extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); -extern unsigned short num_cache_leaves; - -#ifdef CONFIG_X86_HT -extern void detect_ht(struct cpuinfo_x86 *c); -#else -static inline void detect_ht(struct cpuinfo_x86 *c) {} -#endif - -static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - /* ecx is often an input as well as an output. */ - __asm__("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (*eax), "2" (*ecx)); -} - -#define load_cr3(pgdir) write_cr3(__pa(pgdir)) - -/* - * Save the cr4 feature set we're using (ie - * Pentium 4MB enable and PPro Global page - * enable), so that any CPU's that boot up - * after us can get the correct flags. - */ -extern unsigned long mmu_cr4_features; - -static inline void set_in_cr4 (unsigned long mask) -{ - unsigned cr4; - mmu_cr4_features |= mask; - cr4 = read_cr4(); - cr4 |= mask; - write_cr4(cr4); -} - -static inline void clear_in_cr4 (unsigned long mask) -{ - unsigned cr4; - mmu_cr4_features &= ~mask; - cr4 = read_cr4(); - cr4 &= ~mask; - write_cr4(cr4); -} - -/* Stop speculative execution */ -static inline void sync_core(void) -{ - int tmp; - asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); -} - -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax,%ecx,%edx;" */ - asm volatile( - ".byte 0x0f,0x01,0xc8;" - : :"a" (eax), "c" (ecx), "d"(edx)); -} - -static inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax,%ecx;" */ - asm volatile( - ".byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); -} - -extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); - -/* from system description table in BIOS. Mostly for MCA use, but -others may find it useful. */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; -extern unsigned int mca_pentium_flag; - -/* Boot loader type from the setup header */ -extern int bootloader_type; - -/* - * User space process size: 3GB (default). 
- */ -#define TASK_SIZE (PAGE_OFFSET) - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) - -#define HAVE_ARCH_PICK_MMAP_LAYOUT - -extern void hard_disable_TSC(void); -extern void disable_TSC(void); -extern void hard_enable_TSC(void); - -/* - * Size of io_bitmap. - */ -#define IO_BITMAP_BITS 65536 -#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) -#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) -#define INVALID_IO_BITMAP_OFFSET 0x8000 -#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 - -struct i387_fsave_struct { - long cwd; - long swd; - long twd; - long fip; - long fcs; - long foo; - long fos; - long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - long status; /* software status information */ -}; - -struct i387_fxsave_struct { - unsigned short cwd; - unsigned short swd; - unsigned short twd; - unsigned short fop; - long fip; - long fcs; - long foo; - long fos; - long mxcsr; - long mxcsr_mask; - long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ - long padding[56]; -} __attribute__ ((aligned (16))); - -struct i387_soft_struct { - long cwd; - long swd; - long twd; - long fip; - long fcs; - long foo; - long fos; - long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - unsigned char ftop, changed, lookahead, no_update, rm, alimit; - struct info *info; - unsigned long entry_eip; -}; - -union i387_union { - struct i387_fsave_struct fsave; - struct i387_fxsave_struct fxsave; - struct i387_soft_struct soft; -}; - -typedef struct { - unsigned long seg; -} mm_segment_t; - -struct thread_struct; - -/* This is the TSS defined by the hardware. */ -struct i386_hw_tss { - unsigned short back_link,__blh; - unsigned long esp0; - unsigned short ss0,__ss0h; - unsigned long esp1; - unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ - unsigned long esp2; - unsigned short ss2,__ss2h; - unsigned long __cr3; - unsigned long eip; - unsigned long eflags; - unsigned long eax,ecx,edx,ebx; - unsigned long esp; - unsigned long ebp; - unsigned long esi; - unsigned long edi; - unsigned short es, __esh; - unsigned short cs, __csh; - unsigned short ss, __ssh; - unsigned short ds, __dsh; - unsigned short fs, __fsh; - unsigned short gs, __gsh; - unsigned short ldt, __ldth; - unsigned short trace, io_bitmap_base; -} __attribute__((packed)); - -struct tss_struct { - struct i386_hw_tss x86_tss; - - /* - * The extra 1 is there because the CPU will access an - * additional byte beyond the end of the IO permission - * bitmap. The extra byte must be all 1 bits, and must - * be within the limit. - */ - unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; - /* - * Cache the current maximum and the last task that used the bitmap: - */ - unsigned long io_bitmap_max; - struct thread_struct *io_bitmap_owner; - /* - * pads the TSS to be cacheline-aligned (size is 0x100) - */ - unsigned long __cacheline_filler[35]; - /* - * .. and then another 0x100 bytes for emergency kernel stack - */ - unsigned long stack[64]; -} __attribute__((packed)); - -#define ARCH_MIN_TASKALIGN 16 - -struct thread_struct { -/* cached TLS descriptors. 
*/ - struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; - unsigned long esp0; - unsigned long sysenter_cs; - unsigned long eip; - unsigned long esp; - unsigned long fs; - unsigned long gs; -/* Hardware debugging registers */ - unsigned long debugreg[8]; /* %%db0-7 debug registers */ -/* fault info */ - unsigned long cr2, trap_no, error_code; -/* floating point info */ - union i387_union i387; -/* virtual 86 mode info */ - struct vm86_struct __user * vm86_info; - unsigned long screen_bitmap; - unsigned long v86flags, v86mask, saved_esp0; - unsigned int saved_fs, saved_gs; -/* IO permissions */ - unsigned long *io_bitmap_ptr; - unsigned long iopl; -/* max allowed port in the bitmap, in bytes: */ - unsigned long io_bitmap_max; -}; - -#define INIT_THREAD { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ - .vm86_info = NULL, \ - .sysenter_cs = __KERNEL_CS, \ - .io_bitmap_ptr = NULL, \ - .fs = __KERNEL_PERCPU, \ -} - -/* - * Note that the .io_bitmap member must be extra-big. This is because - * the CPU will access an additional byte beyond the end of the IO - * permission bitmap. The extra byte must be all 1 bits, and must - * be within the limit. - */ -#define INIT_TSS { \ - .x86_tss = { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ - .ss0 = __KERNEL_DS, \ - .ss1 = __KERNEL_CS, \ - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ - }, \ - .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ -} - -#define start_thread(regs, new_eip, new_esp) do { \ - __asm__("movl %0,%%gs": :"r" (0)); \ - regs->xfs = 0; \ - set_fs(USER_DS); \ - regs->xds = __USER_DS; \ - regs->xes = __USER_DS; \ - regs->xss = __USER_DS; \ - regs->xcs = __USER_CS; \ - regs->eip = new_eip; \ - regs->esp = new_esp; \ -} while (0) - -/* Forward declaration, a strange C thing */ -struct task_struct; -struct mm_struct; - -/* Free all resources held by a thread. */ -extern void release_thread(struct task_struct *); - -/* Prepare to copy thread state - unlazy all lazy status */ -extern void prepare_to_copy(struct task_struct *tsk); - -/* - * create a kernel thread without removing it from tasklists - */ -extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); - -extern unsigned long thread_saved_pc(struct task_struct *tsk); -void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); - -unsigned long get_wchan(struct task_struct *p); - -#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) -#define KSTK_TOP(info) \ -({ \ - unsigned long *__ptr = (unsigned long *)(info); \ - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ -}) - -/* - * The below -8 is to reserve 8 bytes on top of the ring0 stack. - * This is necessary to guarantee that the entire "struct pt_regs" - * is accessable even if the CPU haven't stored the SS/ESP registers - * on the stack (interrupt gate does not save these registers - * when switching to the same priv ring). - * Therefore beware: accessing the xss/esp fields of the - * "struct pt_regs" is possible, but they may contain the - * completely wrong values. 
- */ -#define task_pt_regs(task) \ -({ \ - struct pt_regs *__regs__; \ - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ - __regs__ - 1; \ -}) - -#define KSTK_EIP(task) (task_pt_regs(task)->eip) -#define KSTK_ESP(task) (task_pt_regs(task)->esp) - - -struct microcode_header { - unsigned int hdrver; - unsigned int rev; - unsigned int date; - unsigned int sig; - unsigned int cksum; - unsigned int ldrver; - unsigned int pf; - unsigned int datasize; - unsigned int totalsize; - unsigned int reserved[3]; -}; - -struct microcode { - struct microcode_header hdr; - unsigned int bits[0]; -}; - -typedef struct microcode microcode_t; -typedef struct microcode_header microcode_header_t; - -/* microcode format is extended from prescott processors */ -struct extended_signature { - unsigned int sig; - unsigned int pf; - unsigned int cksum; -}; - -struct extended_sigtable { - unsigned int count; - unsigned int cksum; - unsigned int reserved[3]; - struct extended_signature sigs[0]; -}; - -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ -static inline void rep_nop(void) -{ - __asm__ __volatile__("rep;nop": : :"memory"); -} - -#define cpu_relax() rep_nop() - -static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread) -{ - tss->x86_tss.esp0 = thread->esp0; - /* This can only happen when SEP is enabled, no need to test "SEP"arately */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } -} - - -static inline unsigned long native_get_debugreg(int regno) -{ - unsigned long val = 0; /* Damn you, gcc! */ - - switch (regno) { - case 0: - asm("movl %%db0, %0" :"=r" (val)); break; - case 1: - asm("movl %%db1, %0" :"=r" (val)); break; - case 2: - asm("movl %%db2, %0" :"=r" (val)); break; - case 3: - asm("movl %%db3, %0" :"=r" (val)); break; - case 6: - asm("movl %%db6, %0" :"=r" (val)); break; - case 7: - asm("movl %%db7, %0" :"=r" (val)); break; - default: - BUG(); - } - return val; -} - -static inline void native_set_debugreg(int regno, unsigned long value) -{ - switch (regno) { - case 0: - asm("movl %0,%%db0" : /* no output */ :"r" (value)); - break; - case 1: - asm("movl %0,%%db1" : /* no output */ :"r" (value)); - break; - case 2: - asm("movl %0,%%db2" : /* no output */ :"r" (value)); - break; - case 3: - asm("movl %0,%%db3" : /* no output */ :"r" (value)); - break; - case 6: - asm("movl %0,%%db6" : /* no output */ :"r" (value)); - break; - case 7: - asm("movl %0,%%db7" : /* no output */ :"r" (value)); - break; - default: - BUG(); - } -} - -/* - * Set IOPL bits in EFLAGS from given mask - */ -static inline void native_set_iopl_mask(unsigned mask) -{ - unsigned int reg; - __asm__ __volatile__ ("pushfl;" - "popl %0;" - "andl %1, %0;" - "orl %2, %0;" - "pushl %0;" - "popfl" - : "=&r" (reg) - : "i" (~X86_EFLAGS_IOPL), "r" (mask)); -} - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define paravirt_enabled() 0 -#define __cpuid native_cpuid - -static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) -{ - native_load_esp0(tss, thread); -} - -/* - * These special macros can be used to get or set a debugging register - */ -#define get_debugreg(var, register) \ - (var) = native_get_debugreg(register) -#define set_debugreg(value, register) \ - native_set_debugreg(register, value) - -#define set_iopl_mask native_set_iopl_mask -#endif /* CONFIG_PARAVIRT */ - -/* - * Generic CPUID function - * clear %ecx since 
some cpus (Cyrix MII) do not set or clear %ecx - * resulting in stale register contents being returned. - */ -static inline void cpuid(unsigned int op, - unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - *eax = op; - *ecx = 0; - __cpuid(eax, ebx, ecx, edx); -} - -/* Some CPUID calls want 'count' to be placed in ecx */ -static inline void cpuid_count(unsigned int op, int count, - unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - *eax = op; - *ecx = count; - __cpuid(eax, ebx, ecx, edx); -} - -/* - * CPUID functions returning a single datum - */ -static inline unsigned int cpuid_eax(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return eax; -} -static inline unsigned int cpuid_ebx(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return ebx; -} -static inline unsigned int cpuid_ecx(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return ecx; -} -static inline unsigned int cpuid_edx(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return edx; -} - -/* generic versions from gas */ -#define GENERIC_NOP1 ".byte 0x90\n" -#define GENERIC_NOP2 ".byte 0x89,0xf6\n" -#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" -#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" -#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 -#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" -#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" -#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 - -/* Opteron nops */ -#define K8_NOP1 GENERIC_NOP1 -#define K8_NOP2 ".byte 0x66,0x90\n" -#define K8_NOP3 ".byte 0x66,0x66,0x90\n" -#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" -#define K8_NOP5 K8_NOP3 K8_NOP2 -#define K8_NOP6 K8_NOP3 K8_NOP3 -#define K8_NOP7 K8_NOP4 K8_NOP3 -#define K8_NOP8 K8_NOP4 K8_NOP4 - -/* K7 nops */ -/* uses eax dependencies (arbitary choice) */ -#define K7_NOP1 GENERIC_NOP1 -#define K7_NOP2 ".byte 0x8b,0xc0\n" -#define K7_NOP3 ".byte 0x8d,0x04,0x20\n" -#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" -#define K7_NOP5 K7_NOP4 ASM_NOP1 -#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" -#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" -#define K7_NOP8 K7_NOP7 ASM_NOP1 - -/* P6 nops */ -/* uses eax dependencies (Intel-recommended choice) */ -#define P6_NOP1 GENERIC_NOP1 -#define P6_NOP2 ".byte 0x66,0x90\n" -#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" -#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" -#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" -#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" -#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" -#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" - -#ifdef CONFIG_MK8 -#define ASM_NOP1 K8_NOP1 -#define ASM_NOP2 K8_NOP2 -#define ASM_NOP3 K8_NOP3 -#define ASM_NOP4 K8_NOP4 -#define ASM_NOP5 K8_NOP5 -#define ASM_NOP6 K8_NOP6 -#define ASM_NOP7 K8_NOP7 -#define ASM_NOP8 K8_NOP8 -#elif defined(CONFIG_MK7) -#define ASM_NOP1 K7_NOP1 -#define ASM_NOP2 K7_NOP2 -#define ASM_NOP3 K7_NOP3 -#define ASM_NOP4 K7_NOP4 -#define ASM_NOP5 K7_NOP5 -#define ASM_NOP6 K7_NOP6 -#define ASM_NOP7 K7_NOP7 -#define ASM_NOP8 K7_NOP8 -#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ - defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ - defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) -#define ASM_NOP1 P6_NOP1 -#define ASM_NOP2 P6_NOP2 -#define ASM_NOP3 P6_NOP3 -#define ASM_NOP4 P6_NOP4 -#define ASM_NOP5 P6_NOP5 
-#define ASM_NOP6 P6_NOP6 -#define ASM_NOP7 P6_NOP7 -#define ASM_NOP8 P6_NOP8 -#else -#define ASM_NOP1 GENERIC_NOP1 -#define ASM_NOP2 GENERIC_NOP2 -#define ASM_NOP3 GENERIC_NOP3 -#define ASM_NOP4 GENERIC_NOP4 -#define ASM_NOP5 GENERIC_NOP5 -#define ASM_NOP6 GENERIC_NOP6 -#define ASM_NOP7 GENERIC_NOP7 -#define ASM_NOP8 GENERIC_NOP8 -#endif - -#define ASM_NOP_MAX 8 - -/* Prefetch instructions for Pentium III and AMD Athlon */ -/* It's not worth to care about 3dnow! prefetches for the K6 - because they are microcoded there and very slow. - However we don't do prefetches for pre XP Athlons currently - That should be fixed. */ -#define ARCH_HAS_PREFETCH -static inline void prefetch(const void *x) -{ - alternative_input(ASM_NOP4, - "prefetchnta (%1)", - X86_FEATURE_XMM, - "r" (x)); -} - -#define ARCH_HAS_PREFETCH -#define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH - -/* 3dnow! prefetch to get an exclusive cache line. Useful for - spinlocks to avoid one state transition in the cache coherency protocol. */ -static inline void prefetchw(const void *x) -{ - alternative_input(ASM_NOP4, - "prefetchw (%1)", - X86_FEATURE_3DNOW, - "r" (x)); -} -#define spin_lock_prefetch(x) prefetchw(x) - -extern void select_idle_routine(const struct cpuinfo_x86 *c); - -#define cache_line_size() (boot_cpu_data.x86_cache_alignment) - -extern unsigned long boot_option_idle_override; -extern void enable_sep_cpu(void); -extern int sysenter_setup(void); - -/* Defined in head.S */ -extern struct Xgt_desc_struct early_gdt_descr; - -extern void cpu_set_gdt(int); -extern void switch_to_new_gdt(void); -extern void cpu_init(void); -extern void init_gdt(int cpu); - -extern int force_mwait; - -#endif /* __ASM_I386_PROCESSOR_H */ diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h deleted file mode 100644 index e4f19970a82b..000000000000 --- a/include/asm-x86/processor_64.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - * include/asm-x86_64/processor.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -#ifndef __ASM_X86_64_PROCESSOR_H -#define __ASM_X86_64_PROCESSOR_H - -#include <asm/segment.h> -#include <asm/page.h> -#include <asm/types.h> -#include <asm/sigcontext.h> -#include <asm/cpufeature.h> -#include <linux/threads.h> -#include <asm/msr.h> -#include <asm/current.h> -#include <asm/system.h> -#include <asm/mmsegment.h> -#include <asm/percpu.h> -#include <linux/personality.h> -#include <linux/cpumask.h> -#include <asm/processor-flags.h> - -#define TF_MASK 0x00000100 -#define IF_MASK 0x00000200 -#define IOPL_MASK 0x00003000 -#define NT_MASK 0x00004000 -#define VM_MASK 0x00020000 -#define AC_MASK 0x00040000 -#define VIF_MASK 0x00080000 /* virtual interrupt flag */ -#define VIP_MASK 0x00100000 /* virtual interrupt pending */ -#define ID_MASK 0x00200000 - -#define desc_empty(desc) \ - (!((desc)->a | (desc)->b)) - -#define desc_equal(desc1, desc2) \ - (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) - -/* - * Default implementation of macro that returns current - * instruction pointer ("program counter"). - */ -#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; }) - -/* - * CPU type and hardware bug flags. Kept separately for each CPU. 
- */ - -struct cpuinfo_x86 { - __u8 x86; /* CPU family */ - __u8 x86_vendor; /* CPU vendor */ - __u8 x86_model; - __u8 x86_mask; - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - __u32 x86_capability[NCAPINTS]; - char x86_vendor_id[16]; - char x86_model_id[64]; - int x86_cache_size; /* in KB */ - int x86_clflush_size; - int x86_cache_alignment; - int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ - __u8 x86_virt_bits, x86_phys_bits; - __u8 x86_max_cores; /* cpuid returned max cores value */ - __u32 x86_power; - __u32 extended_cpuid_level; /* Max extended CPUID function supported */ - unsigned long loops_per_jiffy; -#ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ -#endif - __u8 apicid; -#ifdef CONFIG_SMP - __u8 booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical Processor id. */ - __u8 cpu_core_id; /* Core id. */ - __u8 cpu_index; /* index into per_cpu list */ -#endif -} ____cacheline_aligned; - -#define X86_VENDOR_INTEL 0 -#define X86_VENDOR_CYRIX 1 -#define X86_VENDOR_AMD 2 -#define X86_VENDOR_UMC 3 -#define X86_VENDOR_NEXGEN 4 -#define X86_VENDOR_CENTAUR 5 -#define X86_VENDOR_TRANSMETA 7 -#define X86_VENDOR_NUM 8 -#define X86_VENDOR_UNKNOWN 0xff - -#ifdef CONFIG_SMP -DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); -#define cpu_data(cpu) per_cpu(cpu_info, cpu) -#define current_cpu_data cpu_data(smp_processor_id()) -#else -#define cpu_data(cpu) boot_cpu_data -#define current_cpu_data boot_cpu_data -#endif - -extern char ignore_irq13; - -extern void identify_cpu(struct cpuinfo_x86 *); -extern void print_cpu_info(struct cpuinfo_x86 *); -extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); -extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); -extern unsigned short num_cache_leaves; - -/* - * Save the cr4 feature set we're using (ie - * Pentium 4MB enable and PPro Global page - * enable), so that any CPU's that boot up - * after us can get the correct flags. - */ -extern unsigned long mmu_cr4_features; - -static inline void set_in_cr4 (unsigned long mask) -{ - mmu_cr4_features |= mask; - __asm__("movq %%cr4,%%rax\n\t" - "orq %0,%%rax\n\t" - "movq %%rax,%%cr4\n" - : : "irg" (mask) - :"ax"); -} - -static inline void clear_in_cr4 (unsigned long mask) -{ - mmu_cr4_features &= ~mask; - __asm__("movq %%cr4,%%rax\n\t" - "andq %0,%%rax\n\t" - "movq %%rax,%%cr4\n" - : : "irg" (~mask) - :"ax"); -} - - -/* - * User space process size. 47bits minus one guard page. - */ -#define TASK_SIZE64 (0x800000000000UL - 4096) - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000) - -#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64) -#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64) - -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3) - -/* - * Size of io_bitmap. 
- */ -#define IO_BITMAP_BITS 65536 -#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) -#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) -#define INVALID_IO_BITMAP_OFFSET 0x8000 - -struct i387_fxsave_struct { - u16 cwd; - u16 swd; - u16 twd; - u16 fop; - u64 rip; - u64 rdp; - u32 mxcsr; - u32 mxcsr_mask; - u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ - u32 padding[24]; -} __attribute__ ((aligned (16))); - -union i387_union { - struct i387_fxsave_struct fxsave; -}; - -struct tss_struct { - u32 reserved1; - u64 rsp0; - u64 rsp1; - u64 rsp2; - u64 reserved2; - u64 ist[7]; - u32 reserved3; - u32 reserved4; - u16 reserved5; - u16 io_bitmap_base; - /* - * The extra 1 is there because the CPU will access an - * additional byte beyond the end of the IO permission - * bitmap. The extra byte must be all 1 bits, and must - * be within the limit. Thus we have: - * - * 128 bytes, the bitmap itself, for ports 0..0x3ff - * 8 bytes, for an extra "long" of ~0UL - */ - unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; -} __attribute__((packed)) ____cacheline_aligned; - - -extern struct cpuinfo_x86 boot_cpu_data; -DECLARE_PER_CPU(struct tss_struct,init_tss); -/* Save the original ist values for checking stack pointers during debugging */ -struct orig_ist { - unsigned long ist[7]; -}; -DECLARE_PER_CPU(struct orig_ist, orig_ist); - -#ifdef CONFIG_X86_VSMP -#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) -#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) -#else -#define ARCH_MIN_TASKALIGN 16 -#define ARCH_MIN_MMSTRUCT_ALIGN 0 -#endif - -struct thread_struct { - unsigned long rsp0; - unsigned long rsp; - unsigned long userrsp; /* Copy from PDA */ - unsigned long fs; - unsigned long gs; - unsigned short es, ds, fsindex, gsindex; -/* Hardware debugging registers */ - unsigned long debugreg0; - unsigned long debugreg1; - unsigned long debugreg2; - unsigned long debugreg3; - unsigned long debugreg6; - unsigned long debugreg7; -/* fault info */ - unsigned long cr2, trap_no, error_code; -/* floating point info */ - union i387_union i387 __attribute__((aligned(16))); -/* IO permissions. the bitmap could be moved into the GDT, that would make - switch faster for a limited number of ioperm using tasks. -AK */ - int ioperm; - unsigned long *io_bitmap_ptr; - unsigned io_bitmap_max; -/* cached TLS descriptors. */ - u64 tls_array[GDT_ENTRY_TLS_ENTRIES]; -} __attribute__((aligned(16))); - -#define INIT_THREAD { \ - .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ -} - -#define INIT_TSS { \ - .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ -} - -#define INIT_MMAP \ -{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } - -#define start_thread(regs,new_rip,new_rsp) do { \ - asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ - load_gs_index(0); \ - (regs)->rip = (new_rip); \ - (regs)->rsp = (new_rsp); \ - write_pda(oldrsp, (new_rsp)); \ - (regs)->cs = __USER_CS; \ - (regs)->ss = __USER_DS; \ - (regs)->eflags = 0x200; \ - set_fs(USER_DS); \ -} while(0) - -#define get_debugreg(var, register) \ - __asm__("movq %%db" #register ", %0" \ - :"=r" (var)) -#define set_debugreg(value, register) \ - __asm__("movq %0,%%db" #register \ - : /* no output */ \ - :"r" (value)) - -struct task_struct; -struct mm_struct; - -/* Free all resources held by a thread. 
*/ -extern void release_thread(struct task_struct *); - -/* Prepare to copy thread state - unlazy all lazy status */ -extern void prepare_to_copy(struct task_struct *tsk); - -/* - * create a kernel thread without removing it from tasklists - */ -extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); - -/* - * Return saved PC of a blocked thread. - * What is this good for? it will be always the scheduler or ret_from_fork. - */ -#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) - -extern unsigned long get_wchan(struct task_struct *p); -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1) -#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip) -#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ - - -struct microcode_header { - unsigned int hdrver; - unsigned int rev; - unsigned int date; - unsigned int sig; - unsigned int cksum; - unsigned int ldrver; - unsigned int pf; - unsigned int datasize; - unsigned int totalsize; - unsigned int reserved[3]; -}; - -struct microcode { - struct microcode_header hdr; - unsigned int bits[0]; -}; - -typedef struct microcode microcode_t; -typedef struct microcode_header microcode_header_t; - -/* microcode format is extended from prescott processors */ -struct extended_signature { - unsigned int sig; - unsigned int pf; - unsigned int cksum; -}; - -struct extended_sigtable { - unsigned int count; - unsigned int cksum; - unsigned int reserved[3]; - struct extended_signature sigs[0]; -}; - - -#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) -#define ASM_NOP1 P6_NOP1 -#define ASM_NOP2 P6_NOP2 -#define ASM_NOP3 P6_NOP3 -#define ASM_NOP4 P6_NOP4 -#define ASM_NOP5 P6_NOP5 -#define ASM_NOP6 P6_NOP6 -#define ASM_NOP7 P6_NOP7 -#define ASM_NOP8 P6_NOP8 -#else -#define ASM_NOP1 K8_NOP1 -#define ASM_NOP2 K8_NOP2 -#define ASM_NOP3 K8_NOP3 -#define ASM_NOP4 K8_NOP4 -#define ASM_NOP5 K8_NOP5 -#define ASM_NOP6 K8_NOP6 -#define ASM_NOP7 K8_NOP7 -#define ASM_NOP8 K8_NOP8 -#endif - -/* Opteron nops */ -#define K8_NOP1 ".byte 0x90\n" -#define K8_NOP2 ".byte 0x66,0x90\n" -#define K8_NOP3 ".byte 0x66,0x66,0x90\n" -#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" -#define K8_NOP5 K8_NOP3 K8_NOP2 -#define K8_NOP6 K8_NOP3 K8_NOP3 -#define K8_NOP7 K8_NOP4 K8_NOP3 -#define K8_NOP8 K8_NOP4 K8_NOP4 - -/* P6 nops */ -/* uses eax dependencies (Intel-recommended choice) */ -#define P6_NOP1 ".byte 0x90\n" -#define P6_NOP2 ".byte 0x66,0x90\n" -#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" -#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" -#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" -#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" -#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" -#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" - -#define ASM_NOP_MAX 8 - -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
*/ -static inline void rep_nop(void) -{ - __asm__ __volatile__("rep;nop": : :"memory"); -} - -/* Stop speculative execution */ -static inline void sync_core(void) -{ - int tmp; - asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); -} - -#define ARCH_HAS_PREFETCHW 1 -static inline void prefetchw(void *x) -{ - alternative_input("prefetcht0 (%1)", - "prefetchw (%1)", - X86_FEATURE_3DNOW, - "r" (x)); -} - -#define ARCH_HAS_SPINLOCK_PREFETCH 1 - -#define spin_lock_prefetch(x) prefetchw(x) - -#define cpu_relax() rep_nop() - -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax,%ecx,%edx;" */ - asm volatile( - ".byte 0x0f,0x01,0xc8;" - : :"a" (eax), "c" (ecx), "d"(edx)); -} - -static inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax,%ecx;" */ - asm volatile( - ".byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); -} - -static inline void __sti_mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax,%ecx;" */ - asm volatile( - "sti; .byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); -} - -extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); - -#define stack_current() \ -({ \ - struct thread_info *ti; \ - asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ - ti->task; \ -}) - -#define cache_line_size() (boot_cpu_data.x86_cache_alignment) - -extern unsigned long boot_option_idle_override; -/* Boot loader type from the setup header */ -extern int bootloader_type; - -#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 - -#endif /* __ASM_X86_64_PROCESSOR_H */ diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index dabba55f7ed8..68563c0709ac 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h @@ -5,87 +5,24 @@ /* misc architecture specific prototypes */ -struct cpuinfo_x86; -struct pt_regs; - -extern void start_kernel(void); -extern void pda_init(int); - extern void early_idt_handler(void); -extern void mcheck_init(struct cpuinfo_x86 *c); extern void init_memory_mapping(unsigned long start, unsigned long end); -extern void system_call(void); -extern int kernel_syscall(void); +extern void system_call(void); extern void syscall_init(void); extern void ia32_syscall(void); -extern void ia32_cstar_target(void); -extern void ia32_sysenter_target(void); - -extern void config_acpi_tables(void); -extern void ia32_syscall(void); - -extern int pmtimer_mark_offset(void); -extern void pmtimer_resume(void); -extern void pmtimer_wait(unsigned); -extern unsigned int do_gettimeoffset_pm(void); -#ifdef CONFIG_X86_PM_TIMER -extern u32 pmtmr_ioport; -#else -#define pmtmr_ioport 0 -#endif -extern int nohpet; - -extern void early_printk(const char *fmt, ...) 
__attribute__((format(printf,1,2))); - -extern void early_identify_cpu(struct cpuinfo_x86 *c); - -extern int k8_scan_nodes(unsigned long start, unsigned long end); - -extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); -extern unsigned long numa_free_all_bootmem(void); +extern void ia32_cstar_target(void); +extern void ia32_sysenter_target(void); extern void reserve_bootmem_generic(unsigned long phys, unsigned len); -extern void load_gs_index(unsigned gs); - -extern unsigned long end_pfn_map; - -extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp); -extern void show_registers(struct pt_regs *regs); - -extern void exception_table_check(void); - -extern void acpi_reserve_bootmem(void); - -extern void swap_low_mappings(void); - -extern void __show_regs(struct pt_regs * regs); -extern void show_regs(struct pt_regs * regs); - extern void syscall32_cpu_init(void); -extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); - -extern void early_quirks(void); extern void check_efer(void); -extern void select_idle_routine(const struct cpuinfo_x86 *c); - -extern unsigned long table_start, table_end; - -extern int exception_trace; -extern unsigned cpu_khz; -extern unsigned tsc_khz; - extern int reboot_force; -extern int notsc_setup(char *); - -extern int gsi_irq_sharing(int gsi); - -extern int force_mwait; long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h index 7524e1233833..81a8ee4c55fc 100644 --- a/include/asm-x86/ptrace-abi.h +++ b/include/asm-x86/ptrace-abi.h @@ -78,4 +78,66 @@ # define PTRACE_SYSEMU_SINGLESTEP 32 #endif +#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ + +#ifndef __ASSEMBLY__ + +#include <asm/types.h> + +/* configuration/status structure used in PTRACE_BTS_CONFIG and + PTRACE_BTS_STATUS commands. +*/ +struct ptrace_bts_config { + /* requested or actual size of BTS buffer in bytes */ + u32 size; + /* bitmask of below flags */ + u32 flags; + /* buffer overflow signal */ + u32 signal; + /* actual size of bts_struct in bytes */ + u32 bts_size; +}; +#endif + +#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ +#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ +#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow + instead of wrapping around */ +#define PTRACE_BTS_O_CUT_SIZE 0x8 /* cut requested size to max available + instead of failing */ + +#define PTRACE_BTS_CONFIG 40 +/* Configure branch trace recording. + ADDR points to a struct ptrace_bts_config. + DATA gives the size of that buffer. + A new buffer is allocated, iff the size changes. + Returns the number of bytes read. +*/ +#define PTRACE_BTS_STATUS 41 +/* Return the current configuration in a struct ptrace_bts_config + pointed to by ADDR; DATA gives the size of that buffer. + Returns the number of bytes written. +*/ +#define PTRACE_BTS_SIZE 42 +/* Return the number of available BTS records. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_GET 43 +/* Get a single BTS record. + DATA defines the index into the BTS array, where 0 is the newest + entry, and higher indices refer to older entries. + ADDR is pointing to struct bts_struct (see asm/ds.h). +*/ +#define PTRACE_BTS_CLEAR 44 +/* Clear the BTS buffer. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_DRAIN 45 +/* Read all available BTS records and clear the buffer. + ADDR points to an array of struct bts_struct. 
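Taking the request numbers and struct layout from this hunk at face value, branch tracing would be driven from user space roughly as follows. This is only a sketch: the PTRACE_BTS_* interface exists only on kernels carrying this patch and on CPUs with BTS support, and the child must already be traced and stopped.

/* bts_config.c - sketch of configuring branch tracing for a stopped,
 * already-traced child via the PTRACE_BTS_* requests defined above.
 * Request numbers and struct layout are taken from this patch.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>

#define PTRACE_BTS_CONFIG   40
#define PTRACE_BTS_STATUS   41
#define PTRACE_BTS_O_TRACE  0x1

struct ptrace_bts_config {
    uint32_t size;      /* requested/actual buffer size in bytes */
    uint32_t flags;     /* PTRACE_BTS_O_* */
    uint32_t signal;    /* signal to send on buffer overflow */
    uint32_t bts_size;  /* actual sizeof(struct bts_struct) */
};

int request_branch_trace(pid_t child)
{
    struct ptrace_bts_config cfg = {
        .size  = 64 * 1024,             /* 64 KiB trace buffer */
        .flags = PTRACE_BTS_O_TRACE,    /* record branches only */
    };

    if (ptrace((enum __ptrace_request)PTRACE_BTS_CONFIG, child,
               &cfg, (void *)sizeof(cfg)) < 0) {
        perror("PTRACE_BTS_CONFIG");
        return -1;
    }
    /* Read back what the kernel actually set up. */
    if (ptrace((enum __ptrace_request)PTRACE_BTS_STATUS, child,
               &cfg, (void *)sizeof(cfg)) < 0) {
        perror("PTRACE_BTS_STATUS");
        return -1;
    }
    printf("BTS buffer: %u bytes, record size %u\n", cfg.size, cfg.bts_size);
    return 0;
}
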
+ DATA gives the size of that buffer. + BTS records are read from oldest to newest. + Returns number of BTS records drained. +*/ + #endif diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 51ddb2590870..d9e04b46a440 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -4,12 +4,15 @@ #include <linux/compiler.h> /* For __user */ #include <asm/ptrace-abi.h> + #ifndef __ASSEMBLY__ #ifdef __i386__ /* this struct defines the way the registers are stored on the stack during a system call. */ +#ifndef __KERNEL__ + struct pt_regs { long ebx; long ecx; @@ -21,7 +24,7 @@ struct pt_regs { int xds; int xes; int xfs; - /* int xgs; */ + /* int gs; */ long orig_eax; long eip; int xcs; @@ -30,44 +33,37 @@ struct pt_regs { int xss; }; -#ifdef __KERNEL__ +#else /* __KERNEL__ */ + +struct pt_regs { + long bx; + long cx; + long dx; + long si; + long di; + long bp; + long ax; + int ds; + int es; + int fs; + /* int gs; */ + long orig_ax; + long ip; + int cs; + long flags; + long sp; + int ss; +}; #include <asm/vm86.h> #include <asm/segment.h> -struct task_struct; -extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); - -/* - * user_mode_vm(regs) determines whether a register set came from user mode. - * This is true if V8086 mode was enabled OR if the register set was from - * protected mode with RPL-3 CS value. This tricky test checks that with - * one comparison. Many places in the kernel can bypass this full check - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. - */ -static inline int user_mode(struct pt_regs *regs) -{ - return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; -} -static inline int user_mode_vm(struct pt_regs *regs) -{ - return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; -} -static inline int v8086_mode(struct pt_regs *regs) -{ - return (regs->eflags & VM_MASK); -} - -#define instruction_pointer(regs) ((regs)->eip) -#define frame_pointer(regs) ((regs)->ebp) -#define stack_pointer(regs) ((unsigned long)(regs)) -#define regs_return_value(regs) ((regs)->eax) - -extern unsigned long profile_pc(struct pt_regs *regs); #endif /* __KERNEL__ */ #else /* __i386__ */ +#ifndef __KERNEL__ + struct pt_regs { unsigned long r15; unsigned long r14; @@ -96,47 +92,143 @@ struct pt_regs { /* top of stack page */ }; +#else /* __KERNEL__ */ + +struct pt_regs { + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long bp; + unsigned long bx; +/* arguments: non interrupts/non tracing syscalls only save upto here*/ + unsigned long r11; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long orig_ax; +/* end of arguments */ +/* cpu exception frame or undefined */ + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; +/* top of stack page */ +}; + +#endif /* __KERNEL__ */ +#endif /* !__i386__ */ + #ifdef __KERNEL__ -#define user_mode(regs) (!!((regs)->cs & 3)) -#define user_mode_vm(regs) user_mode(regs) -#define instruction_pointer(regs) ((regs)->rip) -#define frame_pointer(regs) ((regs)->rbp) -#define stack_pointer(regs) ((regs)->rsp) -#define regs_return_value(regs) ((regs)->rax) +/* the DS BTS struct is used for ptrace as well */ +#include <asm/ds.h> + +struct task_struct; + +extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier); extern unsigned long 
profile_pc(struct pt_regs *regs); + +extern unsigned long +convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); + +#ifdef CONFIG_X86_32 +extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); +#else void signal_fault(struct pt_regs *regs, void __user *frame, char *where); +#endif -struct task_struct; +#define regs_return_value(regs) ((regs)->ax) + +/* + * user_mode_vm(regs) determines whether a register set came from user mode. + * This is true if V8086 mode was enabled OR if the register set was from + * protected mode with RPL-3 CS value. This tricky test checks that with + * one comparison. Many places in the kernel can bypass this full check + * if they have already ruled out V8086 mode, so user_mode(regs) can be used. + */ +static inline int user_mode(struct pt_regs *regs) +{ +#ifdef CONFIG_X86_32 + return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; +#else + return !!(regs->cs & 3); +#endif +} + +static inline int user_mode_vm(struct pt_regs *regs) +{ +#ifdef CONFIG_X86_32 + return ((regs->cs & SEGMENT_RPL_MASK) | + (regs->flags & VM_MASK)) >= USER_RPL; +#else + return user_mode(regs); +#endif +} + +static inline int v8086_mode(struct pt_regs *regs) +{ +#ifdef CONFIG_X86_32 + return (regs->flags & VM_MASK); +#else + return 0; /* No V86 mode support in long mode */ +#endif +} + +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. So regs will be the current sp. + * + * This is valid only for kernel mode traps. + */ +static inline unsigned long kernel_trap_sp(struct pt_regs *regs) +{ +#ifdef CONFIG_X86_32 + return (unsigned long)regs; +#else + return regs->sp; +#endif +} + +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return regs->ip; +} + +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return regs->bp; +} + +/* + * These are defined as per linux/ptrace.h, which see. 
+ */ +#define arch_has_single_step() (1) +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); + +extern void user_enable_block_step(struct task_struct *); +#ifdef CONFIG_X86_DEBUGCTLMSR +#define arch_has_block_step() (1) +#else +#define arch_has_block_step() (boot_cpu_data.x86 >= 6) +#endif + +struct user_desc; +extern int do_get_thread_area(struct task_struct *p, int idx, + struct user_desc __user *info); +extern int do_set_thread_area(struct task_struct *p, int idx, + struct user_desc __user *info, int can_allocate); -extern unsigned long -convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs); - -enum { - EF_CF = 0x00000001, - EF_PF = 0x00000004, - EF_AF = 0x00000010, - EF_ZF = 0x00000040, - EF_SF = 0x00000080, - EF_TF = 0x00000100, - EF_IE = 0x00000200, - EF_DF = 0x00000400, - EF_OF = 0x00000800, - EF_IOPL = 0x00003000, - EF_IOPL_RING0 = 0x00000000, - EF_IOPL_RING1 = 0x00001000, - EF_IOPL_RING2 = 0x00002000, - EF_NT = 0x00004000, /* nested task */ - EF_RF = 0x00010000, /* resume */ - EF_VM = 0x00020000, /* virtual mode */ - EF_AC = 0x00040000, /* alignment */ - EF_VIF = 0x00080000, /* virtual interrupt */ - EF_VIP = 0x00100000, /* virtual interrupt pending */ - EF_ID = 0x00200000, /* id */ -}; #endif /* __KERNEL__ */ -#endif /* !__i386__ */ + #endif /* !__ASSEMBLY__ */ #endif diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 9b6dd093a9f7..46f725b0bc82 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h @@ -1,5 +1,20 @@ -#ifdef CONFIG_X86_32 -# include "resume-trace_32.h" -#else -# include "resume-trace_64.h" +#ifndef _ASM_X86_RESUME_TRACE_H +#define _ASM_X86_RESUME_TRACE_H + +#include <asm/asm.h> + +#define TRACE_RESUME(user) do { \ + if (pm_trace_enabled) { \ + void *tracedata; \ + asm volatile(_ASM_MOV_UL " $1f,%0\n" \ + ".section .tracedata,\"a\"\n" \ + "1:\t.word %c1\n\t" \ + _ASM_PTR " %c2\n" \ + ".previous" \ + :"=r" (tracedata) \ + : "i" (__LINE__), "i" (__FILE__)); \ + generate_resume_trace(tracedata, user); \ + } \ +} while (0) + #endif diff --git a/include/asm-x86/resume-trace_32.h b/include/asm-x86/resume-trace_32.h deleted file mode 100644 index ec9cfd656230..000000000000 --- a/include/asm-x86/resume-trace_32.h +++ /dev/null @@ -1,13 +0,0 @@ -#define TRACE_RESUME(user) do { \ - if (pm_trace_enabled) { \ - void *tracedata; \ - asm volatile("movl $1f,%0\n" \ - ".section .tracedata,\"a\"\n" \ - "1:\t.word %c1\n" \ - "\t.long %c2\n" \ - ".previous" \ - :"=r" (tracedata) \ - : "i" (__LINE__), "i" (__FILE__)); \ - generate_resume_trace(tracedata, user); \ - } \ -} while (0) diff --git a/include/asm-x86/resume-trace_64.h b/include/asm-x86/resume-trace_64.h deleted file mode 100644 index 34bf998fdf62..000000000000 --- a/include/asm-x86/resume-trace_64.h +++ /dev/null @@ -1,13 +0,0 @@ -#define TRACE_RESUME(user) do { \ - if (pm_trace_enabled) { \ - void *tracedata; \ - asm volatile("movq $1f,%0\n" \ - ".section .tracedata,\"a\"\n" \ - "1:\t.word %c1\n" \ - "\t.quad %c2\n" \ - ".previous" \ - :"=r" (tracedata) \ - : "i" (__LINE__), "i" (__FILE__)); \ - generate_resume_trace(tracedata, user); \ - } \ -} while (0) diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index c7350f6d2015..97cdcc9887ba 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h @@ -1,6 +1,6 @@ /* - * Derived from include/asm-i386/mach-summit/mach_mpparse.h - * and include/asm-i386/mach-default/bios_ebda.h + * Derived from 
include/asm-x86/mach-summit/mach_mpparse.h + * and include/asm-x86/mach-default/bios_ebda.h * * Author: Laurent Vivier <Laurent.Vivier@bull.net> */ diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h index f2b64a429e6b..6a8c0d645108 100644 --- a/include/asm-x86/rwlock.h +++ b/include/asm-x86/rwlock.h @@ -2,7 +2,6 @@ #define _ASM_X86_RWLOCK_H #define RW_LOCK_BIAS 0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 041906f3c6df..520a379f4b80 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h @@ -2,7 +2,7 @@ * * Written by David Howells (dhowells@redhat.com). * - * Derived from asm-i386/semaphore.h + * Derived from asm-x86/semaphore.h * * * The MSW of the count is the negated number of active writers and waiting @@ -44,10 +44,14 @@ struct rwsem_waiter; -extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); -extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); +extern asmregparm struct rw_semaphore * + rwsem_down_read_failed(struct rw_semaphore *sem); +extern asmregparm struct rw_semaphore * + rwsem_down_write_failed(struct rw_semaphore *sem); +extern asmregparm struct rw_semaphore * + rwsem_wake(struct rw_semaphore *); +extern asmregparm struct rw_semaphore * + rwsem_downgrade_wake(struct rw_semaphore *sem); /* * the semaphore definition diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h index 3a1e76257a27..d13c197866d6 100644 --- a/include/asm-x86/scatterlist.h +++ b/include/asm-x86/scatterlist.h @@ -1,5 +1,35 @@ +#ifndef _ASM_X86_SCATTERLIST_H +#define _ASM_X86_SCATTERLIST_H + +#include <asm/types.h> + +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +#ifdef CONFIG_X86_64 + unsigned int dma_length; +#endif +}; + +#define ARCH_HAS_SG_CHAIN +#define ISA_DMA_THRESHOLD (0x00ffffff) + +/* + * These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns. + */ +#define sg_dma_address(sg) ((sg)->dma_address) #ifdef CONFIG_X86_32 -# include "scatterlist_32.h" +# define sg_dma_len(sg) ((sg)->length) #else -# include "scatterlist_64.h" +# define sg_dma_len(sg) ((sg)->dma_length) +#endif + #endif diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h deleted file mode 100644 index 0e7d997a34be..000000000000 --- a/include/asm-x86/scatterlist_32.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _I386_SCATTERLIST_H -#define _I386_SCATTERLIST_H - -#include <asm/types.h> - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -#define ARCH_HAS_SG_CHAIN - -/* These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns. 
- */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) - -#define ISA_DMA_THRESHOLD (0x00ffffff) - -#endif /* !(_I386_SCATTERLIST_H) */ diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h deleted file mode 100644 index 1847c72befeb..000000000000 --- a/include/asm-x86/scatterlist_64.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _X8664_SCATTERLIST_H -#define _X8664_SCATTERLIST_H - -#include <asm/types.h> - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; - dma_addr_t dma_address; - unsigned int dma_length; -}; - -#define ARCH_HAS_SG_CHAIN - -#define ISA_DMA_THRESHOLD (0x00ffffff) - -/* These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->dma_length) - -#endif diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index 605068280e28..23f0535fec61 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h @@ -1,5 +1,204 @@ +#ifndef _ASM_X86_SEGMENT_H_ +#define _ASM_X86_SEGMENT_H_ + +/* Simple and small GDT entries for booting only */ + +#define GDT_ENTRY_BOOT_CS 2 +#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) + +#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) +#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) + +#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2) +#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8) + #ifdef CONFIG_X86_32 -# include "segment_32.h" +/* + * The layout of the per-CPU GDT under Linux: + * + * 0 - null + * 1 - reserved + * 2 - reserved + * 3 - reserved + * + * 4 - unused <==== new cacheline + * 5 - unused + * + * ------- start of TLS (Thread-Local Storage) segments: + * + * 6 - TLS segment #1 [ glibc's TLS segment ] + * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] + * 8 - TLS segment #3 + * 9 - reserved + * 10 - reserved + * 11 - reserved + * + * ------- start of kernel segments: + * + * 12 - kernel code segment <==== new cacheline + * 13 - kernel data segment + * 14 - default user CS + * 15 - default user DS + * 16 - TSS + * 17 - LDT + * 18 - PNPBIOS support (16->32 gate) + * 19 - PNPBIOS support + * 20 - PNPBIOS support + * 21 - PNPBIOS support + * 22 - PNPBIOS support + * 23 - APM BIOS support + * 24 - APM BIOS support + * 25 - APM BIOS support + * + * 26 - ESPFIX small SS + * 27 - per-cpu [ offset to per-cpu data area ] + * 28 - unused + * 29 - unused + * 30 - unused + * 31 - TSS for double fault handler + */ +#define GDT_ENTRY_TLS_MIN 6 +#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) + +#define GDT_ENTRY_DEFAULT_USER_CS 14 +#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) + +#define GDT_ENTRY_DEFAULT_USER_DS 15 +#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) + +#define GDT_ENTRY_KERNEL_BASE 12 + +#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) +#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) + +#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) +#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) + +#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) +#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) + +#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) +#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) + +#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) +#define __ESPFIX_SS 
(GDT_ENTRY_ESPFIX_SS * 8) + +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) +#ifdef CONFIG_SMP +#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) #else -# include "segment_64.h" +#define __KERNEL_PERCPU 0 +#endif + +#define GDT_ENTRY_DOUBLEFAULT_TSS 31 + +/* + * The GDT has 32 entries + */ +#define GDT_ENTRIES 32 + +/* The PnP BIOS entries in the GDT */ +#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) +#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) +#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) +#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) +#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) + +/* The PnP BIOS selectors */ +#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ +#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ +#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ +#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ +#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ + +/* Bottom two bits of selector give the ring privilege level */ +#define SEGMENT_RPL_MASK 0x3 +/* Bit 2 is table indicator (LDT/GDT) */ +#define SEGMENT_TI_MASK 0x4 + +/* User mode is privilege level 3 */ +#define USER_RPL 0x3 +/* LDT segment has TI set, GDT has it cleared */ +#define SEGMENT_LDT 0x4 +#define SEGMENT_GDT 0x0 + +/* + * Matching rules for certain types of segments. + */ + +/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */ +#define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8) + +/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ +#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) + +/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) + + +#else +#include <asm/cache.h> + +#define __KERNEL_CS 0x10 +#define __KERNEL_DS 0x18 + +#define __KERNEL32_CS 0x08 + +/* + * we cannot use the same code segment descriptor for user and kernel + * -- not even in the long flat mode, because of different DPL /kkeil + * The segment offset needs to contain a RPL. Grr. 
-AK + * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) + */ + +#define __USER32_CS 0x23 /* 4*8+3 */ +#define __USER_DS 0x2b /* 5*8+3 */ +#define __USER_CS 0x33 /* 6*8+3 */ +#define __USER32_DS __USER_DS + +#define GDT_ENTRY_TSS 8 /* needs two entries */ +#define GDT_ENTRY_LDT 10 /* needs two entries */ +#define GDT_ENTRY_TLS_MIN 12 +#define GDT_ENTRY_TLS_MAX 14 + +#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ +#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) + +/* TLS indexes for 64bit - hardcoded in arch_prctl */ +#define FS_TLS 0 +#define GS_TLS 1 + +#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) +#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) + +#define GDT_ENTRIES 16 + +#endif + +#ifndef CONFIG_PARAVIRT +#define get_kernel_rpl() 0 +#endif + +/* User mode is privilege level 3 */ +#define USER_RPL 0x3 +/* LDT segment has TI set, GDT has it cleared */ +#define SEGMENT_LDT 0x4 +#define SEGMENT_GDT 0x0 + +/* Bottom two bits of selector give the ring privilege level */ +#define SEGMENT_RPL_MASK 0x3 +/* Bit 2 is table indicator (LDT/GDT) */ +#define SEGMENT_TI_MASK 0x4 + +#define IDT_ENTRIES 256 +#define GDT_SIZE (GDT_ENTRIES * 8) +#define GDT_ENTRY_TLS_ENTRIES 3 +#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +extern const char early_idt_handlers[IDT_ENTRIES][10]; +#endif +#endif + #endif diff --git a/include/asm-x86/segment_32.h b/include/asm-x86/segment_32.h deleted file mode 100644 index 597a47c2515f..000000000000 --- a/include/asm-x86/segment_32.h +++ /dev/null @@ -1,148 +0,0 @@ -#ifndef _ASM_SEGMENT_H -#define _ASM_SEGMENT_H - -/* - * The layout of the per-CPU GDT under Linux: - * - * 0 - null - * 1 - reserved - * 2 - reserved - * 3 - reserved - * - * 4 - unused <==== new cacheline - * 5 - unused - * - * ------- start of TLS (Thread-Local Storage) segments: - * - * 6 - TLS segment #1 [ glibc's TLS segment ] - * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] - * 8 - TLS segment #3 - * 9 - reserved - * 10 - reserved - * 11 - reserved - * - * ------- start of kernel segments: - * - * 12 - kernel code segment <==== new cacheline - * 13 - kernel data segment - * 14 - default user CS - * 15 - default user DS - * 16 - TSS - * 17 - LDT - * 18 - PNPBIOS support (16->32 gate) - * 19 - PNPBIOS support - * 20 - PNPBIOS support - * 21 - PNPBIOS support - * 22 - PNPBIOS support - * 23 - APM BIOS support - * 24 - APM BIOS support - * 25 - APM BIOS support - * - * 26 - ESPFIX small SS - * 27 - per-cpu [ offset to per-cpu data area ] - * 28 - unused - * 29 - unused - * 30 - unused - * 31 - TSS for double fault handler - */ -#define GDT_ENTRY_TLS_ENTRIES 3 -#define GDT_ENTRY_TLS_MIN 6 -#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) - -#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) - -#define GDT_ENTRY_DEFAULT_USER_CS 14 -#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) - -#define GDT_ENTRY_DEFAULT_USER_DS 15 -#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) - -#define GDT_ENTRY_KERNEL_BASE 12 - -#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) -#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) - -#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) -#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) - -#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) -#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) - -#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) -#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) - -#define GDT_ENTRY_ESPFIX_SS 
(GDT_ENTRY_KERNEL_BASE + 14) -#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) - -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) -#ifdef CONFIG_SMP -#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) -#else -#define __KERNEL_PERCPU 0 -#endif - -#define GDT_ENTRY_DOUBLEFAULT_TSS 31 - -/* - * The GDT has 32 entries - */ -#define GDT_ENTRIES 32 -#define GDT_SIZE (GDT_ENTRIES * 8) - -/* Simple and small GDT entries for booting only */ - -#define GDT_ENTRY_BOOT_CS 2 -#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) - -#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) -#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) - -/* The PnP BIOS entries in the GDT */ -#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) -#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) -#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) -#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) -#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) - -/* The PnP BIOS selectors */ -#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ -#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ -#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ -#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ -#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ - -/* - * The interrupt descriptor table has room for 256 idt's, - * the global descriptor table is dependent on the number - * of tasks we can have.. - */ -#define IDT_ENTRIES 256 - -/* Bottom two bits of selector give the ring privilege level */ -#define SEGMENT_RPL_MASK 0x3 -/* Bit 2 is table indicator (LDT/GDT) */ -#define SEGMENT_TI_MASK 0x4 - -/* User mode is privilege level 3 */ -#define USER_RPL 0x3 -/* LDT segment has TI set, GDT has it cleared */ -#define SEGMENT_LDT 0x4 -#define SEGMENT_GDT 0x0 - -#ifndef CONFIG_PARAVIRT -#define get_kernel_rpl() 0 -#endif -/* - * Matching rules for certain types of segments. - */ - -/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */ -#define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8) - -/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ -#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) - -/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) - -#endif diff --git a/include/asm-x86/segment_64.h b/include/asm-x86/segment_64.h deleted file mode 100644 index 04b8ab21328f..000000000000 --- a/include/asm-x86/segment_64.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _ASM_SEGMENT_H -#define _ASM_SEGMENT_H - -#include <asm/cache.h> - -/* Simple and small GDT entries for booting only */ - -#define GDT_ENTRY_BOOT_CS 2 -#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) - -#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) -#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) - -#define __KERNEL_CS 0x10 -#define __KERNEL_DS 0x18 - -#define __KERNEL32_CS 0x08 - -/* - * we cannot use the same code segment descriptor for user and kernel - * -- not even in the long flat mode, because of different DPL /kkeil - * The segment offset needs to contain a RPL. Grr. 
-AK - * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) - */ - -#define __USER32_CS 0x23 /* 4*8+3 */ -#define __USER_DS 0x2b /* 5*8+3 */ -#define __USER_CS 0x33 /* 6*8+3 */ -#define __USER32_DS __USER_DS - -#define GDT_ENTRY_TSS 8 /* needs two entries */ -#define GDT_ENTRY_LDT 10 /* needs two entries */ -#define GDT_ENTRY_TLS_MIN 12 -#define GDT_ENTRY_TLS_MAX 14 - -#define GDT_ENTRY_TLS_ENTRIES 3 - -#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ -#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) - -/* TLS indexes for 64bit - hardcoded in arch_prctl */ -#define FS_TLS 0 -#define GS_TLS 1 - -#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) -#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) - -#define IDT_ENTRIES 256 -#define GDT_ENTRIES 16 -#define GDT_SIZE (GDT_ENTRIES * 8) -#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) - -#endif diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h index 835c1d751a9f..ac96d3804d0c 100644 --- a/include/asm-x86/semaphore_32.h +++ b/include/asm-x86/semaphore_32.h @@ -83,10 +83,10 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem) sema_init(sem, 0); } -fastcall void __down_failed(void /* special register calling convention */); -fastcall int __down_failed_interruptible(void /* params in registers */); -fastcall int __down_failed_trylock(void /* params in registers */); -fastcall void __up_wakeup(void /* special register calling convention */); +extern asmregparm void __down_failed(atomic_t *count_ptr); +extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr); +extern asmregparm int __down_failed_trylock(atomic_t *count_ptr); +extern asmregparm void __up_wakeup(atomic_t *count_ptr); /* * This is ugly, but we want the default case to fall through. 
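
The rwsem.h and semaphore_32.h hunks above drop the old FASTCALL()/fastcall annotations in favour of asmregparm and give the slow-path helpers real prototypes (atomic_t *count_ptr) instead of a bare void parameter list, so callers are now type-checked. A minimal user-space sketch of the idea, assuming GCC's regparm attribute on 32-bit x86 (the kernel's asmregparm is expected to expand to something equivalent there; the helper name below is made up for illustration, not taken from the patch):

/* slowpath() stands in for helpers such as __down_failed(): on i386 the
 * attribute makes the first arguments arrive in %eax/%edx/%ecx, so the
 * hand-written assembly that calls them does not have to push anything. */
#include <stdio.h>

#if defined(__i386__)
# define my_asmregparm __attribute__((regparm(3)))
#else
# define my_asmregparm /* 64-bit ABIs already pass arguments in registers */
#endif

static my_asmregparm int slowpath(int *count_ptr)
{
	printf("semaphore count fell to %d\n", *count_ptr);
	return 0;
}

int main(void)
{
	int count = -1;	/* e.g. the count seen after a contended down() */
	return slowpath(&count);
}
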
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 24d786e07b49..071e054abd82 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h @@ -3,6 +3,13 @@ #define COMMAND_LINE_SIZE 2048 +#ifndef __ASSEMBLY__ +char *machine_specific_memory_setup(void); +#ifndef CONFIG_PARAVIRT +#define paravirt_post_allocator_init() do {} while (0) +#endif +#endif /* __ASSEMBLY__ */ + #ifdef __KERNEL__ #ifdef __i386__ @@ -51,9 +58,7 @@ void __init add_memory_region(unsigned long long start, extern unsigned long init_pg_tables_end; -#ifndef CONFIG_PARAVIRT -#define paravirt_post_allocator_init() do {} while (0) -#endif + #endif /* __i386__ */ #endif /* _SETUP */ diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index c047f9dc3423..681deade5f00 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h @@ -63,20 +63,20 @@ struct sigcontext { unsigned short fs, __fsh; unsigned short es, __esh; unsigned short ds, __dsh; - unsigned long edi; - unsigned long esi; - unsigned long ebp; - unsigned long esp; - unsigned long ebx; - unsigned long edx; - unsigned long ecx; - unsigned long eax; + unsigned long di; + unsigned long si; + unsigned long bp; + unsigned long sp; + unsigned long bx; + unsigned long dx; + unsigned long cx; + unsigned long ax; unsigned long trapno; unsigned long err; - unsigned long eip; + unsigned long ip; unsigned short cs, __csh; - unsigned long eflags; - unsigned long esp_at_signal; + unsigned long flags; + unsigned long sp_at_signal; unsigned short ss, __ssh; struct _fpstate __user * fpstate; unsigned long oldmask; @@ -111,16 +111,16 @@ struct sigcontext { unsigned long r13; unsigned long r14; unsigned long r15; - unsigned long rdi; - unsigned long rsi; - unsigned long rbp; - unsigned long rbx; - unsigned long rdx; - unsigned long rax; - unsigned long rcx; - unsigned long rsp; - unsigned long rip; - unsigned long eflags; /* RFLAGS */ + unsigned long di; + unsigned long si; + unsigned long bp; + unsigned long bx; + unsigned long dx; + unsigned long ax; + unsigned long cx; + unsigned long sp; + unsigned long ip; + unsigned long flags; unsigned short cs; unsigned short gs; unsigned short fs; diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 3d657038ab7c..6ffab4fd593a 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h @@ -48,20 +48,20 @@ struct sigcontext_ia32 { unsigned short fs, __fsh; unsigned short es, __esh; unsigned short ds, __dsh; - unsigned int edi; - unsigned int esi; - unsigned int ebp; - unsigned int esp; - unsigned int ebx; - unsigned int edx; - unsigned int ecx; - unsigned int eax; + unsigned int di; + unsigned int si; + unsigned int bp; + unsigned int sp; + unsigned int bx; + unsigned int dx; + unsigned int cx; + unsigned int ax; unsigned int trapno; unsigned int err; - unsigned int eip; + unsigned int ip; unsigned short cs, __csh; - unsigned int eflags; - unsigned int esp_at_signal; + unsigned int flags; + unsigned int sp_at_signal; unsigned short ss, __ssh; unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ unsigned int oldmask; diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index 987a422a2c78..aee7eca585ab 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h @@ -245,21 +245,14 @@ static __inline__ int sigfindinword(unsigned long word) struct pt_regs; -#define ptrace_signal_deliver(regs, cookie) \ - do { \ - if (current->ptrace & PT_DTRACE) { \ - current->ptrace &= ~PT_DTRACE; \ - (regs)->eflags &= ~TF_MASK; \ 
- } \ - } while (0) - #else /* __i386__ */ #undef __HAVE_ARCH_SIG_BITOPS +#endif /* !__i386__ */ + #define ptrace_signal_deliver(regs, cookie) do { } while (0) -#endif /* !__i386__ */ #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index e10b7affdfe5..56152e312287 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -1,51 +1,41 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H +#ifndef __ASSEMBLY__ +#include <linux/cpumask.h> +#include <linux/init.h> + /* * We need the APIC definitions automatically as part of 'smp.h' */ -#ifndef __ASSEMBLY__ -#include <linux/kernel.h> -#include <linux/threads.h> -#include <linux/cpumask.h> +#ifdef CONFIG_X86_LOCAL_APIC +# include <asm/mpspec.h> +# include <asm/apic.h> +# ifdef CONFIG_X86_IO_APIC +# include <asm/io_apic.h> +# endif #endif -#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__) -#include <linux/bitops.h> -#include <asm/mpspec.h> -#include <asm/apic.h> -#ifdef CONFIG_X86_IO_APIC -#include <asm/io_apic.h> -#endif -#endif +extern cpumask_t cpu_callout_map; +extern cpumask_t cpu_callin_map; -#define BAD_APICID 0xFFu -#ifdef CONFIG_SMP -#ifndef __ASSEMBLY__ +extern int smp_num_siblings; +extern unsigned int num_processors; -/* - * Private routines/data - */ - extern void smp_alloc_memory(void); -extern int pic_mode; -extern int smp_num_siblings; -DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -DECLARE_PER_CPU(cpumask_t, cpu_core_map); +extern void lock_ipi_call_lock(void); +extern void unlock_ipi_call_lock(void); extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); -#define MAX_APICID 256 extern u8 __initdata x86_cpu_to_apicid_init[]; -extern void *x86_cpu_to_apicid_ptr; -DECLARE_PER_CPU(u8, x86_cpu_to_apicid); - -#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +extern void *x86_cpu_to_apicid_early_ptr; -extern void set_cpu_sibling_map(int cpu); +DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); +DECLARE_PER_CPU(cpumask_t, cpu_core_map); +DECLARE_PER_CPU(u8, cpu_llc_id); +DECLARE_PER_CPU(u8, x86_cpu_to_apicid); #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); @@ -53,6 +43,9 @@ extern void cpu_uninit(void); extern void remove_siblinginfo(int cpu); #endif +/* Globals due to paravirt */ +extern void set_cpu_sibling_map(int cpu); + struct smp_ops { void (*smp_prepare_boot_cpu)(void); @@ -67,6 +60,7 @@ struct smp_ops int wait); }; +#ifdef CONFIG_SMP extern struct smp_ops smp_ops; static inline void smp_prepare_boot_cpu(void) @@ -107,10 +101,12 @@ int native_cpu_up(unsigned int cpunum); void native_smp_cpus_done(unsigned int max_cpus); #ifndef CONFIG_PARAVIRT -#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \ -do { } while (0) +#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif +extern int __cpu_disable(void); +extern void __cpu_die(unsigned int cpu); + /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. 
We map APIC_BASE very early in page_setup(), @@ -119,9 +115,11 @@ do { } while (0) DECLARE_PER_CPU(int, cpu_number); #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) -extern cpumask_t cpu_callout_map; -extern cpumask_t cpu_callin_map; -extern cpumask_t cpu_possible_map; +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) + +extern int safe_smp_processor_id(void); + +void __cpuinit smp_store_cpu_info(int id); /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) @@ -129,56 +127,39 @@ static inline int num_booting_cpus(void) return cpus_weight(cpu_callout_map); } -extern int safe_smp_processor_id(void); -extern int __cpu_disable(void); -extern void __cpu_die(unsigned int cpu); -extern unsigned int num_processors; - -void __cpuinit smp_store_cpu_info(int id); - -#endif /* !__ASSEMBLY__ */ - #else /* CONFIG_SMP */ #define safe_smp_processor_id() 0 #define cpu_physical_id(cpu) boot_cpu_physical_apicid -#define NO_PROC_ID 0xFF /* No processor magic marker */ - -#endif /* CONFIG_SMP */ - -#ifndef __ASSEMBLY__ +#endif /* !CONFIG_SMP */ #ifdef CONFIG_X86_LOCAL_APIC -#ifdef APIC_DEFINITION +static __inline int logical_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); +} + +# ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); -#else -#include <mach_apicdef.h> +# else +# include <mach_apicdef.h> static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); + return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); } -#endif /* APIC_DEFINITION */ +# endif /* APIC_DEFINITION */ #else /* CONFIG_X86_LOCAL_APIC */ -#ifndef CONFIG_SMP -#define hard_smp_processor_id() 0 -#endif +# ifndef CONFIG_SMP +# define hard_smp_processor_id() 0 +# endif #endif /* CONFIG_X86_LOCAL_APIC */ -extern u8 apicid_2_node[]; - -#ifdef CONFIG_X86_LOCAL_APIC -static __inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); -} -#endif -#endif - +#endif /* !ASSEMBLY */ #endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index ab612b0ff270..e0a75519ad21 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -1,130 +1,101 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#include <linux/threads.h> #include <linux/cpumask.h> -#include <linux/bitops.h> #include <linux/init.h> -extern int disable_apic; -#include <asm/mpspec.h> +/* + * We need the APIC definitions automatically as part of 'smp.h' + */ #include <asm/apic.h> #include <asm/io_apic.h> -#include <asm/thread_info.h> - -#ifdef CONFIG_SMP - +#include <asm/mpspec.h> #include <asm/pda.h> +#include <asm/thread_info.h> -struct pt_regs; - -extern cpumask_t cpu_present_mask; -extern cpumask_t cpu_possible_map; -extern cpumask_t cpu_online_map; extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; -/* - * Private routines/data - */ - +extern int smp_num_siblings; +extern unsigned int num_processors; + extern void smp_alloc_memory(void); -extern volatile unsigned long smp_invalidate_needed; extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); -extern int smp_num_siblings; -extern void 
smp_send_reschedule(int cpu); + extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -/* - * cpu_sibling_map and cpu_core_map now live - * in the per cpu area - * - * extern cpumask_t cpu_sibling_map[NR_CPUS]; - * extern cpumask_t cpu_core_map[NR_CPUS]; - */ +extern u16 __initdata x86_cpu_to_apicid_init[]; +extern u16 __initdata x86_bios_cpu_apicid_init[]; +extern void *x86_cpu_to_apicid_early_ptr; +extern void *x86_bios_cpu_apicid_early_ptr; + DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u8, cpu_llc_id); - -#define SMP_TRAMPOLINE_BASE 0x6000 - -/* - * On x86 all CPUs are mapped 1:1 to the APIC space. - * This simplifies scheduling and IPI sending and - * compresses data structures. - */ +DECLARE_PER_CPU(u16, cpu_llc_id); +DECLARE_PER_CPU(u16, x86_cpu_to_apicid); +DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); -static inline int num_booting_cpus(void) +static inline int cpu_present_to_apicid(int mps_cpu) { - return cpus_weight(cpu_callout_map); + if (cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; } -#define raw_smp_processor_id() read_pda(cpunumber) +#ifdef CONFIG_SMP + +#define SMP_TRAMPOLINE_BASE 0x6000 extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); -extern unsigned num_processors; extern unsigned __cpuinitdata disabled_cpus; -#define NO_PROC_ID 0xFF /* No processor magic marker */ - -#endif /* CONFIG_SMP */ +#define raw_smp_processor_id() read_pda(cpunumber) +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -#define safe_smp_processor_id() smp_processor_id() - -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); -} +#define stack_smp_processor_id() \ + ({ \ + struct thread_info *ti; \ + __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ + ti->cpu; \ +}) /* - * Some lowlevel functions might want to know about - * the real APIC ID <-> CPU # mapping. + * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies + * scheduling and IPI sending and compresses data structures. 
*/ -extern u8 __initdata x86_cpu_to_apicid_init[]; -extern void *x86_cpu_to_apicid_ptr; -DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */ -extern u8 bios_cpu_apicid[]; - -static inline int cpu_present_to_apicid(int mps_cpu) +static inline int num_booting_cpus(void) { - if (mps_cpu < NR_CPUS) - return (int)bios_cpu_apicid[mps_cpu]; - else - return BAD_APICID; + return cpus_weight(cpu_callout_map); } -#ifndef CONFIG_SMP +extern void smp_send_reschedule(int cpu); + +#else /* CONFIG_SMP */ + +extern unsigned int boot_cpu_id; +#define cpu_physical_id(cpu) boot_cpu_id #define stack_smp_processor_id() 0 -#define cpu_logical_map(x) (x) -#else -#include <asm/thread_info.h> -#define stack_smp_processor_id() \ -({ \ - struct thread_info *ti; \ - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ - ti->cpu; \ -}) -#endif + +#endif /* !CONFIG_SMP */ + +#define safe_smp_processor_id() smp_processor_id() static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); + return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); +} + +static inline int hard_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); } -#ifdef CONFIG_SMP -#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -#else -extern unsigned int boot_cpu_id; -#define cpu_physical_id(cpu) boot_cpu_id -#endif /* !CONFIG_SMP */ #endif diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index 3f203b1d9ee8..fa58cd55411a 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h @@ -1,5 +1,34 @@ +#ifndef _ASM_X86_SPARSEMEM_H +#define _ASM_X86_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM +/* + * generic non-linear memory support: + * + * 1) we will not split memory into more chunks than will fit into the flags + * field of the struct page + * + * SECTION_SIZE_BITS 2^n: size of each section + * MAX_PHYSADDR_BITS 2^n: max size of physical address space + * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space + * + */ + #ifdef CONFIG_X86_32 -# include "sparsemem_32.h" -#else -# include "sparsemem_64.h" +# ifdef CONFIG_X86_PAE +# define SECTION_SIZE_BITS 30 +# define MAX_PHYSADDR_BITS 36 +# define MAX_PHYSMEM_BITS 36 +# else +# define SECTION_SIZE_BITS 26 +# define MAX_PHYSADDR_BITS 32 +# define MAX_PHYSMEM_BITS 32 +# endif +#else /* CONFIG_X86_32 */ +# define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ +# define MAX_PHYSADDR_BITS 40 +# define MAX_PHYSMEM_BITS 40 +#endif + +#endif /* CONFIG_SPARSEMEM */ #endif diff --git a/include/asm-x86/sparsemem_32.h b/include/asm-x86/sparsemem_32.h deleted file mode 100644 index cfeed990585f..000000000000 --- a/include/asm-x86/sparsemem_32.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _I386_SPARSEMEM_H -#define _I386_SPARSEMEM_H -#ifdef CONFIG_SPARSEMEM - -/* - * generic non-linear memory support: - * - * 1) we will not split memory into more chunks than will fit into the - * flags field of the struct page - */ - -/* - * SECTION_SIZE_BITS 2^N: how big each section will be - * MAX_PHYSADDR_BITS 2^N: how much physical address space we have - * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space - */ -#ifdef CONFIG_X86_PAE -#define SECTION_SIZE_BITS 30 -#define MAX_PHYSADDR_BITS 36 -#define MAX_PHYSMEM_BITS 36 -#else -#define SECTION_SIZE_BITS 26 -#define MAX_PHYSADDR_BITS 32 -#define MAX_PHYSMEM_BITS 
32 -#endif - -/* XXX: FIXME -- wli */ -#define kern_addr_valid(kaddr) (0) - -#endif /* CONFIG_SPARSEMEM */ -#endif /* _I386_SPARSEMEM_H */ diff --git a/include/asm-x86/sparsemem_64.h b/include/asm-x86/sparsemem_64.h deleted file mode 100644 index dabb16714a71..000000000000 --- a/include/asm-x86/sparsemem_64.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef _ASM_X86_64_SPARSEMEM_H -#define _ASM_X86_64_SPARSEMEM_H 1 - -#ifdef CONFIG_SPARSEMEM - -/* - * generic non-linear memory support: - * - * 1) we will not split memory into more chunks than will fit into the flags - * field of the struct page - * - * SECTION_SIZE_BITS 2^n: size of each section - * MAX_PHYSADDR_BITS 2^n: max size of physical address space - * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space - * - */ - -#define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ -#define MAX_PHYSADDR_BITS 40 -#define MAX_PHYSMEM_BITS 40 - -extern int early_pfn_to_nid(unsigned long pfn); - -#endif /* CONFIG_SPARSEMEM */ - -#endif /* _ASM_X86_64_SPARSEMEM_H */ diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index d74d85e71dcb..23804c1890ff 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h @@ -1,5 +1,296 @@ +#ifndef _X86_SPINLOCK_H_ +#define _X86_SPINLOCK_H_ + +#include <asm/atomic.h> +#include <asm/rwlock.h> +#include <asm/page.h> +#include <asm/processor.h> +#include <linux/compiler.h> + +/* + * Your basic SMP spinlocks, allowing only a single CPU anywhere + * + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * These are fair FIFO ticket locks, which are currently limited to 256 + * CPUs. + * + * (the type definitions are in asm/spinlock_types.h) + */ + #ifdef CONFIG_X86_32 -# include "spinlock_32.h" +typedef char _slock_t; +# define LOCK_INS_DEC "decb" +# define LOCK_INS_XCH "xchgb" +# define LOCK_INS_MOV "movb" +# define LOCK_INS_CMP "cmpb" +# define LOCK_PTR_REG "a" #else -# include "spinlock_64.h" +typedef int _slock_t; +# define LOCK_INS_DEC "decl" +# define LOCK_INS_XCH "xchgl" +# define LOCK_INS_MOV "movl" +# define LOCK_INS_CMP "cmpl" +# define LOCK_PTR_REG "D" +#endif + +#if defined(CONFIG_X86_32) && \ + (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)) +/* + * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock + * (PPro errata 66, 92) + */ +# define UNLOCK_LOCK_PREFIX LOCK_PREFIX +#else +# define UNLOCK_LOCK_PREFIX +#endif + +/* + * Ticket locks are conceptually two parts, one indicating the current head of + * the queue, and the other indicating the current tail. The lock is acquired + * by atomically noting the tail and incrementing it by one (thus adding + * ourself to the queue and noting our position), then waiting until the head + * becomes equal to the the initial value of the tail. + * + * We use an xadd covering *both* parts of the lock, to increment the tail and + * also load the position of the head, which takes care of memory ordering + * issues and should be optimal for the uncontended case. Note the tail must be + * in the high part, because a wide xadd increment of the low part would carry + * up and contaminate the high part. + * + * With fewer than 2^8 possible CPUs, we can use x86's partial registers to + * save some instructions and make the code more elegant. There really isn't + * much between them in performance though, especially as locks are out of line. 
+ */ +#if (NR_CPUS < 256) +static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +{ + int tmp = *(volatile signed int *)(&(lock)->slock); + + return (((tmp >> 8) & 0xff) != (tmp & 0xff)); +} + +static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +{ + int tmp = *(volatile signed int *)(&(lock)->slock); + + return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; +} + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + short inc = 0x0100; + + __asm__ __volatile__ ( + LOCK_PREFIX "xaddw %w0, %1\n" + "1:\t" + "cmpb %h0, %b0\n\t" + "je 2f\n\t" + "rep ; nop\n\t" + "movb %1, %b0\n\t" + /* don't need lfence here, because loads are in-order */ + "jmp 1b\n" + "2:" + :"+Q" (inc), "+m" (lock->slock) + : + :"memory", "cc"); +} + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + int tmp; + short new; + + asm volatile( + "movw %2,%w0\n\t" + "cmpb %h0,%b0\n\t" + "jne 1f\n\t" + "movw %w0,%w1\n\t" + "incb %h1\n\t" + "lock ; cmpxchgw %w1,%2\n\t" + "1:" + "sete %b1\n\t" + "movzbl %b1,%0\n\t" + :"=&a" (tmp), "=Q" (new), "+m" (lock->slock) + : + : "memory", "cc"); + + return tmp; +} + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + __asm__ __volatile__( + UNLOCK_LOCK_PREFIX "incb %0" + :"+m" (lock->slock) + : + :"memory", "cc"); +} +#else +static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +{ + int tmp = *(volatile signed int *)(&(lock)->slock); + + return (((tmp >> 16) & 0xffff) != (tmp & 0xffff)); +} + +static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +{ + int tmp = *(volatile signed int *)(&(lock)->slock); + + return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; +} + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + int inc = 0x00010000; + int tmp; + + __asm__ __volatile__ ( + "lock ; xaddl %0, %1\n" + "movzwl %w0, %2\n\t" + "shrl $16, %0\n\t" + "1:\t" + "cmpl %0, %2\n\t" + "je 2f\n\t" + "rep ; nop\n\t" + "movzwl %1, %2\n\t" + /* don't need lfence here, because loads are in-order */ + "jmp 1b\n" + "2:" + :"+Q" (inc), "+m" (lock->slock), "=r" (tmp) + : + :"memory", "cc"); +} + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + int tmp; + int new; + + asm volatile( + "movl %2,%0\n\t" + "movl %0,%1\n\t" + "roll $16, %0\n\t" + "cmpl %0,%1\n\t" + "jne 1f\n\t" + "addl $0x00010000, %1\n\t" + "lock ; cmpxchgl %1,%2\n\t" + "1:" + "sete %b1\n\t" + "movzbl %b1,%0\n\t" + :"=&a" (tmp), "=r" (new), "+m" (lock->slock) + : + : "memory", "cc"); + + return tmp; +} + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + __asm__ __volatile__( + UNLOCK_LOCK_PREFIX "incw %0" + :"+m" (lock->slock) + : + :"memory", "cc"); +} +#endif + +static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +{ + while (__raw_spin_is_locked(lock)) + cpu_relax(); +} + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * NOTE! it is quite common to have readers in interrupts + * but no interrupt writers. For those circumstances we + * can "mix" irq-safe locks - any writer needs to get a + * irq-safe write-lock, but readers can get non-irqsafe + * read-locks. + * + * On x86, we implement read-write locks as a 32-bit counter + * with the high bit (sign) being the "contended" bit. + */ + +/** + * read_can_lock - would read_trylock() succeed? + * @lock: the rwlock in question. 
+ */ +static inline int __raw_read_can_lock(raw_rwlock_t *lock) +{ + return (int)(lock)->lock > 0; +} + +/** + * write_can_lock - would write_trylock() succeed? + * @lock: the rwlock in question. + */ +static inline int __raw_write_can_lock(raw_rwlock_t *lock) +{ + return (lock)->lock == RW_LOCK_BIAS; +} + +static inline void __raw_read_lock(raw_rwlock_t *rw) +{ + asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" + "jns 1f\n" + "call __read_lock_failed\n\t" + "1:\n" + ::LOCK_PTR_REG (rw) : "memory"); +} + +static inline void __raw_write_lock(raw_rwlock_t *rw) +{ + asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" + "jz 1f\n" + "call __write_lock_failed\n\t" + "1:\n" + ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); +} + +static inline int __raw_read_trylock(raw_rwlock_t *lock) +{ + atomic_t *count = (atomic_t *)lock; + + atomic_dec(count); + if (atomic_read(count) >= 0) + return 1; + atomic_inc(count); + return 0; +} + +static inline int __raw_write_trylock(raw_rwlock_t *lock) +{ + atomic_t *count = (atomic_t *)lock; + + if (atomic_sub_and_test(RW_LOCK_BIAS, count)) + return 1; + atomic_add(RW_LOCK_BIAS, count); + return 0; +} + +static inline void __raw_read_unlock(raw_rwlock_t *rw) +{ + asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); +} + +static inline void __raw_write_unlock(raw_rwlock_t *rw) +{ + asm volatile(LOCK_PREFIX "addl %1, %0" + : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); +} + +#define _raw_spin_relax(lock) cpu_relax() +#define _raw_read_relax(lock) cpu_relax() +#define _raw_write_relax(lock) cpu_relax() + #endif diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h deleted file mode 100644 index d3bcebed60ca..000000000000 --- a/include/asm-x86/spinlock_32.h +++ /dev/null @@ -1,221 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#include <asm/atomic.h> -#include <asm/rwlock.h> -#include <asm/page.h> -#include <asm/processor.h> -#include <linux/compiler.h> - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define CLI_STRING "cli" -#define STI_STRING "sti" -#define CLI_STI_CLOBBERS -#define CLI_STI_INPUT_ARGS -#endif /* CONFIG_PARAVIRT */ - -/* - * Your basic SMP spinlocks, allowing only a single CPU anywhere - * - * Simple spin lock operations. There are two variants, one clears IRQ's - * on the local processor, one does not. - * - * We make no fairness assumptions. They have a cost. - * - * (the type definitions are in asm/spinlock_types.h) - */ - -static inline int __raw_spin_is_locked(raw_spinlock_t *x) -{ - return *(volatile signed char *)(&(x)->slock) <= 0; -} - -static inline void __raw_spin_lock(raw_spinlock_t *lock) -{ - asm volatile("\n1:\t" - LOCK_PREFIX " ; decb %0\n\t" - "jns 3f\n" - "2:\t" - "rep;nop\n\t" - "cmpb $0,%0\n\t" - "jle 2b\n\t" - "jmp 1b\n" - "3:\n\t" - : "+m" (lock->slock) : : "memory"); -} - -/* - * It is easier for the lock validator if interrupts are not re-enabled - * in the middle of a lock-acquire. This is a performance feature anyway - * so we turn it off: - * - * NOTE: there's an irqs-on section here, which normally would have to be - * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant. 
- */ -#ifndef CONFIG_PROVE_LOCKING -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) -{ - asm volatile( - "\n1:\t" - LOCK_PREFIX " ; decb %[slock]\n\t" - "jns 5f\n" - "2:\t" - "testl $0x200, %[flags]\n\t" - "jz 4f\n\t" - STI_STRING "\n" - "3:\t" - "rep;nop\n\t" - "cmpb $0, %[slock]\n\t" - "jle 3b\n\t" - CLI_STRING "\n\t" - "jmp 1b\n" - "4:\t" - "rep;nop\n\t" - "cmpb $0, %[slock]\n\t" - "jg 1b\n\t" - "jmp 4b\n" - "5:\n\t" - : [slock] "+m" (lock->slock) - : [flags] "r" (flags) - CLI_STI_INPUT_ARGS - : "memory" CLI_STI_CLOBBERS); -} -#endif - -static inline int __raw_spin_trylock(raw_spinlock_t *lock) -{ - char oldval; - asm volatile( - "xchgb %b0,%1" - :"=q" (oldval), "+m" (lock->slock) - :"0" (0) : "memory"); - return oldval > 0; -} - -/* - * __raw_spin_unlock based on writing $1 to the low byte. - * This method works. Despite all the confusion. - * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) - * (PPro errata 66, 92) - */ - -#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) - -static inline void __raw_spin_unlock(raw_spinlock_t *lock) -{ - asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory"); -} - -#else - -static inline void __raw_spin_unlock(raw_spinlock_t *lock) -{ - char oldval = 1; - - asm volatile("xchgb %b0, %1" - : "=q" (oldval), "+m" (lock->slock) - : "0" (oldval) : "memory"); -} - -#endif - -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) -{ - while (__raw_spin_is_locked(lock)) - cpu_relax(); -} - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. - * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - * - * On x86, we implement read-write locks as a 32-bit counter - * with the high bit (sign) being the "contended" bit. - * - * The inline assembly is non-obvious. Think about it. - * - * Changed to use the same technique as rw semaphores. See - * semaphore.h for details. -ben - * - * the helpers are in arch/i386/kernel/semaphore.c - */ - -/** - * read_can_lock - would read_trylock() succeed? - * @lock: the rwlock in question. - */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) -{ - return (int)(x)->lock > 0; -} - -/** - * write_can_lock - would write_trylock() succeed? - * @lock: the rwlock in question. 
- */ -static inline int __raw_write_can_lock(raw_rwlock_t *x) -{ - return (x)->lock == RW_LOCK_BIAS; -} - -static inline void __raw_read_lock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" - "jns 1f\n" - "call __read_lock_failed\n\t" - "1:\n" - ::"a" (rw) : "memory"); -} - -static inline void __raw_write_lock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" - "jz 1f\n" - "call __write_lock_failed\n\t" - "1:\n" - ::"a" (rw) : "memory"); -} - -static inline int __raw_read_trylock(raw_rwlock_t *lock) -{ - atomic_t *count = (atomic_t *)lock; - atomic_dec(count); - if (atomic_read(count) >= 0) - return 1; - atomic_inc(count); - return 0; -} - -static inline int __raw_write_trylock(raw_rwlock_t *lock) -{ - atomic_t *count = (atomic_t *)lock; - if (atomic_sub_and_test(RW_LOCK_BIAS, count)) - return 1; - atomic_add(RW_LOCK_BIAS, count); - return 0; -} - -static inline void __raw_read_unlock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); -} - -static inline void __raw_write_unlock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" - : "+m" (rw->lock) : : "memory"); -} - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() - -#endif /* __ASM_SPINLOCK_H */ diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h deleted file mode 100644 index 88bf981e73cf..000000000000 --- a/include/asm-x86/spinlock_64.h +++ /dev/null @@ -1,167 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#include <asm/atomic.h> -#include <asm/rwlock.h> -#include <asm/page.h> -#include <asm/processor.h> - -/* - * Your basic SMP spinlocks, allowing only a single CPU anywhere - * - * Simple spin lock operations. There are two variants, one clears IRQ's - * on the local processor, one does not. - * - * We make no fairness assumptions. They have a cost. - * - * (the type definitions are in asm/spinlock_types.h) - */ - -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) -{ - return *(volatile signed int *)(&(lock)->slock) <= 0; -} - -static inline void __raw_spin_lock(raw_spinlock_t *lock) -{ - asm volatile( - "\n1:\t" - LOCK_PREFIX " ; decl %0\n\t" - "jns 2f\n" - "3:\n" - "rep;nop\n\t" - "cmpl $0,%0\n\t" - "jle 3b\n\t" - "jmp 1b\n" - "2:\t" : "=m" (lock->slock) : : "memory"); -} - -/* - * Same as __raw_spin_lock, but reenable interrupts during spinning. - */ -#ifndef CONFIG_PROVE_LOCKING -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) -{ - asm volatile( - "\n1:\t" - LOCK_PREFIX " ; decl %0\n\t" - "jns 5f\n" - "testl $0x200, %1\n\t" /* interrupts were disabled? */ - "jz 4f\n\t" - "sti\n" - "3:\t" - "rep;nop\n\t" - "cmpl $0, %0\n\t" - "jle 3b\n\t" - "cli\n\t" - "jmp 1b\n" - "4:\t" - "rep;nop\n\t" - "cmpl $0, %0\n\t" - "jg 1b\n\t" - "jmp 4b\n" - "5:\n\t" - : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory"); -} -#endif - -static inline int __raw_spin_trylock(raw_spinlock_t *lock) -{ - int oldval; - - asm volatile( - "xchgl %0,%1" - :"=q" (oldval), "=m" (lock->slock) - :"0" (0) : "memory"); - - return oldval > 0; -} - -static inline void __raw_spin_unlock(raw_spinlock_t *lock) -{ - asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory"); -} - -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) -{ - while (__raw_spin_is_locked(lock)) - cpu_relax(); -} - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. 
- * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - * - * On x86, we implement read-write locks as a 32-bit counter - * with the high bit (sign) being the "contended" bit. - */ - -static inline int __raw_read_can_lock(raw_rwlock_t *lock) -{ - return (int)(lock)->lock > 0; -} - -static inline int __raw_write_can_lock(raw_rwlock_t *lock) -{ - return (lock)->lock == RW_LOCK_BIAS; -} - -static inline void __raw_read_lock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" - "jns 1f\n" - "call __read_lock_failed\n" - "1:\n" - ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); -} - -static inline void __raw_write_lock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" - "jz 1f\n" - "\tcall __write_lock_failed\n\t" - "1:\n" - ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); -} - -static inline int __raw_read_trylock(raw_rwlock_t *lock) -{ - atomic_t *count = (atomic_t *)lock; - atomic_dec(count); - if (atomic_read(count) >= 0) - return 1; - atomic_inc(count); - return 0; -} - -static inline int __raw_write_trylock(raw_rwlock_t *lock) -{ - atomic_t *count = (atomic_t *)lock; - if (atomic_sub_and_test(RW_LOCK_BIAS, count)) - return 1; - atomic_add(RW_LOCK_BIAS, count); - return 0; -} - -static inline void __raw_read_unlock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory"); -} - -static inline void __raw_write_unlock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0" - : "=m" (rw->lock) : : "memory"); -} - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() - -#endif /* __ASM_SPINLOCK_H */ diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h index 4da9345c1500..9029cf78cf5d 100644 --- a/include/asm-x86/spinlock_types.h +++ b/include/asm-x86/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { unsigned int slock; } raw_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __RAW_SPIN_LOCK_UNLOCKED { 0 } typedef struct { unsigned int lock; diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h index 70dd5bae3235..30f82526a8e2 100644 --- a/include/asm-x86/stacktrace.h +++ b/include/asm-x86/stacktrace.h @@ -9,12 +9,13 @@ struct stacktrace_ops { void (*warning)(void *data, char *msg); /* msg must contain %s for the symbol */ void (*warning_symbol)(void *data, char *msg, unsigned long symbol); - void (*address)(void *data, unsigned long address); + void (*address)(void *data, unsigned long address, int reliable); /* On negative return stop dumping */ int (*stack)(void *data, char *name); }; -void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, +void dump_trace(struct task_struct *tsk, struct pt_regs *regs, + unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data); #endif diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index a2520732ffd6..1bbda3ad7796 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h @@ -12,8 +12,8 @@ static inline int arch_prepare_suspend(void) { return 0; } struct saved_context { u16 es, fs, gs, ss; unsigned long cr0, cr2, cr3, cr4; - struct Xgt_desc_struct gdt; - struct Xgt_desc_struct idt; + struct desc_ptr gdt; + struct desc_ptr idt; u16 ldt; u16 tss; unsigned 
long tr; diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index c505a76bcf6e..2eb92cb81a0d 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h @@ -15,7 +15,14 @@ arch_prepare_suspend(void) return 0; } -/* Image of the saved processor state. If you touch this, fix acpi/wakeup.S. */ +/* + * Image of the saved processor state, used by the low level ACPI suspend to + * RAM code and by the low level hibernation code. + * + * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that + * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c, + * still work as required. + */ struct saved_context { struct pt_regs regs; u16 ds, es, fs, gs, ss; @@ -38,8 +45,6 @@ struct saved_context { #define loaddebug(thread,register) \ set_debugreg((thread)->debugreg##register, register) -extern void fix_processor_context(void); - /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); extern char core_restore_code; diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 692562b48f2a..ee32ef9367f4 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -1,5 +1,414 @@ +#ifndef _ASM_X86_SYSTEM_H_ +#define _ASM_X86_SYSTEM_H_ + +#include <asm/asm.h> +#include <asm/segment.h> +#include <asm/cpufeature.h> +#include <asm/cmpxchg.h> +#include <asm/nops.h> + +#include <linux/kernel.h> +#include <linux/irqflags.h> + +/* entries in ARCH_DLINFO: */ +#ifdef CONFIG_IA32_EMULATION +# define AT_VECTOR_SIZE_ARCH 2 +#else +# define AT_VECTOR_SIZE_ARCH 1 +#endif + +#ifdef CONFIG_X86_32 + +struct task_struct; /* one of the stranger aspects of C forward declarations */ +extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev, + struct task_struct *next)); + +/* + * Saving eflags is important. It switches not only IOPL between tasks, + * it also protects other tasks from NT leaking through sysenter etc. 
+ */ +#define switch_to(prev, next, last) do { \ + unsigned long esi, edi; \ + asm volatile("pushfl\n\t" /* Save flags */ \ + "pushl %%ebp\n\t" \ + "movl %%esp,%0\n\t" /* save ESP */ \ + "movl %5,%%esp\n\t" /* restore ESP */ \ + "movl $1f,%1\n\t" /* save EIP */ \ + "pushl %6\n\t" /* restore EIP */ \ + "jmp __switch_to\n" \ + "1:\t" \ + "popl %%ebp\n\t" \ + "popfl" \ + :"=m" (prev->thread.sp), "=m" (prev->thread.ip), \ + "=a" (last), "=S" (esi), "=D" (edi) \ + :"m" (next->thread.sp), "m" (next->thread.ip), \ + "2" (prev), "d" (next)); \ +} while (0) + +/* + * disable hlt during certain critical i/o operations + */ +#define HAVE_DISABLE_HLT +#else +#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" +#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" + +/* frame pointer must be last for get_wchan */ +#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" + +#define __EXTRA_CLOBBER \ + , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ + "r12", "r13", "r14", "r15" + +/* Save restore flags to clear handle leaking NT */ +#define switch_to(prev, next, last) \ + asm volatile(SAVE_CONTEXT \ + "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ + "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ + "call __switch_to\n\t" \ + ".globl thread_return\n" \ + "thread_return:\n\t" \ + "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ + "movq %P[thread_info](%%rsi),%%r8\n\t" \ + LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ + "movq %%rax,%%rdi\n\t" \ + "jc ret_from_fork\n\t" \ + RESTORE_CONTEXT \ + : "=a" (last) \ + : [next] "S" (next), [prev] "D" (prev), \ + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ + [ti_flags] "i" (offsetof(struct thread_info, flags)), \ + [tif_fork] "i" (TIF_FORK), \ + [thread_info] "i" (offsetof(struct task_struct, stack)), \ + [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ + : "memory", "cc" __EXTRA_CLOBBER) +#endif + +#ifdef __KERNEL__ +#define _set_base(addr, base) do { unsigned long __pr; \ +__asm__ __volatile__ ("movw %%dx,%1\n\t" \ + "rorl $16,%%edx\n\t" \ + "movb %%dl,%2\n\t" \ + "movb %%dh,%3" \ + :"=&d" (__pr) \ + :"m" (*((addr)+2)), \ + "m" (*((addr)+4)), \ + "m" (*((addr)+7)), \ + "0" (base) \ + ); } while (0) + +#define _set_limit(addr, limit) do { unsigned long __lr; \ +__asm__ __volatile__ ("movw %%dx,%1\n\t" \ + "rorl $16,%%edx\n\t" \ + "movb %2,%%dh\n\t" \ + "andb $0xf0,%%dh\n\t" \ + "orb %%dh,%%dl\n\t" \ + "movb %%dl,%2" \ + :"=&d" (__lr) \ + :"m" (*(addr)), \ + "m" (*((addr)+6)), \ + "0" (limit) \ + ); } while (0) + +#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) +#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) + +extern void load_gs_index(unsigned); + +/* + * Load a segment. Fall back on loading the zero + * segment if something goes wrong.. 
+ */ +#define loadsegment(seg, value) \ + asm volatile("\n" \ + "1:\t" \ + "movl %k0,%%" #seg "\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\t" \ + "movl %k1, %%" #seg "\n\t" \ + "jmp 2b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n\t" \ + _ASM_ALIGN "\n\t" \ + _ASM_PTR " 1b,3b\n" \ + ".previous" \ + : :"r" (value), "r" (0)) + + +/* + * Save a segment register away + */ +#define savesegment(seg, value) \ + asm volatile("mov %%" #seg ",%0":"=rm" (value)) + +static inline unsigned long get_limit(unsigned long segment) +{ + unsigned long __limit; + __asm__("lsll %1,%0" + :"=r" (__limit):"r" (segment)); + return __limit+1; +} + +static inline void native_clts(void) +{ + asm volatile ("clts"); +} + +/* + * Volatile isn't enough to prevent the compiler from reordering the + * read/write functions for the control registers and messing everything up. + * A memory clobber would solve the problem, but would prevent reordering of + * all loads stores around it, which can hurt performance. Solution is to + * use a variable and mimic reads and writes to it to enforce serialization + */ +static unsigned long __force_order; + +static inline unsigned long native_read_cr0(void) +{ + unsigned long val; + asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order)); + return val; +} + +static inline void native_write_cr0(unsigned long val) +{ + asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order)); +} + +static inline unsigned long native_read_cr2(void) +{ + unsigned long val; + asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order)); + return val; +} + +static inline void native_write_cr2(unsigned long val) +{ + asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order)); +} + +static inline unsigned long native_read_cr3(void) +{ + unsigned long val; + asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order)); + return val; +} + +static inline void native_write_cr3(unsigned long val) +{ + asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order)); +} + +static inline unsigned long native_read_cr4(void) +{ + unsigned long val; + asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order)); + return val; +} + +static inline unsigned long native_read_cr4_safe(void) +{ + unsigned long val; + /* This could fault if %cr4 does not exist. In x86_64, a cr4 always + * exists, so it will never fail. 
*/ +#ifdef CONFIG_X86_32 + asm volatile("1: mov %%cr4, %0 \n" + "2: \n" + ".section __ex_table,\"a\" \n" + ".long 1b,2b \n" + ".previous \n" + : "=r" (val), "=m" (__force_order) : "0" (0)); +#else + val = native_read_cr4(); +#endif + return val; +} + +static inline void native_write_cr4(unsigned long val) +{ + asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order)); +} + +#ifdef CONFIG_X86_64 +static inline unsigned long native_read_cr8(void) +{ + unsigned long cr8; + asm volatile("movq %%cr8,%0" : "=r" (cr8)); + return cr8; +} + +static inline void native_write_cr8(unsigned long val) +{ + asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); +} +#endif + +static inline void native_wbinvd(void) +{ + asm volatile("wbinvd": : :"memory"); +} +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#define read_cr0() (native_read_cr0()) +#define write_cr0(x) (native_write_cr0(x)) +#define read_cr2() (native_read_cr2()) +#define write_cr2(x) (native_write_cr2(x)) +#define read_cr3() (native_read_cr3()) +#define write_cr3(x) (native_write_cr3(x)) +#define read_cr4() (native_read_cr4()) +#define read_cr4_safe() (native_read_cr4_safe()) +#define write_cr4(x) (native_write_cr4(x)) +#define wbinvd() (native_wbinvd()) +#ifdef CONFIG_X86_64 +#define read_cr8() (native_read_cr8()) +#define write_cr8(x) (native_write_cr8(x)) +#endif + +/* Clear the 'TS' bit */ +#define clts() (native_clts()) + +#endif/* CONFIG_PARAVIRT */ + +#define stts() write_cr0(8 | read_cr0()) + +#endif /* __KERNEL__ */ + +static inline void clflush(void *__p) +{ + asm volatile("clflush %0" : "+m" (*(char __force *)__p)); +} + +#define nop() __asm__ __volatile__ ("nop") + +void disable_hlt(void); +void enable_hlt(void); + +extern int es7000_plat; +void cpu_idle_wait(void); + +extern unsigned long arch_align_stack(unsigned long sp); +extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + +void default_idle(void); + +/* + * Force strict CPU ordering. + * And yes, this is required on UP too when we're talking + * to devices. + */ #ifdef CONFIG_X86_32 -# include "system_32.h" +/* + * For now, "wmb()" doesn't actually do anything, as all + * Intel CPU's follow what Intel calls a *Processor Order*, + * in which all writes are seen in the program order even + * outside the CPU. + * + * I expect future Intel CPU's to have a weaker ordering, + * but I'd also expect them to finally get their act together + * and add some real memory barriers if so. + * + * Some non intel clones support out of order store. wmb() ceases to be a + * nop for these. + */ +#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) +#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) +#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) #else -# include "system_64.h" +#define mb() asm volatile("mfence":::"memory") +#define rmb() asm volatile("lfence":::"memory") +#define wmb() asm volatile("sfence" ::: "memory") +#endif + +/** + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). 
+ * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. + **/ + +#define read_barrier_depends() do { } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#ifdef CONFIG_X86_PPRO_FENCE +# define smp_rmb() rmb() +#else +# define smp_rmb() barrier() +#endif +#ifdef CONFIG_X86_OOSTORE +# define smp_wmb() wmb() +#else +# define smp_wmb() barrier() +#endif +#define smp_read_barrier_depends() read_barrier_depends() +#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif + +/* + * Stop RDTSC speculation. This is needed when you need to use RDTSC + * (or get_cycles or vread that possibly accesses the TSC) in a defined + * code region. + * + * (Could use an alternative three way for this if there was one.) + */ +static inline void rdtsc_barrier(void) +{ + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); +} + #endif diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h deleted file mode 100644 index ef8468883bac..000000000000 --- a/include/asm-x86/system_32.h +++ /dev/null @@ -1,320 +0,0 @@ -#ifndef __ASM_SYSTEM_H -#define __ASM_SYSTEM_H - -#include <linux/kernel.h> -#include <asm/segment.h> -#include <asm/cpufeature.h> -#include <asm/cmpxchg.h> - -#ifdef __KERNEL__ -#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ - -struct task_struct; /* one of the stranger aspects of C forward declarations.. */ -extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); - -/* - * Saving eflags is important. It switches not only IOPL between tasks, - * it also protects other tasks from NT leaking through sysenter etc. 
- */ -#define switch_to(prev,next,last) do { \ - unsigned long esi,edi; \ - asm volatile("pushfl\n\t" /* Save flags */ \ - "pushl %%ebp\n\t" \ - "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %5,%%esp\n\t" /* restore ESP */ \ - "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %6\n\t" /* restore EIP */ \ - "jmp __switch_to\n" \ - "1:\t" \ - "popl %%ebp\n\t" \ - "popfl" \ - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ - "=a" (last),"=S" (esi),"=D" (edi) \ - :"m" (next->thread.esp),"m" (next->thread.eip), \ - "2" (prev), "d" (next)); \ -} while (0) - -#define _set_base(addr,base) do { unsigned long __pr; \ -__asm__ __volatile__ ("movw %%dx,%1\n\t" \ - "rorl $16,%%edx\n\t" \ - "movb %%dl,%2\n\t" \ - "movb %%dh,%3" \ - :"=&d" (__pr) \ - :"m" (*((addr)+2)), \ - "m" (*((addr)+4)), \ - "m" (*((addr)+7)), \ - "0" (base) \ - ); } while(0) - -#define _set_limit(addr,limit) do { unsigned long __lr; \ -__asm__ __volatile__ ("movw %%dx,%1\n\t" \ - "rorl $16,%%edx\n\t" \ - "movb %2,%%dh\n\t" \ - "andb $0xf0,%%dh\n\t" \ - "orb %%dh,%%dl\n\t" \ - "movb %%dl,%2" \ - :"=&d" (__lr) \ - :"m" (*(addr)), \ - "m" (*((addr)+6)), \ - "0" (limit) \ - ); } while(0) - -#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) -#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) - -/* - * Load a segment. Fall back on loading the zero - * segment if something goes wrong.. - */ -#define loadsegment(seg,value) \ - asm volatile("\n" \ - "1:\t" \ - "mov %0,%%" #seg "\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\t" \ - "pushl $0\n\t" \ - "popl %%" #seg "\n\t" \ - "jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".long 1b,3b\n" \ - ".previous" \ - : :"rm" (value)) - -/* - * Save a segment register away - */ -#define savesegment(seg, value) \ - asm volatile("mov %%" #seg ",%0":"=rm" (value)) - - -static inline void native_clts(void) -{ - asm volatile ("clts"); -} - -static inline unsigned long native_read_cr0(void) -{ - unsigned long val; - asm volatile("movl %%cr0,%0\n\t" :"=r" (val)); - return val; -} - -static inline void native_write_cr0(unsigned long val) -{ - asm volatile("movl %0,%%cr0": :"r" (val)); -} - -static inline unsigned long native_read_cr2(void) -{ - unsigned long val; - asm volatile("movl %%cr2,%0\n\t" :"=r" (val)); - return val; -} - -static inline void native_write_cr2(unsigned long val) -{ - asm volatile("movl %0,%%cr2": :"r" (val)); -} - -static inline unsigned long native_read_cr3(void) -{ - unsigned long val; - asm volatile("movl %%cr3,%0\n\t" :"=r" (val)); - return val; -} - -static inline void native_write_cr3(unsigned long val) -{ - asm volatile("movl %0,%%cr3": :"r" (val)); -} - -static inline unsigned long native_read_cr4(void) -{ - unsigned long val; - asm volatile("movl %%cr4,%0\n\t" :"=r" (val)); - return val; -} - -static inline unsigned long native_read_cr4_safe(void) -{ - unsigned long val; - /* This could fault if %cr4 does not exist */ - asm volatile("1: movl %%cr4, %0 \n" - "2: \n" - ".section __ex_table,\"a\" \n" - ".long 1b,2b \n" - ".previous \n" - : "=r" (val): "0" (0)); - return val; -} - -static inline void native_write_cr4(unsigned long val) -{ - asm volatile("movl %0,%%cr4": :"r" (val)); -} - -static inline void native_wbinvd(void) -{ - asm volatile("wbinvd": : :"memory"); -} - -static inline void clflush(volatile void *__p) -{ - asm volatile("clflush %0" : "+m" (*(char __force *)__p)); -} - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define read_cr0() (native_read_cr0()) -#define 
write_cr0(x) (native_write_cr0(x)) -#define read_cr2() (native_read_cr2()) -#define write_cr2(x) (native_write_cr2(x)) -#define read_cr3() (native_read_cr3()) -#define write_cr3(x) (native_write_cr3(x)) -#define read_cr4() (native_read_cr4()) -#define read_cr4_safe() (native_read_cr4_safe()) -#define write_cr4(x) (native_write_cr4(x)) -#define wbinvd() (native_wbinvd()) - -/* Clear the 'TS' bit */ -#define clts() (native_clts()) - -#endif/* CONFIG_PARAVIRT */ - -/* Set the 'TS' bit */ -#define stts() write_cr0(8 | read_cr0()) - -#endif /* __KERNEL__ */ - -static inline unsigned long get_limit(unsigned long segment) -{ - unsigned long __limit; - __asm__("lsll %1,%0" - :"=r" (__limit):"r" (segment)); - return __limit+1; -} - -#define nop() __asm__ __volatile__ ("nop") - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. - * - * For now, "wmb()" doesn't actually do anything, as all - * Intel CPU's follow what Intel calls a *Processor Order*, - * in which all writes are seen in the program order even - * outside the CPU. - * - * I expect future Intel CPU's to have a weaker ordering, - * but I'd also expect them to finally get their act together - * and add some real memory barriers if so. - * - * Some non intel clones support out of order store. wmb() ceases to be a - * nop for these. - */ - - -#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) -#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) -#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. 
- **/ - -#define read_barrier_depends() do { } while(0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#ifdef CONFIG_X86_PPRO_FENCE -# define smp_rmb() rmb() -#else -# define smp_rmb() barrier() -#endif -#ifdef CONFIG_X86_OOSTORE -# define smp_wmb() wmb() -#else -# define smp_wmb() barrier() -#endif -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif - -#include <linux/irqflags.h> - -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -void disable_hlt(void); -void enable_hlt(void); - -extern int es7000_plat; -void cpu_idle_wait(void); - -extern unsigned long arch_align_stack(unsigned long sp); -extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - -void default_idle(void); -void __show_registers(struct pt_regs *, int all); - -#endif diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h index 6e9e4841a2da..97fa251ccb2b 100644 --- a/include/asm-x86/system_64.h +++ b/include/asm-x86/system_64.h @@ -1,126 +1,9 @@ #ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H -#include <linux/kernel.h> #include <asm/segment.h> #include <asm/cmpxchg.h> -#ifdef __KERNEL__ - -/* entries in ARCH_DLINFO: */ -#ifdef CONFIG_IA32_EMULATION -# define AT_VECTOR_SIZE_ARCH 2 -#else -# define AT_VECTOR_SIZE_ARCH 1 -#endif - -#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" -#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" - -/* frame pointer must be last for get_wchan */ -#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" - -#define __EXTRA_CLOBBER \ - ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" - -/* Save restore flags to clear handle leaking NT */ -#define switch_to(prev,next,last) \ - asm volatile(SAVE_CONTEXT \ - "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ - "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ - "call __switch_to\n\t" \ - ".globl thread_return\n" \ - "thread_return:\n\t" \ - "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ - "movq %P[thread_info](%%rsi),%%r8\n\t" \ - LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ - "movq %%rax,%%rdi\n\t" \ - "jc ret_from_fork\n\t" \ - RESTORE_CONTEXT \ - : "=a" (last) \ - : [next] "S" (next), [prev] "D" (prev), \ - [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ - [ti_flags] "i" (offsetof(struct thread_info, flags)),\ - [tif_fork] "i" (TIF_FORK), \ - [thread_info] "i" (offsetof(struct task_struct, stack)), \ - [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ - : "memory", "cc" __EXTRA_CLOBBER) - -extern void load_gs_index(unsigned); - -/* - * Load a segment. Fall back on loading the zero - * segment if something goes wrong.. 
- */ -#define loadsegment(seg,value) \ - asm volatile("\n" \ - "1:\t" \ - "movl %k0,%%" #seg "\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\t" \ - "movl %1,%%" #seg "\n\t" \ - "jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 8\n\t" \ - ".quad 1b,3b\n" \ - ".previous" \ - : :"r" (value), "r" (0)) - -/* - * Clear and set 'TS' bit respectively - */ -#define clts() __asm__ __volatile__ ("clts") - -static inline unsigned long read_cr0(void) -{ - unsigned long cr0; - asm volatile("movq %%cr0,%0" : "=r" (cr0)); - return cr0; -} - -static inline void write_cr0(unsigned long val) -{ - asm volatile("movq %0,%%cr0" :: "r" (val)); -} - -static inline unsigned long read_cr2(void) -{ - unsigned long cr2; - asm volatile("movq %%cr2,%0" : "=r" (cr2)); - return cr2; -} - -static inline void write_cr2(unsigned long val) -{ - asm volatile("movq %0,%%cr2" :: "r" (val)); -} - -static inline unsigned long read_cr3(void) -{ - unsigned long cr3; - asm volatile("movq %%cr3,%0" : "=r" (cr3)); - return cr3; -} - -static inline void write_cr3(unsigned long val) -{ - asm volatile("movq %0,%%cr3" :: "r" (val) : "memory"); -} - -static inline unsigned long read_cr4(void) -{ - unsigned long cr4; - asm volatile("movq %%cr4,%0" : "=r" (cr4)); - return cr4; -} - -static inline void write_cr4(unsigned long val) -{ - asm volatile("movq %0,%%cr4" :: "r" (val) : "memory"); -} static inline unsigned long read_cr8(void) { @@ -134,52 +17,6 @@ static inline void write_cr8(unsigned long val) asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); } -#define stts() write_cr0(8 | read_cr0()) - -#define wbinvd() \ - __asm__ __volatile__ ("wbinvd": : :"memory") - -#endif /* __KERNEL__ */ - -static inline void clflush(volatile void *__p) -{ - asm volatile("clflush %0" : "+m" (*(char __force *)__p)); -} - -#define nop() __asm__ __volatile__ ("nop") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do {} while(0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do {} while(0) -#endif - - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. 
- */ -#define mb() asm volatile("mfence":::"memory") -#define rmb() asm volatile("lfence":::"memory") -#define wmb() asm volatile("sfence" ::: "memory") - -#define read_barrier_depends() do {} while(0) -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) - -#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) - #include <linux/irqflags.h> -void cpu_idle_wait(void); - -extern unsigned long arch_align_stack(unsigned long sp); -extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - #endif diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h index 22a8cbcd35e2..5bd508260ffb 100644 --- a/include/asm-x86/thread_info_32.h +++ b/include/asm-x86/thread_info_32.h @@ -85,7 +85,7 @@ struct thread_info { /* how to get the current stack pointer from C */ -register unsigned long current_stack_pointer asm("esp") __attribute_used__; +register unsigned long current_stack_pointer asm("esp") __used; /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) @@ -132,11 +132,16 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ #define TIF_SECCOMP 7 /* secure computing */ #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ +#define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */ #define TIF_MEMDIE 16 #define TIF_DEBUG 17 /* uses debug registers */ #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ #define TIF_FREEZE 19 /* is freezing for suspend */ #define TIF_NOTSC 20 /* TSC is not accessible in userland */ +#define TIF_FORCED_TF 21 /* true if TF in eflags artificially */ +#define TIF_DEBUGCTLMSR 22 /* uses thread_struct.debugctlmsr */ +#define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ +#define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) @@ -147,10 +152,15 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) +#define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) #define _TIF_DEBUG (1<<TIF_DEBUG) #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) #define _TIF_FREEZE (1<<TIF_FREEZE) #define _TIF_NOTSC (1<<TIF_NOTSC) +#define _TIF_FORCED_TF (1<<TIF_FORCED_TF) +#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) +#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) +#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ @@ -160,8 +170,12 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) /* flags to check in __switch_to() */ -#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG) -#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC) +#define _TIF_WORK_CTXSW \ + (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ + _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) +#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW +#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) + /* * Thread-synchronous status. 
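The thread_info changes above extend the TIF_* bit numbers and regroup them into the _TIF_WORK_CTXSW mask that __switch_to() tests. Below is a minimal user-space sketch of that flag/mask pattern; the bit numbers and mask names mirror the new thread_info_32.h definitions, but the harness (main, printf, the sample flags value) is purely illustrative and not part of the patch.

/* Sketch of the TIF_* bit / _TIF_* mask pattern; not kernel code. */
#include <stdio.h>

#define TIF_IO_BITMAP      18	/* uses I/O bitmap */
#define TIF_NOTSC          20	/* TSC is not accessible in userland */
#define TIF_DEBUGCTLMSR    22	/* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR    23	/* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS   24	/* record scheduling event timestamps */

#define _TIF_IO_BITMAP     (1 << TIF_IO_BITMAP)
#define _TIF_NOTSC         (1 << TIF_NOTSC)
#define _TIF_DEBUGCTLMSR   (1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR   (1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS  (1 << TIF_BTS_TRACE_TS)

/* All deferred context-switch work collected into one mask */
#define _TIF_WORK_CTXSW \
	(_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
	 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)

int main(void)
{
	/* pretend this task has a debugctl MSR to reload on switch-in */
	unsigned long flags = _TIF_DEBUGCTLMSR;

	if (flags & _TIF_WORK_CTXSW)
		printf("extra context-switch work needed (flags=%#lx)\n", flags);
	return 0;
}

Grouping the bits this way lets the switch path test all deferred work with a single AND against thread_info->flags instead of checking each feature separately.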
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h index beae2bfb62ca..9b531ea015a8 100644 --- a/include/asm-x86/thread_info_64.h +++ b/include/asm-x86/thread_info_64.h @@ -21,7 +21,7 @@ #ifndef __ASSEMBLY__ struct task_struct; struct exec_domain; -#include <asm/mmsegment.h> +#include <asm/processor.h> struct thread_info { struct task_struct *task; /* main task structure */ @@ -33,6 +33,9 @@ struct thread_info { mm_segment_t addr_limit; struct restart_block restart_block; +#ifdef CONFIG_IA32_EMULATION + void __user *sysenter_return; +#endif }; #endif @@ -74,20 +77,14 @@ static inline struct thread_info *stack_thread_info(void) /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE -#define alloc_thread_info(tsk) \ - ({ \ - struct thread_info *ret; \ - \ - ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \ - if (ret) \ - memset(ret, 0, THREAD_SIZE); \ - ret; \ - }) +#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) #else -#define alloc_thread_info(tsk) \ - ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) +#define THREAD_FLAGS GFP_KERNEL #endif +#define alloc_thread_info(tsk) \ + ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER)) + #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) #else /* !__ASSEMBLY__ */ @@ -115,6 +112,7 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ +#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ /* 16 free */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ @@ -123,6 +121,10 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_DEBUG 21 /* uses debug registers */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ #define TIF_FREEZE 23 /* is freezing for suspend */ +#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ +#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ +#define TIF_DS_AREA_MSR 25 /* uses thread_struct.ds_area_msr */ +#define TIF_BTS_TRACE_TS 26 /* record scheduling event timestamps */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) @@ -133,12 +135,17 @@ static inline struct thread_info *stack_thread_info(void) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) +#define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) #define _TIF_IA32 (1<<TIF_IA32) #define _TIF_FORK (1<<TIF_FORK) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_DEBUG (1<<TIF_DEBUG) #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) #define _TIF_FREEZE (1<<TIF_FREEZE) +#define _TIF_FORCED_TF (1<<TIF_FORCED_TF) +#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) +#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) +#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ @@ -146,8 +153,14 @@ static inline struct thread_info *stack_thread_info(void) /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) +#define _TIF_DO_NOTIFY_MASK \ + (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) + /* flags to check in __switch_to() */ -#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP) +#define _TIF_WORK_CTXSW \ + 
(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS) +#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW +#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) #define PREEMPT_ACTIVE 0x10000000 diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h index eac011366dc2..68779b048a3e 100644 --- a/include/asm-x86/time.h +++ b/include/asm-x86/time.h @@ -1,8 +1,12 @@ -#ifndef _ASMi386_TIME_H -#define _ASMi386_TIME_H +#ifndef _ASMX86_TIME_H +#define _ASMX86_TIME_H +extern void (*late_time_init)(void); +extern void hpet_time_init(void); + +#include <asm/mc146818rtc.h> +#ifdef CONFIG_X86_32 #include <linux/efi.h> -#include "mach_time.h" static inline unsigned long native_get_wallclock(void) { @@ -28,8 +32,20 @@ static inline int native_set_wallclock(unsigned long nowtime) return retval; } -extern void (*late_time_init)(void); -extern void hpet_time_init(void); +#else +extern void native_time_init_hook(void); + +static inline unsigned long native_get_wallclock(void) +{ + return mach_get_cmos_time(); +} + +static inline int native_set_wallclock(unsigned long nowtime) +{ + return mach_set_rtc_mmss(nowtime); +} + +#endif #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h index 0db7e994fb8b..4f6fcb050c11 100644 --- a/include/asm-x86/timer.h +++ b/include/asm-x86/timer.h @@ -2,6 +2,7 @@ #define _ASMi386_TIMER_H #include <linux/init.h> #include <linux/pm.h> +#include <linux/percpu.h> #define TICK_SIZE (tick_nsec / 1000) @@ -16,7 +17,7 @@ extern int recalibrate_cpu_khz(void); #define calculate_cpu_khz() native_calculate_cpu_khz() #endif -/* Accellerators for sched_clock() +/* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) * basic equation: * ns = cycles / (freq / ns_per_sec) @@ -31,20 +32,32 @@ extern int recalibrate_cpu_khz(void); * And since SC is a constant power of two, we can convert the div * into a shift. * - * We can use khz divisor instead of mhz to keep a better percision, since + * We can use khz divisor instead of mhz to keep a better precision, since * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. * (mathieu.desnoyers@polymtl.ca) * * -johnstul@us.ibm.com "math is hard, lets go shopping!" */ -extern unsigned long cyc2ns_scale __read_mostly; + +DECLARE_PER_CPU(unsigned long, cyc2ns); #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ -static inline unsigned long long cycles_2_ns(unsigned long long cyc) +static inline unsigned long long __cycles_2_ns(unsigned long long cyc) { - return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; + return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR; } +static inline unsigned long long cycles_2_ns(unsigned long long cyc) +{ + unsigned long long ns; + unsigned long flags; + + local_irq_save(flags); + ns = __cycles_2_ns(cyc); + local_irq_restore(flags); + + return ns; +} #endif diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h index 39a21ab030f0..27cfd6c599ba 100644 --- a/include/asm-x86/timex.h +++ b/include/asm-x86/timex.h @@ -7,6 +7,8 @@ #ifdef CONFIG_X86_ELAN # define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! 
*/ +#elif defined(CONFIG_X86_RDC321X) +# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */ #else # define PIT_TICK_RATE 1193182 /* Underlying HZ */ #endif diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 9af4cc83a1af..3998709ed637 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h @@ -1,5 +1,158 @@ +#ifndef _ASM_X86_TLBFLUSH_H +#define _ASM_X86_TLBFLUSH_H + +#include <linux/mm.h> +#include <linux/sched.h> + +#include <asm/processor.h> +#include <asm/system.h> + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#define __flush_tlb() __native_flush_tlb() +#define __flush_tlb_global() __native_flush_tlb_global() +#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) +#endif + +static inline void __native_flush_tlb(void) +{ + write_cr3(read_cr3()); +} + +static inline void __native_flush_tlb_global(void) +{ + unsigned long cr4 = read_cr4(); + + /* clear PGE */ + write_cr4(cr4 & ~X86_CR4_PGE); + /* write old PGE again and flush TLBs */ + write_cr4(cr4); +} + +static inline void __native_flush_tlb_single(unsigned long addr) +{ + __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory"); +} + +static inline void __flush_tlb_all(void) +{ + if (cpu_has_pge) + __flush_tlb_global(); + else + __flush_tlb(); +} + +static inline void __flush_tlb_one(unsigned long addr) +{ + if (cpu_has_invlpg) + __flush_tlb_single(addr); + else + __flush_tlb(); +} + #ifdef CONFIG_X86_32 -# include "tlbflush_32.h" +# define TLB_FLUSH_ALL 0xffffffff #else -# include "tlbflush_64.h" +# define TLB_FLUSH_ALL -1ULL +#endif + +/* + * TLB flushing: + * + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus + * + * ..but the i386 has somewhat limited tlb flushing capabilities, + * and page-granular flushes are available only on i486 and up. + * + * x86-64 can only flush individual pages or full VMs. For a range flush + * we always do the full VM. Might be worth trying if for a small + * range a few INVLPGs in a row are a win. 
+ */ + +#ifndef CONFIG_SMP + +#define flush_tlb() __flush_tlb() +#define flush_tlb_all() __flush_tlb_all() +#define local_flush_tlb() __flush_tlb() + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + if (mm == current->active_mm) + __flush_tlb(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr) +{ + if (vma->vm_mm == current->active_mm) + __flush_tlb_one(addr); +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + if (vma->vm_mm == current->active_mm) + __flush_tlb(); +} + +static inline void native_flush_tlb_others(const cpumask_t *cpumask, + struct mm_struct *mm, + unsigned long va) +{ +} + +#else /* SMP */ + +#include <asm/smp.h> + +#define local_flush_tlb() __flush_tlb() + +extern void flush_tlb_all(void); +extern void flush_tlb_current_task(void); +extern void flush_tlb_mm(struct mm_struct *); +extern void flush_tlb_page(struct vm_area_struct *, unsigned long); + +#define flush_tlb() flush_tlb_current_task() + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} + +void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, + unsigned long va); + +#define TLBSTATE_OK 1 +#define TLBSTATE_LAZY 2 + +#ifdef CONFIG_X86_32 +struct tlb_state +{ + struct mm_struct *active_mm; + int state; + char __cacheline_padding[L1_CACHE_BYTES-8]; +}; +DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); +#endif + +#endif /* SMP */ + +#ifndef CONFIG_PARAVIRT +#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va) #endif + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_all(); +} + +#endif /* _ASM_X86_TLBFLUSH_H */ diff --git a/include/asm-x86/tlbflush_32.h b/include/asm-x86/tlbflush_32.h deleted file mode 100644 index 2bd5b95e2048..000000000000 --- a/include/asm-x86/tlbflush_32.h +++ /dev/null @@ -1,168 +0,0 @@ -#ifndef _I386_TLBFLUSH_H -#define _I386_TLBFLUSH_H - -#include <linux/mm.h> -#include <asm/processor.h> - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define __flush_tlb() __native_flush_tlb() -#define __flush_tlb_global() __native_flush_tlb_global() -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) -#endif - -#define __native_flush_tlb() \ - do { \ - unsigned int tmpreg; \ - \ - __asm__ __volatile__( \ - "movl %%cr3, %0; \n" \ - "movl %0, %%cr3; # flush TLB \n" \ - : "=r" (tmpreg) \ - :: "memory"); \ - } while (0) - -/* - * Global pages have to be flushed a bit differently. Not a real - * performance problem because this does not happen often. 
- */ -#define __native_flush_tlb_global() \ - do { \ - unsigned int tmpreg, cr4, cr4_orig; \ - \ - __asm__ __volatile__( \ - "movl %%cr4, %2; # turn off PGE \n" \ - "movl %2, %1; \n" \ - "andl %3, %1; \n" \ - "movl %1, %%cr4; \n" \ - "movl %%cr3, %0; \n" \ - "movl %0, %%cr3; # flush TLB \n" \ - "movl %2, %%cr4; # turn PGE back on \n" \ - : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ - : "i" (~X86_CR4_PGE) \ - : "memory"); \ - } while (0) - -#define __native_flush_tlb_single(addr) \ - __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") - -# define __flush_tlb_all() \ - do { \ - if (cpu_has_pge) \ - __flush_tlb_global(); \ - else \ - __flush_tlb(); \ - } while (0) - -#define cpu_has_invlpg (boot_cpu_data.x86 > 3) - -#ifdef CONFIG_X86_INVLPG -# define __flush_tlb_one(addr) __flush_tlb_single(addr) -#else -# define __flush_tlb_one(addr) \ - do { \ - if (cpu_has_invlpg) \ - __flush_tlb_single(addr); \ - else \ - __flush_tlb(); \ - } while (0) -#endif - -/* - * TLB flushing: - * - * - flush_tlb() flushes the current mm struct TLBs - * - flush_tlb_all() flushes all processes TLBs - * - flush_tlb_mm(mm) flushes the specified mm context TLB's - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(vma, start, end) flushes a range of pages - * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages - * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus - * - * ..but the i386 has somewhat limited tlb flushing capabilities, - * and page-granular flushes are available only on i486 and up. - */ - -#define TLB_FLUSH_ALL 0xffffffff - - -#ifndef CONFIG_SMP - -#include <linux/sched.h> - -#define flush_tlb() __flush_tlb() -#define flush_tlb_all() __flush_tlb_all() -#define local_flush_tlb() __flush_tlb() - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - if (mm == current->active_mm) - __flush_tlb(); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long addr) -{ - if (vma->vm_mm == current->active_mm) - __flush_tlb_one(addr); -} - -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - if (vma->vm_mm == current->active_mm) - __flush_tlb(); -} - -static inline void native_flush_tlb_others(const cpumask_t *cpumask, - struct mm_struct *mm, unsigned long va) -{ -} - -#else /* SMP */ - -#include <asm/smp.h> - -#define local_flush_tlb() \ - __flush_tlb() - -extern void flush_tlb_all(void); -extern void flush_tlb_current_task(void); -extern void flush_tlb_mm(struct mm_struct *); -extern void flush_tlb_page(struct vm_area_struct *, unsigned long); - -#define flush_tlb() flush_tlb_current_task() - -static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) -{ - flush_tlb_mm(vma->vm_mm); -} - -void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, - unsigned long va); - -#define TLBSTATE_OK 1 -#define TLBSTATE_LAZY 2 - -struct tlb_state -{ - struct mm_struct *active_mm; - int state; - char __cacheline_padding[L1_CACHE_BYTES-8]; -}; -DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); -#endif /* SMP */ - -#ifndef CONFIG_PARAVIRT -#define flush_tlb_others(mask, mm, va) \ - native_flush_tlb_others(&mask, mm, va) -#endif - -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - flush_tlb_all(); -} - -#endif /* _I386_TLBFLUSH_H */ diff --git a/include/asm-x86/tlbflush_64.h b/include/asm-x86/tlbflush_64.h deleted file mode 100644 index 7731fd23d572..000000000000 --- 
a/include/asm-x86/tlbflush_64.h +++ /dev/null @@ -1,100 +0,0 @@ -#ifndef _X8664_TLBFLUSH_H -#define _X8664_TLBFLUSH_H - -#include <linux/mm.h> -#include <linux/sched.h> -#include <asm/processor.h> -#include <asm/system.h> - -static inline void __flush_tlb(void) -{ - write_cr3(read_cr3()); -} - -static inline void __flush_tlb_all(void) -{ - unsigned long cr4 = read_cr4(); - write_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */ - write_cr4(cr4); /* write old PGE again and flush TLBs */ -} - -#define __flush_tlb_one(addr) \ - __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory") - - -/* - * TLB flushing: - * - * - flush_tlb() flushes the current mm struct TLBs - * - flush_tlb_all() flushes all processes TLBs - * - flush_tlb_mm(mm) flushes the specified mm context TLB's - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(vma, start, end) flushes a range of pages - * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages - * - * x86-64 can only flush individual pages or full VMs. For a range flush - * we always do the full VM. Might be worth trying if for a small - * range a few INVLPGs in a row are a win. - */ - -#ifndef CONFIG_SMP - -#define flush_tlb() __flush_tlb() -#define flush_tlb_all() __flush_tlb_all() -#define local_flush_tlb() __flush_tlb() - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - if (mm == current->active_mm) - __flush_tlb(); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long addr) -{ - if (vma->vm_mm == current->active_mm) - __flush_tlb_one(addr); -} - -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - if (vma->vm_mm == current->active_mm) - __flush_tlb(); -} - -#else - -#include <asm/smp.h> - -#define local_flush_tlb() \ - __flush_tlb() - -extern void flush_tlb_all(void); -extern void flush_tlb_current_task(void); -extern void flush_tlb_mm(struct mm_struct *); -extern void flush_tlb_page(struct vm_area_struct *, unsigned long); - -#define flush_tlb() flush_tlb_current_task() - -static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) -{ - flush_tlb_mm(vma->vm_mm); -} - -#define TLBSTATE_OK 1 -#define TLBSTATE_LAZY 2 - -/* Roughly an IPI every 20MB with 4k pages for freeing page table - ranges. Cost is about 42k of memory for each CPU. */ -#define ARCH_FREE_PTE_NR 5350 - -#endif - -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - flush_tlb_all(); -} - -#endif /* _X8664_TLBFLUSH_H */ diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index b10fde9798ea..8af05a93f097 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -1,5 +1,188 @@ +/* + * Written by: Matthew Dobson, IBM Corporation + * + * Copyright (C) 2002, IBM Corp. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to <colpatch@us.ibm.com> + */ +#ifndef _ASM_X86_TOPOLOGY_H +#define _ASM_X86_TOPOLOGY_H + +#ifdef CONFIG_NUMA +#include <linux/cpumask.h> +#include <asm/mpspec.h> + +/* Mappings between logical cpu number and node number */ #ifdef CONFIG_X86_32 -# include "topology_32.h" +extern int cpu_to_node_map[]; + #else -# include "topology_64.h" +DECLARE_PER_CPU(int, x86_cpu_to_node_map); +extern int x86_cpu_to_node_map_init[]; +extern void *x86_cpu_to_node_map_early_ptr; +/* Returns the number of the current Node. */ +#define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) +#endif + +extern cpumask_t node_to_cpumask_map[]; + +#define NUMA_NO_NODE (-1) + +/* Returns the number of the node containing CPU 'cpu' */ +#ifdef CONFIG_X86_32 +#define early_cpu_to_node(cpu) cpu_to_node(cpu) +static inline int cpu_to_node(int cpu) +{ + return cpu_to_node_map[cpu]; +} + +#else /* CONFIG_X86_64 */ +static inline int early_cpu_to_node(int cpu) +{ + int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; + + if (cpu_to_node_map) + return cpu_to_node_map[cpu]; + else if (per_cpu_offset(cpu)) + return per_cpu(x86_cpu_to_node_map, cpu); + else + return NUMA_NO_NODE; +} + +static inline int cpu_to_node(int cpu) +{ +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + if (x86_cpu_to_node_map_early_ptr) { + printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n", + (int)cpu); + dump_stack(); + return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; + } +#endif + if (per_cpu_offset(cpu)) + return per_cpu(x86_cpu_to_node_map, cpu); + else + return NUMA_NO_NODE; +} +#endif /* CONFIG_X86_64 */ + +/* + * Returns the number of the node containing Node 'node'. This + * architecture is flat, so it is a pretty simple function! + */ +#define parent_node(node) (node) + +/* Returns a bitmask of CPUs on Node 'node'. */ +static inline cpumask_t node_to_cpumask(int node) +{ + return node_to_cpumask_map[node]; +} + +/* Returns the number of the first CPU on Node 'node'. 
*/ +static inline int node_to_first_cpu(int node) +{ + cpumask_t mask = node_to_cpumask(node); + + return first_cpu(mask); +} + +#define pcibus_to_node(bus) __pcibus_to_node(bus) +#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus) + +#ifdef CONFIG_X86_32 +extern unsigned long node_start_pfn[]; +extern unsigned long node_end_pfn[]; +extern unsigned long node_remap_size[]; +#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) + +# ifdef CONFIG_X86_HT +# define ENABLE_TOPO_DEFINES +# endif + +# define SD_CACHE_NICE_TRIES 1 +# define SD_IDLE_IDX 1 +# define SD_NEWIDLE_IDX 2 +# define SD_FORKEXEC_IDX 0 + +#else + +# ifdef CONFIG_SMP +# define ENABLE_TOPO_DEFINES +# endif + +# define SD_CACHE_NICE_TRIES 2 +# define SD_IDLE_IDX 2 +# define SD_NEWIDLE_IDX 0 +# define SD_FORKEXEC_IDX 1 + +#endif + +/* sched_domains SD_NODE_INIT for NUMAQ machines */ +#define SD_NODE_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .child = NULL, \ + .groups = NULL, \ + .min_interval = 8, \ + .max_interval = 32, \ + .busy_factor = 32, \ + .imbalance_pct = 125, \ + .cache_nice_tries = SD_CACHE_NICE_TRIES, \ + .busy_idx = 3, \ + .idle_idx = SD_IDLE_IDX, \ + .newidle_idx = SD_NEWIDLE_IDX, \ + .wake_idx = 1, \ + .forkexec_idx = SD_FORKEXEC_IDX, \ + .flags = SD_LOAD_BALANCE \ + | SD_BALANCE_EXEC \ + | SD_BALANCE_FORK \ + | SD_SERIALIZE \ + | SD_WAKE_BALANCE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} + +#ifdef CONFIG_X86_64_ACPI_NUMA +extern int __node_distance(int, int); +#define node_distance(a, b) __node_distance(a, b) +#endif + +#else /* CONFIG_NUMA */ + +#include <asm-generic/topology.h> + +#endif + +extern cpumask_t cpu_coregroup_map(int cpu); + +#ifdef ENABLE_TOPO_DEFINES +#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) +#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) +#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) +#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) +#endif + +#ifdef CONFIG_SMP +#define mc_capable() (boot_cpu_data.x86_max_cores > 1) +#define smt_capable() (smp_num_siblings > 1) +#endif + #endif diff --git a/include/asm-x86/topology_32.h b/include/asm-x86/topology_32.h deleted file mode 100644 index 9040f5a61278..000000000000 --- a/include/asm-x86/topology_32.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * linux/include/asm-i386/topology.h - * - * Written by: Matthew Dobson, IBM Corporation - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Send feedback to <colpatch@us.ibm.com> - */ -#ifndef _ASM_I386_TOPOLOGY_H -#define _ASM_I386_TOPOLOGY_H - -#ifdef CONFIG_X86_HT -#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) -#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) -#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) -#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) -#endif - -#ifdef CONFIG_NUMA - -#include <asm/mpspec.h> - -#include <linux/cpumask.h> - -/* Mappings between logical cpu number and node number */ -extern cpumask_t node_2_cpu_mask[]; -extern int cpu_2_node[]; - -/* Returns the number of the node containing CPU 'cpu' */ -static inline int cpu_to_node(int cpu) -{ - return cpu_2_node[cpu]; -} - -/* Returns the number of the node containing Node 'node'. This architecture is flat, - so it is a pretty simple function! */ -#define parent_node(node) (node) - -/* Returns a bitmask of CPUs on Node 'node'. */ -static inline cpumask_t node_to_cpumask(int node) -{ - return node_2_cpu_mask[node]; -} - -/* Returns the number of the first CPU on Node 'node'. */ -static inline int node_to_first_cpu(int node) -{ - cpumask_t mask = node_to_cpumask(node); - return first_cpu(mask); -} - -#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node -#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) - -/* sched_domains SD_NODE_INIT for NUMAQ machines */ -#define SD_NODE_INIT (struct sched_domain) { \ - .span = CPU_MASK_NONE, \ - .parent = NULL, \ - .child = NULL, \ - .groups = NULL, \ - .min_interval = 8, \ - .max_interval = 32, \ - .busy_factor = 32, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 3, \ - .idle_idx = 1, \ - .newidle_idx = 2, \ - .wake_idx = 1, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_EXEC \ - | SD_BALANCE_FORK \ - | SD_SERIALIZE \ - | SD_WAKE_BALANCE, \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .nr_balance_failed = 0, \ -} - -extern unsigned long node_start_pfn[]; -extern unsigned long node_end_pfn[]; -extern unsigned long node_remap_size[]; - -#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) - -#else /* !CONFIG_NUMA */ -/* - * Other i386 platforms should define their own version of the - * above macros here. 
- */ - -#include <asm-generic/topology.h> - -#endif /* CONFIG_NUMA */ - -extern cpumask_t cpu_coregroup_map(int cpu); - -#ifdef CONFIG_SMP -#define mc_capable() (boot_cpu_data.x86_max_cores > 1) -#define smt_capable() (smp_num_siblings > 1) -#endif - -#endif /* _ASM_I386_TOPOLOGY_H */ diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h deleted file mode 100644 index a718dda037e0..000000000000 --- a/include/asm-x86/topology_64.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef _ASM_X86_64_TOPOLOGY_H -#define _ASM_X86_64_TOPOLOGY_H - - -#ifdef CONFIG_NUMA - -#include <asm/mpspec.h> -#include <linux/bitops.h> - -extern cpumask_t cpu_online_map; - -extern unsigned char cpu_to_node[]; -extern cpumask_t node_to_cpumask[]; - -#ifdef CONFIG_ACPI_NUMA -extern int __node_distance(int, int); -#define node_distance(a,b) __node_distance(a,b) -/* #else fallback version */ -#endif - -#define cpu_to_node(cpu) (cpu_to_node[cpu]) -#define parent_node(node) (node) -#define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node])) -#define node_to_cpumask(node) (node_to_cpumask[node]) -#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node -#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); - -#define numa_node_id() read_pda(nodenumber) - -/* sched_domains SD_NODE_INIT for x86_64 machines */ -#define SD_NODE_INIT (struct sched_domain) { \ - .span = CPU_MASK_NONE, \ - .parent = NULL, \ - .child = NULL, \ - .groups = NULL, \ - .min_interval = 8, \ - .max_interval = 32, \ - .busy_factor = 32, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 2, \ - .busy_idx = 3, \ - .idle_idx = 2, \ - .newidle_idx = 0, \ - .wake_idx = 1, \ - .forkexec_idx = 1, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_FORK \ - | SD_BALANCE_EXEC \ - | SD_SERIALIZE \ - | SD_WAKE_BALANCE, \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .nr_balance_failed = 0, \ -} - -#endif - -#ifdef CONFIG_SMP -#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) -#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) -#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) -#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) -#define mc_capable() (boot_cpu_data.x86_max_cores > 1) -#define smt_capable() (smp_num_siblings > 1) -#endif - -#include <asm-generic/topology.h> - -extern cpumask_t cpu_coregroup_map(int cpu); - -#endif diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 6baab30dc2c8..7d3e27f7d484 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h @@ -17,6 +17,8 @@ typedef unsigned long long cycles_t; extern unsigned int cpu_khz; extern unsigned int tsc_khz; +extern void disable_TSC(void); + static inline cycles_t get_cycles(void) { unsigned long long ret = 0; @@ -25,39 +27,22 @@ static inline cycles_t get_cycles(void) if (!cpu_has_tsc) return 0; #endif - -#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) rdtscll(ret); -#endif + return ret; } -/* Like get_cycles, but make sure the CPU is synchronized. 
*/ -static __always_inline cycles_t get_cycles_sync(void) +static inline cycles_t vget_cycles(void) { - unsigned long long ret; - unsigned eax, edx; - - /* - * Use RDTSCP if possible; it is guaranteed to be synchronous - * and doesn't cause a VMEXIT on Hypervisors - */ - alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP, - ASM_OUTPUT2("=a" (eax), "=d" (edx)), - "a" (0U), "d" (0U) : "ecx", "memory"); - ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax); - if (ret) - return ret; - /* - * Don't do an additional sync on CPUs where we know - * RDTSC is already synchronous: + * We only do VDSOs on TSC capable CPUs, so this shouldnt + * access boot_cpu_data (which is not VDSO-safe): */ - alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, - "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); - rdtscll(ret); - - return ret; +#ifndef CONFIG_X86_TSC + if (!cpu_has_tsc) + return 0; +#endif + return (cycles_t) __native_read_tsc(); } extern void tsc_init(void); @@ -73,8 +58,7 @@ int check_tsc_unstable(void); extern void check_tsc_sync_source(int cpu); extern void check_tsc_sync_target(void); -#ifdef CONFIG_X86_64 extern void tsc_calibrate(void); -#endif +extern int notsc_setup(char *); #endif diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index f4ce8768ad44..31d794702719 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h @@ -65,6 +65,8 @@ struct exception_table_entry unsigned long insn, fixup; }; +extern int fixup_exception(struct pt_regs *regs); + #define ARCH_HAS_SEARCH_EXTABLE /* diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 9b15545eb9b5..8d8f9b5adbb9 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -333,8 +333,6 @@ #ifdef __KERNEL__ -#define NR_syscalls 325 - #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index 0e85d2a5e33a..ed8b8fc6906c 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h @@ -75,13 +75,23 @@ struct user_fxsr_struct { * doesn't use the extra segment registers) */ struct user_regs_struct { - long ebx, ecx, edx, esi, edi, ebp, eax; - unsigned short ds, __ds, es, __es; - unsigned short fs, __fs, gs, __gs; - long orig_eax, eip; - unsigned short cs, __cs; - long eflags, esp; - unsigned short ss, __ss; + unsigned long bx; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long bp; + unsigned long ax; + unsigned long ds; + unsigned long es; + unsigned long fs; + unsigned long gs; + unsigned long orig_ax; + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; }; /* When the kernel dumps core, it starts by dumping the user struct - diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 12785c649ac5..a5449d456cc0 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h @@ -40,13 +40,13 @@ * and both the standard and SIMD floating point data can be accessed via * the new ptrace requests. In either case, changes to the FPU environment * will be reflected in the task's state as expected. - * + * * x86-64 support by Andi Kleen. */ /* This matches the 64bit FXSAVE format as defined by AMD. It is the same as the 32bit format defined by Intel, except that the selector:offset pairs for - data and eip are replaced with flat 64bit pointers. */ + data and eip are replaced with flat 64bit pointers. 
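
The tsc.h hunk above drops the old synchronized get_cycles_sync() in favour of a plain get_cycles() plus a VDSO-safe vget_cycles() (the latter is reserved for vsyscall/vDSO code). A minimal sketch of timing a code path with get_cycles(), assuming ordinary kernel context; the helper returns 0 on CPUs without a TSC:

#include <asm/tsc.h>
#include <linux/kernel.h>

static void time_one_pass(void (*fn)(void))
{
	cycles_t start, end;

	start = get_cycles();
	fn();
	end = get_cycles();

	printk(KERN_DEBUG "fn took %llu cycles\n",
	       (unsigned long long)(end - start));
}
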
*/ struct user_i387_struct { unsigned short cwd; unsigned short swd; @@ -65,13 +65,34 @@ struct user_i387_struct { * Segment register layout in coredumps. */ struct user_regs_struct { - unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; - unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; - unsigned long rip,cs,eflags; - unsigned long rsp,ss; - unsigned long fs_base, gs_base; - unsigned long ds,es,fs,gs; -}; + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long bp; + unsigned long bx; + unsigned long r11; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long orig_ax; + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; + unsigned long fs_base; + unsigned long gs_base; + unsigned long ds; + unsigned long es; + unsigned long fs; + unsigned long gs; +}; /* When the kernel dumps core, it starts by dumping the user struct - this will be used by gdb to figure out where the data and stack segments @@ -94,7 +115,7 @@ struct user{ This is actually the bottom of the stack, the top of the stack is always found in the esp register. */ - long int signal; /* Signal that caused the core dump. */ + long int signal; /* Signal that caused the core dump. */ int reserved; /* No longer used */ int pad1; struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h new file mode 100644 index 000000000000..629bcb6e8e45 --- /dev/null +++ b/include/asm-x86/vdso.h @@ -0,0 +1,28 @@ +#ifndef _ASM_X86_VDSO_H +#define _ASM_X86_VDSO_H 1 + +#ifdef CONFIG_X86_64 +extern const char VDSO64_PRELINK[]; + +/* + * Given a pointer to the vDSO image, find the pointer to VDSO64_name + * as that symbol is defined in the vDSO sources or linker script. + */ +#define VDSO64_SYMBOL(base, name) ({ \ + extern const char VDSO64_##name[]; \ + (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); }) +#endif + +#if defined CONFIG_X86_32 || defined CONFIG_COMPAT +extern const char VDSO32_PRELINK[]; + +/* + * Given a pointer to the vDSO image, find the pointer to VDSO32_name + * as that symbol is defined in the vDSO sources or linker script. 
+ */ +#define VDSO32_SYMBOL(base, name) ({ \ + extern const char VDSO32_##name[]; \ + (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) +#endif + +#endif /* asm-x86/vdso.h */ diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h index f01c49f5d108..17b3700949bf 100644 --- a/include/asm-x86/vsyscall.h +++ b/include/asm-x86/vsyscall.h @@ -36,6 +36,8 @@ extern volatile unsigned long __jiffies; extern int vgetcpu_mode; extern struct timezone sys_tz; +extern void map_vsyscall(void); + #endif /* __KERNEL__ */ #endif /* _ASM_X86_64_VSYSCALL_H_ */ diff --git a/include/asm-x86/vsyscall32.h b/include/asm-x86/vsyscall32.h deleted file mode 100644 index c631c082f8f7..000000000000 --- a/include/asm-x86/vsyscall32.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_VSYSCALL32_H -#define _ASM_VSYSCALL32_H 1 - -/* Values need to match arch/x86_64/ia32/vsyscall.lds */ - -#ifdef __ASSEMBLY__ -#define VSYSCALL32_BASE 0xffffe000 -#define VSYSCALL32_SYSEXIT (VSYSCALL32_BASE + 0x410) -#else -#define VSYSCALL32_BASE 0xffffe000UL -#define VSYSCALL32_END (VSYSCALL32_BASE + PAGE_SIZE) -#define VSYSCALL32_EHDR ((const struct elf32_hdr *) VSYSCALL32_BASE) - -#define VSYSCALL32_VSYSCALL ((void *)VSYSCALL32_BASE + 0x400) -#define VSYSCALL32_SYSEXIT ((void *)VSYSCALL32_BASE + 0x410) -#define VSYSCALL32_SIGRETURN ((void __user *)VSYSCALL32_BASE + 0x500) -#define VSYSCALL32_RTSIGRETURN ((void __user *)VSYSCALL32_BASE + 0x600) -#endif - -#endif diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index 23c86cef3b25..a41ef1bdd424 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h @@ -1,6 +1,4 @@ /* - * include/asm-i386/xor.h - * * Optimized RAID-5 checksumming functions for MMX and SSE. * * This program is free software; you can redistribute it and/or modify diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index f942fcc21831..1eee7fcb2420 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h @@ -1,6 +1,4 @@ /* - * include/asm-x86_64/xor.h - * * Optimized RAID-5 checksumming functions for MMX and SSE. * * This program is free software; you can redistribute it and/or modify diff --git a/include/crypto/aead.h b/include/crypto/aead.h new file mode 100644 index 000000000000..0edf949f6369 --- /dev/null +++ b/include/crypto/aead.h @@ -0,0 +1,105 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
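
The new asm-x86/vdso.h above resolves addresses relative to the prelinked vDSO image. A minimal sketch, assuming the image is already mapped at base and that the vDSO linker script exports a symbol named sigreturn; the symbol name is illustrative only, the real names come from the vDSO sources:

#include <asm/vdso.h>
#include <linux/kernel.h>

static void __init report_vdso32_entry(void *base)
{
	/* expands to &VDSO32_sigreturn relocated against the mapping at base */
	void *p = VDSO32_SYMBOL(base, sigreturn);

	printk(KERN_DEBUG "vdso32 sigreturn entry at %p\n", p);
}
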
+ * + */ + +#ifndef _CRYPTO_AEAD_H +#define _CRYPTO_AEAD_H + +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +/** + * struct aead_givcrypt_request - AEAD request with IV generation + * @seq: Sequence number for IV generation + * @giv: Space for generated IV + * @areq: The AEAD request itself + */ +struct aead_givcrypt_request { + u64 seq; + u8 *giv; + + struct aead_request areq; +}; + +static inline struct crypto_aead *aead_givcrypt_reqtfm( + struct aead_givcrypt_request *req) +{ + return crypto_aead_reqtfm(&req->areq); +} + +static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req) +{ + struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); + return crt->givencrypt(req); +}; + +static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) +{ + struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); + return crt->givdecrypt(req); +}; + +static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, + struct crypto_aead *tfm) +{ + req->areq.base.tfm = crypto_aead_tfm(tfm); +} + +static inline struct aead_givcrypt_request *aead_givcrypt_alloc( + struct crypto_aead *tfm, gfp_t gfp) +{ + struct aead_givcrypt_request *req; + + req = kmalloc(sizeof(struct aead_givcrypt_request) + + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_givcrypt_set_tfm(req, tfm); + + return req; +} + +static inline void aead_givcrypt_free(struct aead_givcrypt_request *req) +{ + kfree(req); +} + +static inline void aead_givcrypt_set_callback( + struct aead_givcrypt_request *req, u32 flags, + crypto_completion_t complete, void *data) +{ + aead_request_set_callback(&req->areq, flags, complete, data); +} + +static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + aead_request_set_crypt(&req->areq, src, dst, nbytes, iv); +} + +static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req, + struct scatterlist *assoc, + unsigned int assoclen) +{ + aead_request_set_assoc(&req->areq, assoc, assoclen); +} + +static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req, + u8 *giv, u64 seq) +{ + req->giv = giv; + req->seq = seq; +} + +#endif /* _CRYPTO_AEAD_H */ diff --git a/include/crypto/aes.h b/include/crypto/aes.h new file mode 100644 index 000000000000..d480b76715a8 --- /dev/null +++ b/include/crypto/aes.h @@ -0,0 +1,31 @@ +/* + * Common values for AES algorithms + */ + +#ifndef _CRYPTO_AES_H +#define _CRYPTO_AES_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 +#define AES_KEYSIZE_128 16 +#define AES_KEYSIZE_192 24 +#define AES_KEYSIZE_256 32 +#define AES_BLOCK_SIZE 16 + +struct crypto_aes_ctx { + u32 key_length; + u32 key_enc[60]; + u32 key_dec[60]; +}; + +extern u32 crypto_ft_tab[4][256]; +extern u32 crypto_fl_tab[4][256]; +extern u32 crypto_it_tab[4][256]; +extern u32 crypto_il_tab[4][256]; + +int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); +#endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index b9b05d399d2b..60d06e784be3 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -111,8 +111,15 @@ void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); +static inline void crypto_set_spawn(struct crypto_spawn *spawn, + struct crypto_instance *inst) +{ + 
spawn->inst = inst; +} + struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); +const char *crypto_attr_alg_name(struct rtattr *rta); struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_attr_u32(struct rtattr *rta, u32 *num); struct crypto_instance *crypto_alloc_instance(const char *name, @@ -124,6 +131,10 @@ int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); +/* These functions require the input/output to be aligned as u32. */ +void crypto_inc(u8 *a, unsigned int size); +void crypto_xor(u8 *dst, const u8 *src, unsigned int size); + int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err); int blkcipher_walk_virt(struct blkcipher_desc *desc, @@ -187,20 +198,11 @@ static inline struct crypto_instance *crypto_aead_alg_instance( return crypto_tfm_alg_instance(&aead->base); } -static inline struct crypto_ablkcipher *crypto_spawn_ablkcipher( - struct crypto_spawn *spawn) -{ - u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; - u32 mask = CRYPTO_ALG_TYPE_MASK; - - return __crypto_ablkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); -} - static inline struct crypto_blkcipher *crypto_spawn_blkcipher( struct crypto_spawn *spawn) { u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; - u32 mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; + u32 mask = CRYPTO_ALG_TYPE_MASK; return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); } @@ -303,5 +305,14 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, return crypto_attr_alg(tb[1], type, mask); } +/* + * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. + * Otherwise returns zero. + */ +static inline int crypto_requires_sync(u32 type, u32 mask) +{ + return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h new file mode 100644 index 000000000000..e47b044929a8 --- /dev/null +++ b/include/crypto/authenc.h @@ -0,0 +1,27 @@ +/* + * Authenc: Simple AEAD wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_AUTHENC_H +#define _CRYPTO_AUTHENC_H + +#include <linux/types.h> + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC, + CRYPTO_AUTHENC_KEYA_PARAM, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +#endif /* _CRYPTO_AUTHENC_H */ + diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h new file mode 100644 index 000000000000..4180fc080e3b --- /dev/null +++ b/include/crypto/ctr.h @@ -0,0 +1,20 @@ +/* + * CTR: Counter mode + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
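
The aead_givcrypt_* helpers added in crypto/aead.h above wrap an ordinary AEAD request with IV generation. A minimal sketch of issuing one encryption, assuming the caller already holds a crypto_aead handle, prepared scatterlists, IV/giv buffers and a crypto_completion_t callback; -EINPROGRESS/-EBUSY mean the callback will run later and then owns the request:

#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int givencrypt_once(struct crypto_aead *tfm,
			   struct scatterlist *src, struct scatterlist *dst,
			   struct scatterlist *assoc, unsigned int assoclen,
			   unsigned int nbytes, u8 *iv, u8 *giv, u64 seq,
			   crypto_completion_t done, void *ctx)
{
	struct aead_givcrypt_request *req;
	int err;

	req = aead_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	aead_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, done, ctx);
	aead_givcrypt_set_crypt(req, src, dst, nbytes, iv);
	aead_givcrypt_set_assoc(req, assoc, assoclen);
	aead_givcrypt_set_giv(req, giv, seq);

	err = crypto_aead_givencrypt(req);
	if (err != -EINPROGRESS && err != -EBUSY)
		aead_givcrypt_free(req);
	return err;
}
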
+ * + */ + +#ifndef _CRYPTO_CTR_H +#define _CRYPTO_CTR_H + +#define CTR_RFC3686_NONCE_SIZE 4 +#define CTR_RFC3686_IV_SIZE 8 +#define CTR_RFC3686_BLOCK_SIZE 16 + +#endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h new file mode 100644 index 000000000000..2971c6304ade --- /dev/null +++ b/include/crypto/des.h @@ -0,0 +1,19 @@ +/* + * DES & Triple DES EDE Cipher Algorithms. + */ + +#ifndef __CRYPTO_DES_H +#define __CRYPTO_DES_H + +#define DES_KEY_SIZE 8 +#define DES_EXPKEY_WORDS 32 +#define DES_BLOCK_SIZE 8 + +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE + + +extern unsigned long des_ekey(u32 *pe, const u8 *k); + +#endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h new file mode 100644 index 000000000000..d838c945575a --- /dev/null +++ b/include/crypto/internal/aead.h @@ -0,0 +1,80 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_INTERNAL_AEAD_H +#define _CRYPTO_INTERNAL_AEAD_H + +#include <crypto/aead.h> +#include <crypto/algapi.h> +#include <linux/types.h> + +struct rtattr; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +extern const struct crypto_type crypto_nivaead_type; + +static inline void crypto_set_aead_spawn( + struct crypto_aead_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_aead_spawn_alg( + struct crypto_aead_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_aead *crypto_spawn_aead( + struct crypto_aead_spawn *spawn) +{ + return __crypto_aead_cast( + crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD, + CRYPTO_ALG_TYPE_MASK)); +} + +struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, + u32 mask); +void aead_geniv_free(struct crypto_instance *inst); +int aead_geniv_init(struct crypto_tfm *tfm); +void aead_geniv_exit(struct crypto_tfm *tfm); + +static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) +{ + return crypto_aead_crt(geniv)->base; +} + +static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) +{ + return aead_request_ctx(&req->areq); +} + +static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req, + int err) +{ + aead_request_complete(&req->areq, err); +} + +#endif /* _CRYPTO_INTERNAL_AEAD_H */ + diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h new file mode 100644 index 000000000000..2ba42cd7d6aa --- /dev/null +++ b/include/crypto/internal/skcipher.h @@ -0,0 +1,110 @@ +/* + * Symmetric key ciphers. 
+ * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_INTERNAL_SKCIPHER_H +#define _CRYPTO_INTERNAL_SKCIPHER_H + +#include <crypto/algapi.h> +#include <crypto/skcipher.h> +#include <linux/types.h> + +struct rtattr; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +extern const struct crypto_type crypto_givcipher_type; + +static inline void crypto_set_skcipher_spawn( + struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_skcipher_spawn_alg( + struct crypto_skcipher_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_ablkcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) +{ + return __crypto_ablkcipher_cast( + crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0), + crypto_skcipher_mask(0))); +} + +int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); +int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); +const char *crypto_default_geniv(const struct crypto_alg *alg); + +struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, + u32 mask); +void skcipher_geniv_free(struct crypto_instance *inst); +int skcipher_geniv_init(struct crypto_tfm *tfm); +void skcipher_geniv_exit(struct crypto_tfm *tfm); + +static inline struct crypto_ablkcipher *skcipher_geniv_cipher( + struct crypto_ablkcipher *geniv) +{ + return crypto_ablkcipher_crt(geniv)->base; +} + +static inline int skcipher_enqueue_givcrypt( + struct crypto_queue *queue, struct skcipher_givcrypt_request *request) +{ + return ablkcipher_enqueue_request(queue, &request->creq); +} + +static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( + struct crypto_queue *queue) +{ + return container_of(ablkcipher_dequeue_request(queue), + struct skcipher_givcrypt_request, creq); +} + +static inline void *skcipher_givcrypt_reqctx( + struct skcipher_givcrypt_request *req) +{ + return ablkcipher_request_ctx(&req->creq); +} + +static inline void ablkcipher_request_complete(struct ablkcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline void skcipher_givcrypt_complete( + struct skcipher_givcrypt_request *req, int err) +{ + ablkcipher_request_complete(&req->creq, err); +} + +static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) +{ + return req->base.flags; +} + +#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ + diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h new file mode 100644 index 000000000000..224658b8d806 --- /dev/null +++ b/include/crypto/scatterwalk.h @@ -0,0 +1,119 @@ +/* + * Cryptographic scatter and gather helpers. + * + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2002 Adam J. 
Richter <adam@yggdrasil.com> + * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_SCATTERWALK_H +#define _CRYPTO_SCATTERWALK_H + +#include <asm/kmap_types.h> +#include <crypto/algapi.h> +#include <linux/hardirq.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/sched.h> + +static inline enum km_type crypto_kmap_type(int out) +{ + enum km_type type; + + if (in_softirq()) + type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0; + else + type = out * (KM_USER1 - KM_USER0) + KM_USER0; + + return type; +} + +static inline void *crypto_kmap(struct page *page, int out) +{ + return kmap_atomic(page, crypto_kmap_type(out)); +} + +static inline void crypto_kunmap(void *vaddr, int out) +{ + kunmap_atomic(vaddr, crypto_kmap_type(out)); +} + +static inline void crypto_yield(u32 flags) +{ + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) + cond_resched(); +} + +static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, + struct scatterlist *sg2) +{ + sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); +} + +static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) +{ + return (++sg)->length ? sg : (void *)sg_page(sg); +} + +static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, + struct scatter_walk *walk_out) +{ + return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) + + (int)(walk_in->offset - walk_out->offset)); +} + +static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +{ + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; + return len_this_page > len ? len : len_this_page; +} + +static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, + unsigned int nbytes) +{ + unsigned int len_this_page = scatterwalk_pagelen(walk); + return nbytes > len_this_page ? 
len_this_page : nbytes; +} + +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, + unsigned int alignmask) +{ + return !(walk->offset & alignmask); +} + +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr, int out) +{ + crypto_kunmap(vaddr, out); +} + +void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk, int out); +void scatterwalk_done(struct scatter_walk *walk, int out, int more); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + +#endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 0686e1f7a24b..c0ccc2b1a2d8 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -8,6 +8,9 @@ #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 + #define SHA256_DIGEST_SIZE 32 #define SHA256_BLOCK_SIZE 64 @@ -23,6 +26,15 @@ #define SHA1_H3 0x10325476UL #define SHA1_H4 0xc3d2e1f0UL +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + #define SHA256_H0 0x6a09e667UL #define SHA256_H1 0xbb67ae85UL #define SHA256_H2 0x3c6ef372UL diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h new file mode 100644 index 000000000000..25fd6126522d --- /dev/null +++ b/include/crypto/skcipher.h @@ -0,0 +1,110 @@ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
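
crypto/scatterwalk.h above also exports scatterwalk_map_and_copy() for linearising a slice of a scatterlist. A minimal sketch that pulls an authentication tag off the end of a ciphertext, assuming cryptlen already counts the authsize bytes of tag; out = 0 reads from the scatterlist into the buffer, out = 1 writes the other way:

#include <crypto/scatterwalk.h>

static void read_auth_tag(u8 *tag, struct scatterlist *sg,
			  unsigned int cryptlen, unsigned int authsize)
{
	/* copy the last authsize bytes of sg into the linear buffer tag */
	scatterwalk_map_and_copy(tag, sg, cryptlen - authsize, authsize, 0);
}
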
+ * + */ + +#ifndef _CRYPTO_SKCIPHER_H +#define _CRYPTO_SKCIPHER_H + +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +/** + * struct skcipher_givcrypt_request - Crypto request with IV generation + * @seq: Sequence number for IV generation + * @giv: Space for generated IV + * @creq: The crypto request itself + */ +struct skcipher_givcrypt_request { + u64 seq; + u8 *giv; + + struct ablkcipher_request creq; +}; + +static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( + struct skcipher_givcrypt_request *req) +{ + return crypto_ablkcipher_reqtfm(&req->creq); +} + +static inline int crypto_skcipher_givencrypt( + struct skcipher_givcrypt_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); + return crt->givencrypt(req); +}; + +static inline int crypto_skcipher_givdecrypt( + struct skcipher_givcrypt_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); + return crt->givdecrypt(req); +}; + +static inline void skcipher_givcrypt_set_tfm( + struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) +{ + req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); +} + +static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast( + struct crypto_async_request *req) +{ + return container_of(ablkcipher_request_cast(req), + struct skcipher_givcrypt_request, creq); +} + +static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct skcipher_givcrypt_request *req; + + req = kmalloc(sizeof(struct skcipher_givcrypt_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + skcipher_givcrypt_set_tfm(req, tfm); + + return req; +} + +static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) +{ + kfree(req); +} + +static inline void skcipher_givcrypt_set_callback( + struct skcipher_givcrypt_request *req, u32 flags, + crypto_completion_t complete, void *data) +{ + ablkcipher_request_set_callback(&req->creq, flags, complete, data); +} + +static inline void skcipher_givcrypt_set_crypt( + struct skcipher_givcrypt_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv); +} + +static inline void skcipher_givcrypt_set_giv( + struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) +{ + req->giv = giv; + req->seq = seq; +} + +#endif /* _CRYPTO_SKCIPHER_H */ + diff --git a/include/linux/Kbuild b/include/linux/Kbuild index f30fa92a44a1..85b2482cc736 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -1,4 +1,5 @@ header-y += byteorder/ +header-y += can/ header-y += dvb/ header-y += hdlc/ header-y += isdn/ @@ -34,13 +35,13 @@ header-y += atmsap.h header-y += atmsvc.h header-y += atm_zatm.h header-y += auto_fs4.h -header-y += auxvec.h header-y += ax25.h header-y += b1lli.h header-y += baycom.h header-y += bfs_fs.h header-y += blkpg.h header-y += bpqether.h +header-y += can.h header-y += cdk.h header-y += chio.h header-y += coda_psdev.h @@ -49,6 +50,7 @@ header-y += comstats.h header-y += const.h header-y += cgroupstats.h header-y += cycx_cfm.h +header-y += dlmconstants.h header-y += dlm_device.h header-y += dlm_netlink.h header-y += dm-ioctl.h @@ -72,7 +74,7 @@ header-y += gen_stats.h header-y += gigaset_dev.h header-y += hdsmart.h header-y += hysdn_if.h -header-y += i2c-dev.h +header-y += i2o-dev.h header-y += i8k.h header-y += if_arcnet.h 
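
crypto/skcipher.h above gives ablkciphers the same IV-generating request wrapper; usage mirrors the AEAD sketch earlier, with the ablkcipher handle in place of the AEAD one. A minimal sketch, assuming the tfm, scatterlists and buffers are already prepared:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int skcipher_givencrypt_once(struct crypto_ablkcipher *tfm,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int nbytes, u8 *iv,
				    u8 *giv, u64 seq,
				    crypto_completion_t done, void *ctx)
{
	struct skcipher_givcrypt_request *req;
	int err;

	req = skcipher_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, done, ctx);
	skcipher_givcrypt_set_crypt(req, src, dst, nbytes, iv);
	skcipher_givcrypt_set_giv(req, giv, seq);

	err = crypto_skcipher_givencrypt(req);
	if (err != -EINPROGRESS && err != -EBUSY)
		skcipher_givcrypt_free(req);
	return err;
}
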
header-y += if_bonding.h @@ -98,7 +100,6 @@ header-y += iso_fs.h header-y += ixjuser.h header-y += jffs2.h header-y += keyctl.h -header-y += kvm.h header-y += limits.h header-y += lock_dlm_plock.h header-y += magic.h @@ -157,7 +158,6 @@ header-y += veth.h header-y += video_decoder.h header-y += video_encoder.h header-y += videotext.h -header-y += vt.h header-y += x25.h unifdef-y += acct.h @@ -172,6 +172,7 @@ unifdef-y += atm.h unifdef-y += atm_tcp.h unifdef-y += audit.h unifdef-y += auto_fs.h +unifdef-y += auxvec.h unifdef-y += binfmts.h unifdef-y += capability.h unifdef-y += capi.h @@ -213,7 +214,7 @@ unifdef-y += hdreg.h unifdef-y += hiddev.h unifdef-y += hpet.h unifdef-y += i2c.h -unifdef-y += i2o-dev.h +unifdef-y += i2c-dev.h unifdef-y += icmp.h unifdef-y += icmpv6.h unifdef-y += if_addr.h @@ -228,7 +229,6 @@ unifdef-y += if_ltalk.h unifdef-y += if_link.h unifdef-y += if_pppol2tp.h unifdef-y += if_pppox.h -unifdef-y += if_shaper.h unifdef-y += if_tr.h unifdef-y += if_tun.h unifdef-y += if_vlan.h @@ -255,6 +255,7 @@ unifdef-y += kd.h unifdef-y += kernelcapi.h unifdef-y += kernel.h unifdef-y += keyboard.h +unifdef-$(CONFIG_HAVE_KVM) += kvm.h unifdef-y += llc.h unifdef-y += loop.h unifdef-y += lp.h @@ -348,6 +349,7 @@ unifdef-y += videodev.h unifdef-y += virtio_config.h unifdef-y += virtio_blk.h unifdef-y += virtio_net.h +unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h unifdef-y += watchdog.h diff --git a/include/linux/acpi.h b/include/linux/acpi.h index e3c16c981e46..63f2e6ed698f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -40,6 +40,7 @@ #include <acpi/acpi_drivers.h> #include <acpi/acpi_numa.h> #include <asm/acpi.h> +#include <linux/dmi.h> #ifdef CONFIG_ACPI @@ -192,7 +193,9 @@ extern int ec_transaction(u8 command, #endif /*CONFIG_ACPI_EC*/ extern int acpi_blacklisted(void); -extern void acpi_bios_year(char *s); +#ifdef CONFIG_DMI +extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); +#endif #ifdef CONFIG_ACPI_NUMA int acpi_get_pxm(acpi_handle handle); @@ -226,5 +229,5 @@ static inline int acpi_boot_table_init(void) return 0; } -#endif /* CONFIG_ACPI */ +#endif /* !CONFIG_ACPI */ #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h index 1d0ef1ae8036..7e3d2859be50 100644 --- a/include/linux/acpi_pmtmr.h +++ b/include/linux/acpi_pmtmr.h @@ -25,6 +25,8 @@ static inline u32 acpi_pm_read_early(void) return acpi_pm_read_verified() & ACPI_PM_MASK; } +extern void pmtimer_wait(unsigned); + #else static inline u32 acpi_pm_read_early(void) diff --git a/include/linux/ata.h b/include/linux/ata.h index e672e80202a8..78bbacaed8c4 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -286,9 +286,10 @@ enum { ATA_CBL_NONE = 0, ATA_CBL_PATA40 = 1, ATA_CBL_PATA80 = 2, - ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */ - ATA_CBL_PATA_UNK = 4, - ATA_CBL_SATA = 5, + ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */ + ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? 
*/ + ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */ + ATA_CBL_SATA = 6, /* SATA Status and Control Registers */ SCR_STATUS = 0, @@ -324,6 +325,13 @@ enum { ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ + + /* protocol flags */ + ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ + ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ + ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA, + ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ + ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ }; enum ata_tf_protocols { @@ -333,9 +341,9 @@ enum ata_tf_protocols { ATA_PROT_PIO, /* PIO data xfer */ ATA_PROT_DMA, /* DMA */ ATA_PROT_NCQ, /* NCQ */ - ATA_PROT_ATAPI, /* packet command, PIO data xfer*/ - ATA_PROT_ATAPI_NODATA, /* packet command, no data */ - ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */ + ATAPI_PROT_NODATA, /* packet command, no data */ + ATAPI_PROT_PIO, /* packet command, PIO data xfer*/ + ATAPI_PROT_DMA, /* packet command with special DMA sauce */ }; enum ata_ioctls { @@ -346,8 +354,8 @@ enum ata_ioctls { /* core structures */ struct ata_prd { - u32 addr; - u32 flags_len; + __le32 addr; + __le32 flags_len; }; struct ata_taskfile { @@ -373,13 +381,69 @@ struct ata_taskfile { u8 command; /* IO operation */ }; +/* + * protocol tests + */ +static inline unsigned int ata_prot_flags(u8 prot) +{ + switch (prot) { + case ATA_PROT_NODATA: + return 0; + case ATA_PROT_PIO: + return ATA_PROT_FLAG_PIO; + case ATA_PROT_DMA: + return ATA_PROT_FLAG_DMA; + case ATA_PROT_NCQ: + return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ; + case ATAPI_PROT_NODATA: + return ATA_PROT_FLAG_ATAPI; + case ATAPI_PROT_PIO: + return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO; + case ATAPI_PROT_DMA: + return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA; + } + return 0; +} + +static inline int ata_is_atapi(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI; +} + +static inline int ata_is_nodata(u8 prot) +{ + return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA); +} + +static inline int ata_is_pio(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO; +} + +static inline int ata_is_dma(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA; +} + +static inline int ata_is_ncq(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ; +} + +static inline int ata_is_data(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA; +} + +/* + * id tests + */ #define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0) #define ata_id_has_lba(id) ((id)[49] & (1 << 9)) #define ata_id_has_dma(id) ((id)[49] & (1 << 8)) #define ata_id_has_ncq(id) ((id)[76] & (1 << 8)) #define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1) #define ata_id_removeable(id) ((id)[0] & (1 << 7)) -#define ata_id_has_dword_io(id) ((id)[48] & (1 << 0)) #define ata_id_has_atapi_AN(id) \ ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ ((id)[78] & (1 << 5)) ) @@ -415,6 +479,7 @@ static inline bool ata_id_has_dipm(const u16 *id) return val & (1 << 3); } + static inline int ata_id_has_fua(const u16 *id) { if ((id[84] & 0xC000) != 0x4000) @@ -519,6 +584,26 @@ static inline int ata_id_is_sata(const u16 *id) return ata_id_major_version(id) >= 5 && id[93] == 0; } +static inline int ata_id_has_tpm(const u16 *id) +{ + /* The TPM bits are only valid on ATA8 */ + if (ata_id_major_version(id) < 8) + return 0; + if ((id[48] & 0xC000) != 0x4000) + return 0; + return id[48] & (1 << 0); +} + +static inline int ata_id_has_dword_io(const u16 
*id) +{ + /* ATA 8 reuses this flag for "trusted" computing */ + if (ata_id_major_version(id) > 7) + return 0; + if (id[48] & (1 << 0)) + return 1; + return 0; +} + static inline int ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command @@ -574,13 +659,6 @@ static inline int atapi_command_packet_set(const u16 *dev_id) return (dev_id[0] >> 8) & 0x1f; } -static inline int is_atapi_taskfile(const struct ata_taskfile *tf) -{ - return (tf->protocol == ATA_PROT_ATAPI) || - (tf->protocol == ATA_PROT_ATAPI_NODATA) || - (tf->protocol == ATA_PROT_ATAPI_DMA); -} - static inline int is_multi_taskfile(struct ata_taskfile *tf) { return (tf->command == ATA_CMD_READ_MULTI) || diff --git a/include/linux/atmbr2684.h b/include/linux/atmbr2684.h index 969fb6c9e1cc..52bf72affbba 100644 --- a/include/linux/atmbr2684.h +++ b/include/linux/atmbr2684.h @@ -14,6 +14,9 @@ #define BR2684_MEDIA_FDDI (3) #define BR2684_MEDIA_802_6 (4) /* 802.6 */ + /* used only at device creation: */ +#define BR2684_FLAG_ROUTED (1<<16) /* payload is routed, not bridged */ + /* * Is there FCS inbound on this VC? This currently isn't supported. */ @@ -36,15 +39,22 @@ #define BR2684_ENCAPS_AUTODETECT (2) /* Unsuported */ /* + * Is this VC bridged or routed? + */ + +#define BR2684_PAYLOAD_ROUTED (0) +#define BR2684_PAYLOAD_BRIDGED (1) + +/* * This is for the ATM_NEWBACKENDIF call - these are like socket families: * the first element of the structure is the backend number and the rest * is per-backend specific */ struct atm_newif_br2684 { - atm_backend_t backend_num; /* ATM_BACKEND_BR2684 */ - int media; /* BR2684_MEDIA_* */ - char ifname[IFNAMSIZ]; - int mtu; + atm_backend_t backend_num; /* ATM_BACKEND_BR2684 */ + int media; /* BR2684_MEDIA_*, flags in upper bits */ + char ifname[IFNAMSIZ]; + int mtu; }; /* @@ -55,10 +65,10 @@ struct atm_newif_br2684 { #define BR2684_FIND_BYNUM (1) #define BR2684_FIND_BYIFNAME (2) struct br2684_if_spec { - int method; /* BR2684_FIND_* */ + int method; /* BR2684_FIND_* */ union { - char ifname[IFNAMSIZ]; - int devnum; + char ifname[IFNAMSIZ]; + int devnum; } spec; }; @@ -68,16 +78,16 @@ struct br2684_if_spec { * is per-backend specific */ struct atm_backend_br2684 { - atm_backend_t backend_num; /* ATM_BACKEND_BR2684 */ + atm_backend_t backend_num; /* ATM_BACKEND_BR2684 */ struct br2684_if_spec ifspec; - int fcs_in; /* BR2684_FCSIN_* */ - int fcs_out; /* BR2684_FCSOUT_* */ - int fcs_auto; /* 1: fcs_{in,out} disabled if no FCS rx'ed */ - int encaps; /* BR2684_ENCAPS_* */ - int has_vpiid; /* 1: use vpn_id - Unsupported */ - __u8 vpn_id[7]; - int send_padding; /* unsupported */ - int min_size; /* we will pad smaller packets than this */ + int fcs_in; /* BR2684_FCSIN_* */ + int fcs_out; /* BR2684_FCSOUT_* */ + int fcs_auto; /* 1: fcs_{in,out} disabled if no FCS rx'ed */ + int encaps; /* BR2684_ENCAPS_* */ + int has_vpiid; /* 1: use vpn_id - Unsupported */ + __u8 vpn_id[7]; + int send_padding; /* unsupported */ + int min_size; /* we will pad smaller packets than this */ }; /* @@ -86,8 +96,8 @@ struct atm_backend_br2684 { * efficient per-if in/out filters, this support will be removed */ struct br2684_filter { - __be32 prefix; /* network byte order */ - __be32 netmask; /* 0 = disable filter */ + __be32 prefix; /* network byte order */ + __be32 netmask; /* 0 = disable filter */ }; struct br2684_filter_set { @@ -95,6 +105,11 @@ struct br2684_filter_set { struct br2684_filter filter; }; +enum br2684_payload { + p_routed = BR2684_PAYLOAD_ROUTED, + p_bridged = 
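
The ata.h changes above replace the single is_atapi_taskfile() test with per-protocol predicates built on ata_prot_flags(). A minimal sketch of classifying a taskfile protocol with the new helpers:

#include <linux/ata.h>

static const char *ata_prot_desc(u8 prot)
{
	if (ata_is_atapi(prot))
		return ata_is_dma(prot) ? "ATAPI DMA" : "ATAPI PIO/nodata";
	if (ata_is_ncq(prot))
		return "NCQ";
	if (ata_is_dma(prot))
		return "DMA";
	if (ata_is_pio(prot))
		return "PIO";
	return "non-data";
}
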
BR2684_PAYLOAD_BRIDGED, +}; + #define BR2684_SETFILT _IOW( 'a', ATMIOC_BACKEND + 0, \ struct br2684_filter_set) diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 2096e5c72827..a3d07c29d16c 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -359,7 +359,7 @@ struct atm_dev { struct proc_dir_entry *proc_entry; /* proc entry */ char *proc_name; /* proc entry name */ #endif - struct class_device class_dev; /* sysfs class device */ + struct device class_dev; /* sysfs device */ struct list_head dev_list; /* linkage */ }; @@ -461,7 +461,7 @@ static inline void atm_dev_put(struct atm_dev *dev) BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); if (dev->ops->dev_close) dev->ops->dev_close(dev); - class_device_put(&dev->class_dev); + put_device(&dev->class_dev); } } diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h index 8ff274933948..f5582332af04 100644 --- a/include/linux/attribute_container.h +++ b/include/linux/attribute_container.h @@ -17,6 +17,7 @@ struct attribute_container { struct list_head node; struct klist containers; struct class *class; + struct attribute_group *grp; struct class_device_attribute **attrs; int (*match)(struct attribute_container *, struct device *); #define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01 diff --git a/include/linux/audit.h b/include/linux/audit.h index c68781692838..bdd6f5de5fc4 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -115,6 +115,8 @@ #define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Not used */ #define AUDIT_MAC_IPSEC_DELSPD 1414 /* Not used */ #define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ +#define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */ +#define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ #define AUDIT_FIRST_KERN_ANOM_MSG 1700 #define AUDIT_LAST_KERN_ANOM_MSG 1799 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d18ee67b40f8..e18d4192f6e8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -34,83 +34,10 @@ struct sg_io_hdr; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ -/* - * This is the per-process anticipatory I/O scheduler state. - */ -struct as_io_context { - spinlock_t lock; - - void (*dtor)(struct as_io_context *aic); /* destructor */ - void (*exit)(struct as_io_context *aic); /* called on task exit */ - - unsigned long state; - atomic_t nr_queued; /* queued reads & sync writes */ - atomic_t nr_dispatched; /* number of requests gone to the drivers */ - - /* IO History tracking */ - /* Thinktime */ - unsigned long last_end_request; - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; - /* Layout pattern */ - unsigned int seek_samples; - sector_t last_request_pos; - u64 seek_total; - sector_t seek_mean; -}; - -struct cfq_queue; -struct cfq_io_context { - struct rb_node rb_node; - void *key; - - struct cfq_queue *cfqq[2]; - - struct io_context *ioc; - - unsigned long last_end_request; - sector_t last_request_pos; - - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; - - unsigned int seek_samples; - u64 seek_total; - sector_t seek_mean; - - struct list_head queue_list; - - void (*dtor)(struct io_context *); /* destructor */ - void (*exit)(struct io_context *); /* called on task exit */ -}; - -/* - * This is the per-process I/O subsystem state. It is refcounted and - * kmalloc'ed. 
Currently all fields are modified in process io context - * (apart from the atomic refcount), so require no locking. - */ -struct io_context { - atomic_t refcount; - struct task_struct *task; - - unsigned int ioprio_changed; - - /* - * For request batching - */ - unsigned long last_waited; /* Time last woken after wait for request */ - int nr_batch_requests; /* Number of requests left in the batch */ - - struct as_io_context *aic; - struct rb_root cic_root; - void *ioc_data; -}; - -void put_io_context(struct io_context *ioc); +int put_io_context(struct io_context *ioc); void exit_io_context(void); struct io_context *get_io_context(gfp_t gfp_flags, int node); +struct io_context *alloc_io_context(gfp_t gfp_flags, int node); void copy_io_context(struct io_context **pdst, struct io_context **psrc); void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); @@ -143,8 +70,6 @@ enum rq_cmd_type_bits { * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver * private REQ_LB opcodes to differentiate what type of request this is */ - REQ_TYPE_ATA_CMD, - REQ_TYPE_ATA_TASK, REQ_TYPE_ATA_TASKFILE, REQ_TYPE_ATA_PC, }; @@ -431,6 +356,8 @@ struct request_queue unsigned int max_segment_size; unsigned long seg_boundary_mask; + void *dma_drain_buffer; + unsigned int dma_drain_size; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; @@ -539,6 +466,8 @@ enum { #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) +/* rq->queuelist of dequeued request must be list_empty() */ +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) @@ -718,29 +647,32 @@ static inline void blk_run_address_space(struct address_space *mapping) } /* - * end_request() and friends. Must be called with the request queue spinlock - * acquired. All functions called within end_request() _must_be_ atomic. + * blk_end_request() and friends. + * __blk_end_request() and end_request() must be called with + * the request queue spinlock acquired. * * Several drivers define their own end_request and call - * end_that_request_first() and end_that_request_last() - * for parts of the original function. This prevents - * code duplication in drivers. + * blk_end_request() for parts of the original function. + * This prevents code duplication in drivers. */ -extern int end_that_request_first(struct request *, int, int); -extern int end_that_request_chunk(struct request *, int, int); -extern void end_that_request_last(struct request *, int); +extern int blk_end_request(struct request *rq, int error, int nr_bytes); +extern int __blk_end_request(struct request *rq, int error, int nr_bytes); +extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, + int bidi_bytes); extern void end_request(struct request *, int); extern void end_queued_request(struct request *, int); extern void end_dequeued_request(struct request *, int); +extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes, + int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); /* - * end_that_request_first/chunk() takes an uptodate argument. we account - * any value <= as an io error. 0 means -EIO for compatability reasons, - * any other < 0 value is the direct error type. 
An uptodate value of - * 1 indicates successful io completion + * blk_end_request() takes bytes instead of sectors as a complete size. + * blk_rq_bytes() returns bytes left to complete in the entire request. + * blk_rq_cur_bytes() returns bytes left to complete in the current segment. */ -#define end_io_error(uptodate) (unlikely((uptodate) <= 0)) +extern unsigned int blk_rq_bytes(struct request *rq); +extern unsigned int blk_rq_cur_bytes(struct request *rq); static inline void blkdev_dequeue_request(struct request *req) { @@ -762,10 +694,13 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern int blk_queue_dma_drain(struct request_queue *q, void *buf, + unsigned int size); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); @@ -837,12 +772,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev) static inline int queue_dma_alignment(struct request_queue *q) { - int retval = 511; - - if (q && q->dma_alignment) - retval = q->dma_alignment; - - return retval; + return q ? 
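
The blkdev.h hunk above retires end_that_request_first()/end_that_request_last() in favour of blk_end_request(), which counts completion in bytes and takes 0 or a negative errno instead of an "uptodate" flag. A minimal sketch of a driver completion path, assuming my_end_io() is a hypothetical helper called without the queue lock held:

#include <linux/blkdev.h>

static void my_end_io(struct request *rq, int error, unsigned int nr_bytes)
{
	/* returns nonzero while the request still has bytes outstanding */
	if (blk_end_request(rq, error, nr_bytes))
		return;

	/* the whole request has now been completed and released */
}
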
q->dma_alignment : 511; } /* assumes size > 256 */ @@ -895,6 +825,13 @@ static inline void exit_io_context(void) { } +struct io_context; +static inline int put_io_context(struct io_context *ioc) +{ + return 1; +} + + #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7e11d23ac36a..cfc3147e5cf9 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -148,7 +148,7 @@ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); extern int do_blk_trace_setup(struct request_queue *q, - struct block_device *bdev, struct blk_user_trace_setup *buts); + char *name, dev_t dev, struct blk_user_trace_setup *buts); /** @@ -282,6 +282,11 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); } +extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + char __user *arg); +extern int blk_trace_startstop(struct request_queue *q, int start); +extern int blk_trace_remove(struct request_queue *q); + #else /* !CONFIG_BLK_DEV_IO_TRACE */ #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) #define blk_trace_shutdown(q) do { } while (0) @@ -290,7 +295,10 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, #define blk_add_trace_generic(q, rq, rw, what) do { } while (0) #define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0) #define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0) -#define do_blk_trace_setup(q, bdev, buts) (-ENOTTY) +#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) +#define blk_trace_setup(q, name, dev, arg) (-ENOTTY) +#define blk_trace_startstop(q, start) (-ENOTTY) +#define blk_trace_remove(q) (-ENOTTY) #endif /* CONFIG_BLK_DEV_IO_TRACE */ #endif /* __KERNEL__ */ #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index da0d83fbadc0..e98801f06dcc 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -192,6 +192,8 @@ int sync_dirty_buffer(struct buffer_head *bh); int submit_bh(int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); +int bh_uptodate_or_lock(struct buffer_head *bh); +int bh_submit_read(struct buffer_head *bh); extern int buffer_heads_over_limit; diff --git a/include/linux/can.h b/include/linux/can.h new file mode 100644 index 000000000000..d18333302cbd --- /dev/null +++ b/include/linux/can.h @@ -0,0 +1,111 @@ +/* + * linux/can.h + * + * Definitions for CAN network layer (socket addr / CAN frame / CAN filter) + * + * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Urs Thuermann <urs.thuermann@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. 
+ * + * Send feedback to <socketcan-users@lists.berlios.de> + * + */ + +#ifndef CAN_H +#define CAN_H + +#include <linux/types.h> +#include <linux/socket.h> + +/* controller area network (CAN) kernel definitions */ + +/* special address description flags for the CAN_ID */ +#define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */ +#define CAN_RTR_FLAG 0x40000000U /* remote transmission request */ +#define CAN_ERR_FLAG 0x20000000U /* error frame */ + +/* valid bits in CAN ID for frame formats */ +#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */ +#define CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */ +#define CAN_ERR_MASK 0x1FFFFFFFU /* omit EFF, RTR, ERR flags */ + +/* + * Controller Area Network Identifier structure + * + * bit 0-28 : CAN identifier (11/29 bit) + * bit 29 : error frame flag (0 = data frame, 1 = error frame) + * bit 30 : remote transmission request flag (1 = rtr frame) + * bit 31 : frame format flag (0 = standard 11 bit, 1 = extended 29 bit) + */ +typedef __u32 canid_t; + +/* + * Controller Area Network Error Frame Mask structure + * + * bit 0-28 : error class mask (see include/linux/can/error.h) + * bit 29-31 : set to zero + */ +typedef __u32 can_err_mask_t; + +/** + * struct can_frame - basic CAN frame structure + * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above. + * @can_dlc: the data length field of the CAN frame + * @data: the CAN frame payload. + */ +struct can_frame { + canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ + __u8 can_dlc; /* data length code: 0 .. 8 */ + __u8 data[8] __attribute__((aligned(8))); +}; + +/* particular protocols of the protocol family PF_CAN */ +#define CAN_RAW 1 /* RAW sockets */ +#define CAN_BCM 2 /* Broadcast Manager */ +#define CAN_TP16 3 /* VAG Transport Protocol v1.6 */ +#define CAN_TP20 4 /* VAG Transport Protocol v2.0 */ +#define CAN_MCNET 5 /* Bosch MCNet */ +#define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */ +#define CAN_NPROTO 7 + +#define SOL_CAN_BASE 100 + +/** + * struct sockaddr_can - the sockaddr structure for CAN sockets + * @can_family: address family number AF_CAN. + * @can_ifindex: CAN network interface index. + * @can_addr: protocol specific address information + */ +struct sockaddr_can { + sa_family_t can_family; + int can_ifindex; + union { + /* transport protocol class address information (e.g. ISOTP) */ + struct { canid_t rx_id, tx_id; } tp; + + /* reserved for future CAN protocols address information */ + } can_addr; +}; + +/** + * struct can_filter - CAN ID based filter in can_register(). + * @can_id: relevant bits of CAN ID which are not masked out. + * @can_mask: CAN mask (see description) + * + * Description: + * A filter matches, when + * + * <received_can_id> & mask == can_id & mask + * + * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can + * filter for error frames (CAN_ERR_FLAG bit set in mask). 
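
linux/can.h above defines the address, frame and filter structures for PF_CAN sockets; the filter rule is "<received_can_id> & mask == can_id & mask". A user-space sketch that installs a receive filter matching standard IDs 0x120 to 0x12F and sends one frame, assuming an interface name passed by the caller and that SOL_CAN_RAW/CAN_RAW_FILTER come from linux/can/raw.h:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/raw.h>

#ifndef PF_CAN
#define PF_CAN 29		/* from linux/socket.h; older libcs lack it */
#define AF_CAN PF_CAN
#endif

int send_one_frame(const char *ifname)
{
	/* only IDs 0x120-0x12F will be delivered to this socket */
	struct can_filter flt = { .can_id = 0x120, .can_mask = 0x7F0 };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &flt, sizeof(flt));
	bind(s, (struct sockaddr *)&addr, sizeof(addr));
	write(s, &frame, sizeof(frame));	/* always a full struct can_frame */
	close(s);
	return 0;
}
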
+ */ +struct can_filter { + canid_t can_id; + canid_t can_mask; +}; + +#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ + +#endif /* CAN_H */ diff --git a/include/linux/can/Kbuild b/include/linux/can/Kbuild new file mode 100644 index 000000000000..eff898aac02b --- /dev/null +++ b/include/linux/can/Kbuild @@ -0,0 +1,3 @@ +header-y += raw.h +header-y += bcm.h +header-y += error.h diff --git a/include/linux/can/bcm.h b/include/linux/can/bcm.h new file mode 100644 index 000000000000..7f293273c444 --- /dev/null +++ b/include/linux/can/bcm.h @@ -0,0 +1,65 @@ +/* + * linux/can/bcm.h + * + * Definitions for CAN Broadcast Manager (BCM) + * + * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + * Send feedback to <socketcan-users@lists.berlios.de> + * + */ + +#ifndef CAN_BCM_H +#define CAN_BCM_H + +/** + * struct bcm_msg_head - head of messages to/from the broadcast manager + * @opcode: opcode, see enum below. + * @flags: special flags, see below. + * @count: number of frames to send before changing interval. + * @ival1: interval for the first @count frames. + * @ival2: interval for the following frames. + * @can_id: CAN ID of frames to be sent or received. + * @nframes: number of frames appended to the message head. + * @frames: array of CAN frames. + */ +struct bcm_msg_head { + __u32 opcode; + __u32 flags; + __u32 count; + struct timeval ival1, ival2; + canid_t can_id; + __u32 nframes; + struct can_frame frames[0]; +}; + +enum { + TX_SETUP = 1, /* create (cyclic) transmission task */ + TX_DELETE, /* remove (cyclic) transmission task */ + TX_READ, /* read properties of (cyclic) transmission task */ + TX_SEND, /* send one CAN frame */ + RX_SETUP, /* create RX content filter subscription */ + RX_DELETE, /* remove RX content filter subscription */ + RX_READ, /* read properties of RX content filter subscription */ + TX_STATUS, /* reply to TX_READ request */ + TX_EXPIRED, /* notification on performed transmissions (count=0) */ + RX_STATUS, /* reply to RX_READ request */ + RX_TIMEOUT, /* cyclic message is absent */ + RX_CHANGED /* updated CAN frame (detected content change) */ +}; + +#define SETTIMER 0x0001 +#define STARTTIMER 0x0002 +#define TX_COUNTEVT 0x0004 +#define TX_ANNOUNCE 0x0008 +#define TX_CP_CAN_ID 0x0010 +#define RX_FILTER_ID 0x0020 +#define RX_CHECK_DLC 0x0040 +#define RX_NO_AUTOTIMER 0x0080 +#define RX_ANNOUNCE_RESUME 0x0100 +#define TX_RESET_MULTI_IDX 0x0200 +#define RX_RTR_FRAME 0x0400 + +#endif /* CAN_BCM_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h new file mode 100644 index 000000000000..e9ca210ffa5b --- /dev/null +++ b/include/linux/can/core.h @@ -0,0 +1,64 @@ +/* + * linux/can/core.h + * + * Protoypes and definitions for CAN protocol modules using the PF_CAN core + * + * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Urs Thuermann <urs.thuermann@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + * Send feedback to <socketcan-users@lists.berlios.de> + * + */ + +#ifndef CAN_CORE_H +#define CAN_CORE_H + +#include <linux/can.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> + +#define CAN_VERSION "20071116" + +/* increment this number each time you change some user-space interface */ +#define CAN_ABI_VERSION "8" + +#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION + +#define DNAME(dev) ((dev) ? 
(dev)->name : "any") + +/** + * struct can_proto - CAN protocol structure + * @type: type argument in socket() syscall, e.g. SOCK_DGRAM. + * @protocol: protocol number in socket() syscall. + * @capability: capability needed to open the socket, or -1 for no restriction. + * @ops: pointer to struct proto_ops for sock->ops. + * @prot: pointer to struct proto structure. + */ +struct can_proto { + int type; + int protocol; + int capability; + struct proto_ops *ops; + struct proto *prot; +}; + +/* function prototypes for the CAN networklayer core (af_can.c) */ + +extern int can_proto_register(struct can_proto *cp); +extern void can_proto_unregister(struct can_proto *cp); + +extern int can_rx_register(struct net_device *dev, canid_t can_id, + canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data, char *ident); + +extern void can_rx_unregister(struct net_device *dev, canid_t can_id, + canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data); + +extern int can_send(struct sk_buff *skb, int loop); + +#endif /* CAN_CORE_H */ diff --git a/include/linux/can/error.h b/include/linux/can/error.h new file mode 100644 index 000000000000..d4127fd9e681 --- /dev/null +++ b/include/linux/can/error.h @@ -0,0 +1,93 @@ +/* + * linux/can/error.h + * + * Definitions of the CAN error frame to be filtered and passed to the user. + * + * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + * Send feedback to <socketcan-users@lists.berlios.de> + * + */ + +#ifndef CAN_ERROR_H +#define CAN_ERROR_H + +#define CAN_ERR_DLC 8 /* dlc for error frames */ + +/* error class (mask) in can_id */ +#define CAN_ERR_TX_TIMEOUT 0x00000001U /* TX timeout (by netdevice driver) */ +#define CAN_ERR_LOSTARB 0x00000002U /* lost arbitration / data[0] */ +#define CAN_ERR_CRTL 0x00000004U /* controller problems / data[1] */ +#define CAN_ERR_PROT 0x00000008U /* protocol violations / data[2..3] */ +#define CAN_ERR_TRX 0x00000010U /* transceiver status / data[4] */ +#define CAN_ERR_ACK 0x00000020U /* received no ACK on transmission */ +#define CAN_ERR_BUSOFF 0x00000040U /* bus off */ +#define CAN_ERR_BUSERROR 0x00000080U /* bus error (may flood!) */ +#define CAN_ERR_RESTARTED 0x00000100U /* controller restarted */ + +/* arbitration lost in bit ... 
/ data[0] */ +#define CAN_ERR_LOSTARB_UNSPEC 0x00 /* unspecified */ + /* else bit number in bitstream */ + +/* error status of CAN-controller / data[1] */ +#define CAN_ERR_CRTL_UNSPEC 0x00 /* unspecified */ +#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /* RX buffer overflow */ +#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /* TX buffer overflow */ +#define CAN_ERR_CRTL_RX_WARNING 0x04 /* reached warning level for RX errors */ +#define CAN_ERR_CRTL_TX_WARNING 0x08 /* reached warning level for TX errors */ +#define CAN_ERR_CRTL_RX_PASSIVE 0x10 /* reached error passive status RX */ +#define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */ + /* (at least one error counter exceeds */ + /* the protocol-defined level of 127) */ + +/* error in CAN protocol (type) / data[2] */ +#define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */ +#define CAN_ERR_PROT_BIT 0x01 /* single bit error */ +#define CAN_ERR_PROT_FORM 0x02 /* frame format error */ +#define CAN_ERR_PROT_STUFF 0x04 /* bit stuffing error */ +#define CAN_ERR_PROT_BIT0 0x08 /* unable to send dominant bit */ +#define CAN_ERR_PROT_BIT1 0x10 /* unable to send recessive bit */ +#define CAN_ERR_PROT_OVERLOAD 0x20 /* bus overload */ +#define CAN_ERR_PROT_ACTIVE 0x40 /* active error announcement */ +#define CAN_ERR_PROT_TX 0x80 /* error occured on transmission */ + +/* error in CAN protocol (location) / data[3] */ +#define CAN_ERR_PROT_LOC_UNSPEC 0x00 /* unspecified */ +#define CAN_ERR_PROT_LOC_SOF 0x03 /* start of frame */ +#define CAN_ERR_PROT_LOC_ID28_21 0x02 /* ID bits 28 - 21 (SFF: 10 - 3) */ +#define CAN_ERR_PROT_LOC_ID20_18 0x06 /* ID bits 20 - 18 (SFF: 2 - 0 )*/ +#define CAN_ERR_PROT_LOC_SRTR 0x04 /* substitute RTR (SFF: RTR) */ +#define CAN_ERR_PROT_LOC_IDE 0x05 /* identifier extension */ +#define CAN_ERR_PROT_LOC_ID17_13 0x07 /* ID bits 17-13 */ +#define CAN_ERR_PROT_LOC_ID12_05 0x0F /* ID bits 12-5 */ +#define CAN_ERR_PROT_LOC_ID04_00 0x0E /* ID bits 4-0 */ +#define CAN_ERR_PROT_LOC_RTR 0x0C /* RTR */ +#define CAN_ERR_PROT_LOC_RES1 0x0D /* reserved bit 1 */ +#define CAN_ERR_PROT_LOC_RES0 0x09 /* reserved bit 0 */ +#define CAN_ERR_PROT_LOC_DLC 0x0B /* data length code */ +#define CAN_ERR_PROT_LOC_DATA 0x0A /* data section */ +#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /* CRC sequence */ +#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /* CRC delimiter */ +#define CAN_ERR_PROT_LOC_ACK 0x19 /* ACK slot */ +#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /* ACK delimiter */ +#define CAN_ERR_PROT_LOC_EOF 0x1A /* end of frame */ +#define CAN_ERR_PROT_LOC_INTERM 0x12 /* intermission */ + +/* error status of CAN-transceiver / data[4] */ +/* CANH CANL */ +#define CAN_ERR_TRX_UNSPEC 0x00 /* 0000 0000 */ +#define CAN_ERR_TRX_CANH_NO_WIRE 0x04 /* 0000 0100 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_BAT 0x05 /* 0000 0101 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_VCC 0x06 /* 0000 0110 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_GND 0x07 /* 0000 0111 */ +#define CAN_ERR_TRX_CANL_NO_WIRE 0x40 /* 0100 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_BAT 0x50 /* 0101 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_VCC 0x60 /* 0110 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */ + +/* controller specific additional information / data[5..7] */ + +#endif /* CAN_ERROR_H */ diff --git a/include/linux/can/raw.h b/include/linux/can/raw.h new file mode 100644 index 000000000000..b2a0f87492c5 --- /dev/null +++ b/include/linux/can/raw.h @@ -0,0 +1,31 @@ +/* + * linux/can/raw.h + * + * Definitions for raw CAN sockets + * + * Authors: 
Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Urs Thuermann <urs.thuermann@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + * Send feedback to <socketcan-users@lists.berlios.de> + * + */ + +#ifndef CAN_RAW_H +#define CAN_RAW_H + +#include <linux/can.h> + +#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW) + +/* for socket options affecting the socket (not the global system) */ + +enum { + CAN_RAW_FILTER = 1, /* set 0 .. n can_filter(s) */ + CAN_RAW_ERR_FILTER, /* set filter for error frames */ + CAN_RAW_LOOPBACK, /* local loopback (default:on) */ + CAN_RAW_RECV_OWN_MSGS /* receive my own msgs (default:off) */ +}; + +#endif diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index c6d3e22c0624..fcdc11b9609b 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -451,6 +451,7 @@ struct cdrom_generic_command #define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e #define GPCMD_READ_10 0x28 #define GPCMD_READ_12 0xa8 +#define GPCMD_READ_BUFFER 0x3c #define GPCMD_READ_BUFFER_CAPACITY 0x5c #define GPCMD_READ_CDVD_CAPACITY 0x25 #define GPCMD_READ_CD 0xbe @@ -480,7 +481,9 @@ struct cdrom_generic_command #define GPCMD_TEST_UNIT_READY 0x00 #define GPCMD_VERIFY_10 0x2f #define GPCMD_WRITE_10 0x2a +#define GPCMD_WRITE_12 0xaa #define GPCMD_WRITE_AND_VERIFY_10 0x2e +#define GPCMD_WRITE_BUFFER 0x3b /* This is listed as optional in ATAPI 2.6, but is (curiously) * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji * Table 377 as an MMC command for SCSi devices though... Most ATAPI diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 107787aacb64..85778a4b1209 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -103,7 +103,7 @@ struct clocksource { #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 /* simplify initialization of mask field */ -#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1) +#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? 
((1ULL<<(bits))-1) : -1) /** * clocksource_khz2mult - calculates mult from khz and shift @@ -215,6 +215,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, /* used to install a new clocksource */ extern int clocksource_register(struct clocksource*); +extern void clocksource_unregister(struct clocksource*); extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_resume(void); diff --git a/include/linux/compat.h b/include/linux/compat.h index 0e69d2cf14aa..d38655f2be70 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -191,6 +191,10 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct compat_timeval __user *tvp); +asmlinkage long compat_sys_wait4(compat_pid_t pid, + compat_uint_t *stat_addr, int options, + struct compat_rusage *ru); + #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) #define BITS_TO_COMPAT_LONGS(bits) \ @@ -239,6 +243,17 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, const compat_ulong_t __user *new_nodes); +extern int compat_ptrace_request(struct task_struct *child, + compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE +extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); +asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + compat_long_t addr, compat_long_t data); +#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */ + /* * epoll (fs/eventpoll.c) compat bits follow ... */ diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index 2d8c0f48f55e..e5eb795f78a1 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -7,10 +7,8 @@ #if __GNUC_MINOR__ >= 3 # define __used __attribute__((__used__)) -# define __attribute_used__ __used /* deprecated */ #else # define __used __attribute__((__unused__)) -# define __attribute_used__ __used /* deprecated */ #endif #if __GNUC_MINOR__ >= 4 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index ee7ca5de970c..0ab3a3232330 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -15,7 +15,6 @@ #endif #define __used __attribute__((__used__)) -#define __attribute_used__ __used /* deprecated */ #define __must_check __attribute__((warn_unused_result)) #define __compiler_offsetof(a,b) __builtin_offsetof(a,b) #define __always_inline inline __attribute__((always_inline)) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c68b67b86ef1..d0e17e1657dc 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -126,10 +126,6 @@ extern void __chk_io_ptr(const volatile void __iomem *); * Mark functions that are referenced only in inline assembly as __used so * the code is emitted even though it appears to be unreferenced. 
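What the __used annotation buys is easiest to see on a symbol the compiler would otherwise treat as dead; a kernel-style sketch, with the name and contents purely illustrative:

/* referenced only from inline assembly or a linker script, so gcc sees
 * no C user; __used forces the definition to be emitted anyway */
static const char example_tag[] __used = "example";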
*/ -#ifndef __attribute_used__ -# define __attribute_used__ /* deprecated */ -#endif - #ifndef __used # define __used /* unimplemented */ #endif @@ -175,4 +171,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); #define __cold #endif +/* Simple shorthand for a section definition */ +#ifndef __section +# define __section(S) __attribute__ ((__section__(#S))) +#endif + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/connector.h b/include/linux/connector.h index 13fc4541bf23..da6dd957f908 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -112,7 +112,6 @@ struct cn_queue_dev { struct list_head queue_list; spinlock_t queue_lock; - int netlink_groups; struct sock *nls; }; @@ -133,15 +132,13 @@ struct cn_callback_data { struct cn_callback_entry { struct list_head callback_entry; - struct cn_callback *cb; struct work_struct work; struct cn_queue_dev *pdev; struct cn_callback_id id; struct cn_callback_data data; - int seq, group; - struct sock *nls; + u32 seq, group; }; struct cn_ctl_entry { diff --git a/include/linux/const.h b/include/linux/const.h index 07b300bfe34b..c22c707c455d 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -7,13 +7,18 @@ * C code. Therefore we cannot annotate them always with * 'UL' and other type specifiers unilaterally. We * use the following macros to deal with this. + * + * Similarly, _AT() will cast an expression with a type in C, but + * leave it unchanged in asm. */ #ifdef __ASSEMBLY__ #define _AC(X,Y) X +#define _AT(T,X) X #else #define __AC(X,Y) (X##Y) #define _AC(X,Y) __AC(X,Y) +#define _AT(T,X) ((T)(X)) #endif #endif /* !(_LINUX_CONST_H) */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 92f2029a34f3..0be8d65bc3c8 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -71,18 +71,27 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) int cpu_up(unsigned int cpu); +extern void cpu_hotplug_init(void); + #else static inline int register_cpu_notifier(struct notifier_block *nb) { return 0; } + static inline void unregister_cpu_notifier(struct notifier_block *nb) { } +static inline void cpu_hotplug_init(void) +{ +} + #endif /* CONFIG_SMP */ extern struct sysdev_class cpu_sysdev_class; +extern void cpu_maps_update_begin(void); +extern void cpu_maps_update_done(void); #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. */ @@ -97,8 +106,8 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) mutex_unlock(cpu_hp_mutex); } -extern void lock_cpu_hotplug(void); -extern void unlock_cpu_hotplug(void); +extern void get_online_cpus(void); +extern void put_online_cpus(void); #define hotcpu_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = pri }; \ @@ -115,8 +124,8 @@ static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) { } -#define lock_cpu_hotplug() do { } while (0) -#define unlock_cpu_hotplug() do { } while (0) +#define get_online_cpus() do { } while (0) +#define put_online_cpus() do { } while (0) #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. 
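The get_online_cpus()/put_online_cpus() pair that replaces lock_cpu_hotplug()/unlock_cpu_hotplug() above is used the same way as the old calls: hold it around any walk of the online CPUs that must not race with hotplug. A minimal kernel-side sketch of a converted caller (the per-CPU work shown is hypothetical):

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

static void example_visit_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* CPUs cannot come or go until released */
	for_each_online_cpu(cpu) {
		/* per-CPU work that must not race with CPU hotplug */
		pr_debug("cpu %d is online\n", cpu);
	}
	put_online_cpus();
}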
*/ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 85bd790c201e..7047f58306a7 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -218,8 +218,8 @@ int __first_cpu(const cpumask_t *srcp); int __next_cpu(int n, const cpumask_t *srcp); #define next_cpu(n, src) __next_cpu((n), &(src)) #else -#define first_cpu(src) 0 -#define next_cpu(n, src) 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) #endif #define cpumask_of_cpu(cpu) \ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index f3110ebe894a..5e02d1b46370 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -33,10 +33,13 @@ #define CRYPTO_ALG_TYPE_DIGEST 0x00000002 #define CRYPTO_ALG_TYPE_HASH 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 -#define CRYPTO_ALG_TYPE_COMPRESS 0x00000005 -#define CRYPTO_ALG_TYPE_AEAD 0x00000006 +#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 +#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008 +#define CRYPTO_ALG_TYPE_AEAD 0x00000009 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 #define CRYPTO_ALG_DEAD 0x00000020 @@ -50,6 +53,12 @@ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* + * This bit is set for symmetric key ciphers that have already been wrapped + * with a generic IV generator to prevent them from being wrapped again. + */ +#define CRYPTO_ALG_GENIV 0x00000200 + +/* * Transform masks and values (for crt_flags). */ #define CRYPTO_TFM_REQ_MASK 0x000fff00 @@ -81,13 +90,11 @@ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN #elif defined(ARCH_SLAB_MINALIGN) #define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN +#else +#define CRYPTO_MINALIGN __alignof__(unsigned long long) #endif -#ifdef CRYPTO_MINALIGN #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) -#else -#define CRYPTO_MINALIGN_ATTR -#endif struct scatterlist; struct crypto_ablkcipher; @@ -97,6 +104,8 @@ struct crypto_blkcipher; struct crypto_hash; struct crypto_tfm; struct crypto_type; +struct aead_givcrypt_request; +struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -176,6 +185,10 @@ struct ablkcipher_alg { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + + const char *geniv; unsigned int min_keysize; unsigned int max_keysize; @@ -185,11 +198,16 @@ struct ablkcipher_alg { struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); int (*encrypt)(struct aead_request *req); int (*decrypt)(struct aead_request *req); + int (*givencrypt)(struct aead_givcrypt_request *req); + int (*givdecrypt)(struct aead_givcrypt_request *req); + + const char *geniv; unsigned int ivsize; - unsigned int authsize; + unsigned int maxauthsize; }; struct blkcipher_alg { @@ -202,6 +220,8 @@ struct blkcipher_alg { struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); + const char *geniv; + unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; @@ -317,6 +337,11 @@ struct ablkcipher_tfm { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct 
ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + + struct crypto_ablkcipher *base; + unsigned int ivsize; unsigned int reqsize; }; @@ -326,6 +351,11 @@ struct aead_tfm { unsigned int keylen); int (*encrypt)(struct aead_request *req); int (*decrypt)(struct aead_request *req); + int (*givencrypt)(struct aead_givcrypt_request *req); + int (*givdecrypt)(struct aead_givcrypt_request *req); + + struct crypto_aead *base; + unsigned int ivsize; unsigned int authsize; unsigned int reqsize; @@ -525,17 +555,23 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( return (struct crypto_ablkcipher *)tfm; } -static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( - const char *alg_name, u32 type, u32 mask) +static inline u32 crypto_skcipher_type(u32 type) { - type &= ~CRYPTO_ALG_TYPE_MASK; + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + return type; +} - return __crypto_ablkcipher_cast( - crypto_alloc_base(alg_name, type, mask)); +static inline u32 crypto_skcipher_mask(u32 mask) +{ + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; + return mask; } +struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) { @@ -550,11 +586,8 @@ static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, u32 mask) { - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - - return crypto_has_alg(alg_name, type, mask); + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); } static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( @@ -601,7 +634,9 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - return crypto_ablkcipher_crt(tfm)->setkey(tfm, key, keylen); + struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); + + return crt->setkey(crt->base, key, keylen); } static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( @@ -633,7 +668,7 @@ static inline unsigned int crypto_ablkcipher_reqsize( static inline void ablkcipher_request_set_tfm( struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) { - req->base.tfm = crypto_ablkcipher_tfm(tfm); + req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); } static inline struct ablkcipher_request *ablkcipher_request_cast( @@ -686,15 +721,7 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) return (struct crypto_aead *)tfm; } -static inline struct crypto_aead *crypto_alloc_aead(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_AEAD; - mask |= CRYPTO_ALG_TYPE_MASK; - - return __crypto_aead_cast(crypto_alloc_base(alg_name, type, mask)); -} +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) { @@ -749,9 +776,13 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { 
- return crypto_aead_crt(tfm)->setkey(tfm, key, keylen); + struct aead_tfm *crt = crypto_aead_crt(tfm); + + return crt->setkey(crt->base, key, keylen); } +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) { return __crypto_aead_cast(req->base.tfm); @@ -775,7 +806,7 @@ static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) static inline void aead_request_set_tfm(struct aead_request *req, struct crypto_aead *tfm) { - req->base.tfm = crypto_aead_tfm(tfm); + req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); } static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, @@ -841,9 +872,9 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; + mask |= CRYPTO_ALG_TYPE_MASK; return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); } @@ -861,9 +892,9 @@ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; + mask |= CRYPTO_ALG_TYPE_MASK; return crypto_has_alg(alg_name, type, mask); } @@ -1081,6 +1112,7 @@ static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_HASH; mask |= CRYPTO_ALG_TYPE_HASH_MASK; @@ -1100,6 +1132,7 @@ static inline void crypto_free_hash(struct crypto_hash *tfm) static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_HASH; mask |= CRYPTO_ALG_TYPE_HASH_MASK; diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 333c3ea82a5d..484e45c7c89a 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -205,6 +205,7 @@ struct dccp_so_feat { #define DCCP_SOCKOPT_CHANGE_L 3 #define DCCP_SOCKOPT_CHANGE_R 4 #define DCCP_SOCKOPT_GET_CUR_MPS 5 +#define DCCP_SOCKOPT_SERVER_TIMEWAIT 6 #define DCCP_SOCKOPT_SEND_CSCOV 10 #define DCCP_SOCKOPT_RECV_CSCOV 11 #define DCCP_SOCKOPT_CCID_RX_INFO 128 @@ -227,37 +228,50 @@ struct dccp_so_feat { #include <net/tcp_states.h> enum dccp_state { - DCCP_OPEN = TCP_ESTABLISHED, - DCCP_REQUESTING = TCP_SYN_SENT, - DCCP_PARTOPEN = TCP_FIN_WAIT1, /* FIXME: - This mapping is horrible, but TCP has - no matching state for DCCP_PARTOPEN, - as TCP_SYN_RECV is already used by - DCCP_RESPOND, why don't stop using TCP - mapping of states? OK, now we don't use - sk_stream_sendmsg anymore, so doesn't - seem to exist any reason for us to - do the TCP mapping here */ - DCCP_LISTEN = TCP_LISTEN, - DCCP_RESPOND = TCP_SYN_RECV, - DCCP_CLOSING = TCP_CLOSING, - DCCP_TIME_WAIT = TCP_TIME_WAIT, - DCCP_CLOSED = TCP_CLOSE, - DCCP_MAX_STATES = TCP_MAX_STATES, + DCCP_OPEN = TCP_ESTABLISHED, + DCCP_REQUESTING = TCP_SYN_SENT, + DCCP_LISTEN = TCP_LISTEN, + DCCP_RESPOND = TCP_SYN_RECV, + /* + * States involved in closing a DCCP connection: + * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq. 
+ * + * 2) CLOSING can have three different meanings (RFC 4340, 8.3): + * a. Client has performed active-close, has sent a Close to the server + * from state OPEN or PARTOPEN, and is waiting for the final Reset + * (in this case, SOCK_DONE == 1). + * b. Client is asked to perform passive-close, by receiving a CloseReq + * in (PART)OPEN state. It sends a Close and waits for final Reset + * (in this case, SOCK_DONE == 0). + * c. Server performs an active-close as in (a), keeps TIMEWAIT state. + * + * 3) The following intermediate states are employed to give passively + * closing nodes a chance to process their unread data: + * - PASSIVE_CLOSE (from OPEN => CLOSED) and + * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above). + */ + DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1, + DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */ + DCCP_CLOSING = TCP_CLOSING, + DCCP_TIME_WAIT = TCP_TIME_WAIT, + DCCP_CLOSED = TCP_CLOSE, + DCCP_PARTOPEN = TCP_MAX_STATES, + DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */ + DCCP_MAX_STATES }; -#define DCCP_STATE_MASK 0xf -#define DCCP_ACTION_FIN (1<<7) +#define DCCP_STATE_MASK 0x1f enum { - DCCPF_OPEN = TCPF_ESTABLISHED, - DCCPF_REQUESTING = TCPF_SYN_SENT, - DCCPF_PARTOPEN = TCPF_FIN_WAIT1, - DCCPF_LISTEN = TCPF_LISTEN, - DCCPF_RESPOND = TCPF_SYN_RECV, - DCCPF_CLOSING = TCPF_CLOSING, - DCCPF_TIME_WAIT = TCPF_TIME_WAIT, - DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_OPEN = TCPF_ESTABLISHED, + DCCPF_REQUESTING = TCPF_SYN_SENT, + DCCPF_LISTEN = TCPF_LISTEN, + DCCPF_RESPOND = TCPF_SYN_RECV, + DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1, + DCCPF_CLOSING = TCPF_CLOSING, + DCCPF_TIME_WAIT = TCPF_TIME_WAIT, + DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN), }; static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb) @@ -393,13 +407,23 @@ struct dccp_opt_pend { extern void dccp_minisock_init(struct dccp_minisock *dmsk); -extern int dccp_parse_options(struct sock *sk, struct sk_buff *skb); - +/** + * struct dccp_request_sock - represent DCCP-specific connection request + * @dreq_inet_rsk: structure inherited from + * @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1) + * @dreq_isr: initial sequence number received on the Request + * @dreq_service: service code present on the Request (there is just one) + * The following two fields are analogous to the ones in dccp_sock: + * @dreq_timestamp_echo: last received timestamp to echo (13.1) + * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo + */ struct dccp_request_sock { struct inet_request_sock dreq_inet_rsk; __u64 dreq_iss; __u64 dreq_isr; __be32 dreq_service; + __u32 dreq_timestamp_echo; + __u32 dreq_timestamp_time; }; static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req) @@ -409,6 +433,9 @@ static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req) extern struct inet_timewait_death_row dccp_death_row; +extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, + struct sk_buff *skb); + struct dccp_options_received { u32 dccpor_ndp; /* only 24 bits */ u32 dccpor_timestamp; @@ -462,8 +489,8 @@ struct dccp_ackvec; * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss * @dccps_service - first (passive sock) or unique (active sock) service code * @dccps_service_list - second .. 
last service code on passive socket - * @dccps_timestamp_time - time of latest TIMESTAMP option * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option + * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo * @dccps_l_ack_ratio - feature-local Ack Ratio * @dccps_r_ack_ratio - feature-remote Ack Ratio * @dccps_pcslen - sender partial checksum coverage (via sockopt) @@ -479,6 +506,7 @@ struct dccp_ackvec; * @dccps_role - role of this sock, one of %dccp_role * @dccps_hc_rx_insert_options - receiver wants to add options when acking * @dccps_hc_tx_insert_options - sender wants to add options when sending + * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3) * @dccps_xmit_timer - timer for when CCID is not ready to send * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) */ @@ -498,8 +526,8 @@ struct dccp_sock { __u64 dccps_gar; __be32 dccps_service; struct dccp_service_list *dccps_service_list; - ktime_t dccps_timestamp_time; __u32 dccps_timestamp_echo; + __u32 dccps_timestamp_time; __u16 dccps_l_ack_ratio; __u16 dccps_r_ack_ratio; __u16 dccps_pcslen; @@ -515,6 +543,7 @@ struct dccp_sock { enum dccp_role dccps_role:2; __u8 dccps_hc_rx_insert_options:1; __u8 dccps_hc_tx_insert_options:1; + __u8 dccps_server_timewait:1; struct timer_list dccps_xmit_timer; }; diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 1678a5de7013..f4a5871767f5 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -47,6 +47,7 @@ struct task_struct; #ifdef CONFIG_LOCKDEP extern void debug_show_all_locks(void); +extern void __debug_show_held_locks(struct task_struct *task); extern void debug_show_held_locks(struct task_struct *task); extern void debug_check_no_locks_freed(const void *from, unsigned long len); extern void debug_check_no_locks_held(struct task_struct *task); @@ -55,6 +56,10 @@ static inline void debug_show_all_locks(void) { } +static inline void __debug_show_held_locks(struct task_struct *task) +{ +} + static inline void debug_show_held_locks(struct task_struct *task) { } diff --git a/include/linux/device.h b/include/linux/device.h index 2e15822fe409..db375be333c7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -25,75 +25,72 @@ #include <asm/device.h> #define DEVICE_NAME_SIZE 50 -#define DEVICE_NAME_HALF __stringify(20) /* Less than half to accommodate slop */ +/* DEVICE_NAME_HALF is really less than half to accommodate slop */ +#define DEVICE_NAME_HALF __stringify(20) #define DEVICE_ID_SIZE 32 #define BUS_ID_SIZE KOBJ_NAME_LEN struct device; struct device_driver; +struct driver_private; struct class; struct class_device; struct bus_type; +struct bus_type_private; struct bus_attribute { struct attribute attr; - ssize_t (*show)(struct bus_type *, char * buf); - ssize_t (*store)(struct bus_type *, const char * buf, size_t count); + ssize_t (*show)(struct bus_type *bus, char *buf); + ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); }; -#define BUS_ATTR(_name,_mode,_show,_store) \ -struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store) +#define BUS_ATTR(_name, _mode, _show, _store) \ +struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) extern int __must_check bus_create_file(struct bus_type *, struct bus_attribute *); extern void bus_remove_file(struct bus_type *, struct bus_attribute *); struct bus_type { - const char * name; - struct module * owner; + const char *name; + struct bus_attribute 
*bus_attrs; + struct device_attribute *dev_attrs; + struct driver_attribute *drv_attrs; - struct kset subsys; - struct kset drivers; - struct kset devices; - struct klist klist_devices; - struct klist klist_drivers; - - struct blocking_notifier_head bus_notifier; - - struct bus_attribute * bus_attrs; - struct device_attribute * dev_attrs; - struct driver_attribute * drv_attrs; - - int (*match)(struct device * dev, struct device_driver * drv); - int (*uevent)(struct device *dev, struct kobj_uevent_env *env); - int (*probe)(struct device * dev); - int (*remove)(struct device * dev); - void (*shutdown)(struct device * dev); + int (*match)(struct device *dev, struct device_driver *drv); + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + int (*probe)(struct device *dev); + int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); - int (*suspend)(struct device * dev, pm_message_t state); - int (*suspend_late)(struct device * dev, pm_message_t state); - int (*resume_early)(struct device * dev); - int (*resume)(struct device * dev); + int (*suspend)(struct device *dev, pm_message_t state); + int (*suspend_late)(struct device *dev, pm_message_t state); + int (*resume_early)(struct device *dev); + int (*resume)(struct device *dev); - unsigned int drivers_autoprobe:1; + struct bus_type_private *p; }; -extern int __must_check bus_register(struct bus_type * bus); -extern void bus_unregister(struct bus_type * bus); +extern int __must_check bus_register(struct bus_type *bus); +extern void bus_unregister(struct bus_type *bus); -extern int __must_check bus_rescan_devices(struct bus_type * bus); +extern int __must_check bus_rescan_devices(struct bus_type *bus); /* iterator helpers for buses */ -int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data, - int (*fn)(struct device *, void *)); -struct device * bus_find_device(struct bus_type *bus, struct device *start, - void *data, int (*match)(struct device *, void *)); +int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *bus_find_device(struct bus_type *bus, struct device *start, + void *data, + int (*match)(struct device *dev, void *data)); +struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name); int __must_check bus_for_each_drv(struct bus_type *bus, - struct device_driver *start, void *data, - int (*fn)(struct device_driver *, void *)); + struct device_driver *start, void *data, + int (*fn)(struct device_driver *, void *)); /* * Bus notifiers: Get notified of addition/removal of devices @@ -118,111 +115,128 @@ extern int bus_unregister_notifier(struct bus_type *bus, #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be unbound */ +extern struct kset *bus_get_kset(struct bus_type *bus); +extern struct klist *bus_get_device_klist(struct bus_type *bus); + struct device_driver { - const char * name; - struct bus_type * bus; + const char *name; + struct bus_type *bus; - struct kobject kobj; - struct klist klist_devices; - struct klist_node knode_bus; + struct module *owner; + const char *mod_name; /* used for built-in modules */ - struct module * owner; - const char * mod_name; /* used for built-in modules */ - struct module_kobject * mkobj; + int (*probe) (struct device *dev); + int (*remove) (struct device *dev); + void (*shutdown) (struct device *dev); + int (*suspend) (struct device *dev, pm_message_t state); + int (*resume) (struct device *dev); + 
struct attribute_group **groups; - int (*probe) (struct device * dev); - int (*remove) (struct device * dev); - void (*shutdown) (struct device * dev); - int (*suspend) (struct device * dev, pm_message_t state); - int (*resume) (struct device * dev); + struct driver_private *p; }; -extern int __must_check driver_register(struct device_driver * drv); -extern void driver_unregister(struct device_driver * drv); +extern int __must_check driver_register(struct device_driver *drv); +extern void driver_unregister(struct device_driver *drv); -extern struct device_driver * get_driver(struct device_driver * drv); -extern void put_driver(struct device_driver * drv); -extern struct device_driver *driver_find(const char *name, struct bus_type *bus); +extern struct device_driver *get_driver(struct device_driver *drv); +extern void put_driver(struct device_driver *drv); +extern struct device_driver *driver_find(const char *name, + struct bus_type *bus); extern int driver_probe_done(void); /* sysfs interface for exporting driver attributes */ struct driver_attribute { - struct attribute attr; - ssize_t (*show)(struct device_driver *, char * buf); - ssize_t (*store)(struct device_driver *, const char * buf, size_t count); + struct attribute attr; + ssize_t (*show)(struct device_driver *driver, char *buf); + ssize_t (*store)(struct device_driver *driver, const char *buf, + size_t count); }; -#define DRIVER_ATTR(_name,_mode,_show,_store) \ -struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store) +#define DRIVER_ATTR(_name, _mode, _show, _store) \ +struct driver_attribute driver_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) -extern int __must_check driver_create_file(struct device_driver *, - struct driver_attribute *); -extern void driver_remove_file(struct device_driver *, struct driver_attribute *); +extern int __must_check driver_create_file(struct device_driver *driver, + struct driver_attribute *attr); +extern void driver_remove_file(struct device_driver *driver, + struct driver_attribute *attr); -extern int __must_check driver_for_each_device(struct device_driver * drv, - struct device *start, void *data, - int (*fn)(struct device *, void *)); -struct device * driver_find_device(struct device_driver *drv, - struct device *start, void *data, - int (*match)(struct device *, void *)); +extern int __must_check driver_add_kobj(struct device_driver *drv, + struct kobject *kobj, + const char *fmt, ...); + +extern int __must_check driver_for_each_device(struct device_driver *drv, + struct device *start, + void *data, + int (*fn)(struct device *dev, + void *)); +struct device *driver_find_device(struct device_driver *drv, + struct device *start, void *data, + int (*match)(struct device *dev, void *data)); /* * device classes */ struct class { - const char * name; - struct module * owner; + const char *name; + struct module *owner; struct kset subsys; struct list_head children; struct list_head devices; struct list_head interfaces; struct kset class_dirs; - struct semaphore sem; /* locks both the children and interfaces lists */ - - struct class_attribute * class_attrs; - struct class_device_attribute * class_dev_attrs; - struct device_attribute * dev_attrs; + struct semaphore sem; /* locks children, devices, interfaces */ + struct class_attribute *class_attrs; + struct class_device_attribute *class_dev_attrs; + struct device_attribute *dev_attrs; - int (*uevent)(struct class_device *dev, struct kobj_uevent_env *env); - int (*dev_uevent)(struct device *dev, struct kobj_uevent_env 
*env); + int (*uevent)(struct class_device *dev, struct kobj_uevent_env *env); + int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); - void (*release)(struct class_device *dev); - void (*class_release)(struct class *class); - void (*dev_release)(struct device *dev); + void (*release)(struct class_device *dev); + void (*class_release)(struct class *class); + void (*dev_release)(struct device *dev); - int (*suspend)(struct device *, pm_message_t state); - int (*resume)(struct device *); + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); }; -extern int __must_check class_register(struct class *); -extern void class_unregister(struct class *); +extern int __must_check class_register(struct class *class); +extern void class_unregister(struct class *class); +extern int class_for_each_device(struct class *class, void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *class_find_device(struct class *class, void *data, + int (*match)(struct device *, void *)); +extern struct class_device *class_find_child(struct class *class, void *data, + int (*match)(struct class_device *, void *)); struct class_attribute { - struct attribute attr; - ssize_t (*show)(struct class *, char * buf); - ssize_t (*store)(struct class *, const char * buf, size_t count); + struct attribute attr; + ssize_t (*show)(struct class *class, char *buf); + ssize_t (*store)(struct class *class, const char *buf, size_t count); }; -#define CLASS_ATTR(_name,_mode,_show,_store) \ -struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store) +#define CLASS_ATTR(_name, _mode, _show, _store) \ +struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) -extern int __must_check class_create_file(struct class *, - const struct class_attribute *); -extern void class_remove_file(struct class *, const struct class_attribute *); +extern int __must_check class_create_file(struct class *class, + const struct class_attribute *attr); +extern void class_remove_file(struct class *class, + const struct class_attribute *attr); struct class_device_attribute { - struct attribute attr; - ssize_t (*show)(struct class_device *, char * buf); - ssize_t (*store)(struct class_device *, const char * buf, size_t count); + struct attribute attr; + ssize_t (*show)(struct class_device *, char *buf); + ssize_t (*store)(struct class_device *, const char *buf, size_t count); }; -#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store) \ +#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \ struct class_device_attribute class_device_attr_##_name = \ - __ATTR(_name,_mode,_show,_store) + __ATTR(_name, _mode, _show, _store) extern int __must_check class_device_create_file(struct class_device *, const struct class_device_attribute *); @@ -255,26 +269,24 @@ struct class_device { struct list_head node; struct kobject kobj; - struct class * class; /* required */ - dev_t devt; /* dev_t, creates the sysfs "dev" */ - struct device * dev; /* not necessary, but nice to have */ - void * class_data; /* class-specific data */ - struct class_device *parent; /* parent of this child device, if there is one */ - struct attribute_group ** groups; /* optional groups */ - - void (*release)(struct class_device *dev); - int (*uevent)(struct class_device *dev, struct kobj_uevent_env *env); - char class_id[BUS_ID_SIZE]; /* unique to this class */ + struct class *class; + dev_t devt; + struct device *dev; + void *class_data; + struct class_device *parent; + struct 
attribute_group **groups; + + void (*release)(struct class_device *dev); + int (*uevent)(struct class_device *dev, struct kobj_uevent_env *env); + char class_id[BUS_ID_SIZE]; }; -static inline void * -class_get_devdata (struct class_device *dev) +static inline void *class_get_devdata(struct class_device *dev) { return dev->class_data; } -static inline void -class_set_devdata (struct class_device *dev, void *data) +static inline void class_set_devdata(struct class_device *dev, void *data) { dev->class_data = data; } @@ -286,10 +298,10 @@ extern void class_device_initialize(struct class_device *); extern int __must_check class_device_add(struct class_device *); extern void class_device_del(struct class_device *); -extern struct class_device * class_device_get(struct class_device *); +extern struct class_device *class_device_get(struct class_device *); extern void class_device_put(struct class_device *); -extern void class_device_remove_file(struct class_device *, +extern void class_device_remove_file(struct class_device *, const struct class_device_attribute *); extern int __must_check class_device_create_bin_file(struct class_device *, struct bin_attribute *); @@ -316,7 +328,7 @@ extern struct class_device *class_device_create(struct class *cls, dev_t devt, struct device *device, const char *fmt, ...) - __attribute__((format(printf,5,6))); + __attribute__((format(printf, 5, 6))); extern void class_device_destroy(struct class *cls, dev_t devt); /* @@ -333,8 +345,8 @@ struct device_type { struct attribute_group **groups; int (*uevent)(struct device *dev, struct kobj_uevent_env *env); void (*release)(struct device *dev); - int (*suspend)(struct device * dev, pm_message_t state); - int (*resume)(struct device * dev); + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); }; /* interface for exporting device attributes */ @@ -346,18 +358,19 @@ struct device_attribute { const char *buf, size_t count); }; -#define DEVICE_ATTR(_name,_mode,_show,_store) \ -struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store) +#define DEVICE_ATTR(_name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) extern int __must_check device_create_file(struct device *device, - struct device_attribute * entry); -extern void device_remove_file(struct device * dev, struct device_attribute * attr); + struct device_attribute *entry); +extern void device_remove_file(struct device *dev, + struct device_attribute *attr); extern int __must_check device_create_bin_file(struct device *dev, struct bin_attribute *attr); extern void device_remove_bin_file(struct device *dev, struct bin_attribute *attr); extern int device_schedule_callback_owner(struct device *dev, - void (*func)(struct device *), struct module *owner); + void (*func)(struct device *dev), struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define device_schedule_callback(dev, func) \ @@ -368,21 +381,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES -extern void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, +extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, const char *name); #define devres_alloc(release, size, gfp) \ __devres_alloc(release, size, gfp, #release) #else -extern void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp); +extern void 
*devres_alloc(dr_release_t release, size_t size, gfp_t gfp); #endif extern void devres_free(void *res); extern void devres_add(struct device *dev, void *res); -extern void * devres_find(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern void * devres_get(struct device *dev, void *new_res, +extern void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); -extern void * devres_remove(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); +extern void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +extern void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); extern int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); @@ -399,7 +412,7 @@ extern void devm_kfree(struct device *dev, void *p); struct device { struct klist klist_children; - struct klist_node knode_parent; /* node in sibling list */ + struct klist_node knode_parent; /* node in sibling list */ struct klist_node knode_driver; struct klist_node knode_bus; struct device *parent; @@ -414,7 +427,7 @@ struct device { * its driver. */ - struct bus_type * bus; /* type of bus device is on */ + struct bus_type *bus; /* type of bus device is on */ struct device_driver *driver; /* which driver has allocated this device */ void *driver_data; /* data private to the driver */ @@ -445,10 +458,10 @@ struct device { /* class_device migration path */ struct list_head node; struct class *class; - dev_t devt; /* dev_t, creates the sysfs "dev" */ + dev_t devt; /* dev_t, creates the sysfs "dev" */ struct attribute_group **groups; /* optional groups */ - void (*release)(struct device * dev); + void (*release)(struct device *dev); }; #ifdef CONFIG_NUMA @@ -470,14 +483,12 @@ static inline void set_dev_node(struct device *dev, int node) } #endif -static inline void * -dev_get_drvdata (struct device *dev) +static inline void *dev_get_drvdata(struct device *dev) { return dev->driver_data; } -static inline void -dev_set_drvdata (struct device *dev, void *data) +static inline void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; } @@ -492,15 +503,15 @@ void driver_init(void); /* * High level routines for use by the bus drivers */ -extern int __must_check device_register(struct device * dev); -extern void device_unregister(struct device * dev); -extern void device_initialize(struct device * dev); -extern int __must_check device_add(struct device * dev); -extern void device_del(struct device * dev); -extern int device_for_each_child(struct device *, void *, - int (*fn)(struct device *, void *)); -extern struct device *device_find_child(struct device *, void *data, - int (*match)(struct device *, void *)); +extern int __must_check device_register(struct device *dev); +extern void device_unregister(struct device *dev); +extern void device_initialize(struct device *dev); +extern int __must_check device_add(struct device *dev); +extern void device_del(struct device *dev); +extern int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); extern int device_rename(struct device *dev, char *new_name); extern int device_move(struct device *dev, struct device *new_parent); @@ -509,8 +520,8 @@ extern int device_move(struct device *dev, struct device 
*new_parent); * for information on use. */ extern int __must_check device_bind_driver(struct device *dev); -extern void device_release_driver(struct device * dev); -extern int __must_check device_attach(struct device * dev); +extern void device_release_driver(struct device *dev); +extern int __must_check device_attach(struct device *dev); extern int __must_check driver_attach(struct device_driver *drv); extern int __must_check device_reprobe(struct device *dev); @@ -519,8 +530,16 @@ extern int __must_check device_reprobe(struct device *dev); */ extern struct device *device_create(struct class *cls, struct device *parent, dev_t devt, const char *fmt, ...) - __attribute__((format(printf,4,5))); + __attribute__((format(printf, 4, 5))); extern void device_destroy(struct class *cls, dev_t devt); +#ifdef CONFIG_PM_SLEEP +extern void destroy_suspended_device(struct class *cls, dev_t devt); +#else /* !CONFIG_PM_SLEEP */ +static inline void destroy_suspended_device(struct class *cls, dev_t devt) +{ + device_destroy(cls, devt); +} +#endif /* !CONFIG_PM_SLEEP */ /* * Platform "fixup" functions - allow the platform to have their say @@ -528,17 +547,17 @@ extern void device_destroy(struct class *cls, dev_t devt); * know about. */ /* Notify platform of device discovery */ -extern int (*platform_notify)(struct device * dev); +extern int (*platform_notify)(struct device *dev); -extern int (*platform_notify_remove)(struct device * dev); +extern int (*platform_notify_remove)(struct device *dev); /** * get_device - atomically increment the reference count for the device. * */ -extern struct device * get_device(struct device * dev); -extern void put_device(struct device * dev); +extern struct device *get_device(struct device *dev); +extern void put_device(struct device *dev); /* drivers/base/power/shutdown.c */ @@ -547,22 +566,33 @@ extern void device_shutdown(void); /* drivers/base/sys.c */ extern void sysdev_shutdown(void); - -/* drivers/base/firmware.c */ -extern int __must_check firmware_register(struct kset *); -extern void firmware_unregister(struct kset *); - /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(struct device *dev); #define dev_printk(level, dev, format, arg...) \ - printk(level "%s %s: " format , dev_driver_string(dev) , (dev)->bus_id , ## arg) + printk(level "%s %s: " format , dev_driver_string(dev) , \ + (dev)->bus_id , ## arg) + +#define dev_emerg(dev, format, arg...) \ + dev_printk(KERN_EMERG , dev , format , ## arg) +#define dev_alert(dev, format, arg...) \ + dev_printk(KERN_ALERT , dev , format , ## arg) +#define dev_crit(dev, format, arg...) \ + dev_printk(KERN_CRIT , dev , format , ## arg) +#define dev_err(dev, format, arg...) \ + dev_printk(KERN_ERR , dev , format , ## arg) +#define dev_warn(dev, format, arg...) \ + dev_printk(KERN_WARNING , dev , format , ## arg) +#define dev_notice(dev, format, arg...) \ + dev_printk(KERN_NOTICE , dev , format , ## arg) +#define dev_info(dev, format, arg...) \ + dev_printk(KERN_INFO , dev , format , ## arg) #ifdef DEBUG #define dev_dbg(dev, format, arg...) \ dev_printk(KERN_DEBUG , dev , format , ## arg) #else static inline int __attribute__ ((format (printf, 2, 3))) -dev_dbg(struct device * dev, const char * fmt, ...) +dev_dbg(struct device *dev, const char *fmt, ...) { return 0; } @@ -572,21 +602,12 @@ dev_dbg(struct device * dev, const char * fmt, ...) #define dev_vdbg dev_dbg #else static inline int __attribute__ ((format (printf, 2, 3))) -dev_vdbg(struct device * dev, const char * fmt, ...) 
+dev_vdbg(struct device *dev, const char *fmt, ...) { return 0; } #endif -#define dev_err(dev, format, arg...) \ - dev_printk(KERN_ERR , dev , format , ## arg) -#define dev_info(dev, format, arg...) \ - dev_printk(KERN_INFO , dev , format , ## arg) -#define dev_warn(dev, format, arg...) \ - dev_printk(KERN_WARNING , dev , format , ## arg) -#define dev_notice(dev, format, arg...) \ - dev_printk(KERN_NOTICE , dev , format , ## arg) - /* Create alias, so I can be autoloaded. */ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) diff --git a/include/linux/dlm.h b/include/linux/dlm.h index be9d278761e0..c743fbc769db 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h @@ -19,148 +19,12 @@ * routines and structures to use DLM lockspaces */ -/* - * Lock Modes - */ +/* Lock levels and flags are here */ +#include <linux/dlmconstants.h> -#define DLM_LOCK_IV -1 /* invalid */ -#define DLM_LOCK_NL 0 /* null */ -#define DLM_LOCK_CR 1 /* concurrent read */ -#define DLM_LOCK_CW 2 /* concurrent write */ -#define DLM_LOCK_PR 3 /* protected read */ -#define DLM_LOCK_PW 4 /* protected write */ -#define DLM_LOCK_EX 5 /* exclusive */ - -/* - * Maximum size in bytes of a dlm_lock name - */ #define DLM_RESNAME_MAXLEN 64 -/* - * Flags to dlm_lock - * - * DLM_LKF_NOQUEUE - * - * Do not queue the lock request on the wait queue if it cannot be granted - * immediately. If the lock cannot be granted because of this flag, DLM will - * either return -EAGAIN from the dlm_lock call or will return 0 from - * dlm_lock and -EAGAIN in the lock status block when the AST is executed. - * - * DLM_LKF_CANCEL - * - * Used to cancel a pending lock request or conversion. A converting lock is - * returned to its previously granted mode. - * - * DLM_LKF_CONVERT - * - * Indicates a lock conversion request. For conversions the name and namelen - * are ignored and the lock ID in the LKSB is used to identify the lock. - * - * DLM_LKF_VALBLK - * - * Requests DLM to return the current contents of the lock value block in the - * lock status block. When this flag is set in a lock conversion from PW or EX - * modes, DLM assigns the value specified in the lock status block to the lock - * value block of the lock resource. The LVB is a DLM_LVB_LEN size array - * containing application-specific information. - * - * DLM_LKF_QUECVT - * - * Force a conversion request to be queued, even if it is compatible with - * the granted modes of other locks on the same resource. - * - * DLM_LKF_IVVALBLK - * - * Invalidate the lock value block. - * - * DLM_LKF_CONVDEADLK - * - * Allows the dlm to resolve conversion deadlocks internally by demoting the - * granted mode of a converting lock to NL. The DLM_SBF_DEMOTED flag is - * returned for a conversion that's been effected by this. - * - * DLM_LKF_PERSISTENT - * - * Only relevant to locks originating in userspace. A persistent lock will not - * be removed if the process holding the lock exits. - * - * DLM_LKF_NODLCKWT - * - * Do not cancel the lock if it gets into conversion deadlock. - * Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN. - * - * DLM_LKF_NODLCKBLK - * - * net yet implemented - * - * DLM_LKF_EXPEDITE - * - * Used only with new requests for NL mode locks. Tells the lock manager - * to grant the lock, ignoring other locks in convert and wait queues. - * - * DLM_LKF_NOQUEUEBAST - * - * Send blocking AST's before returning -EAGAIN to the caller. It is only - * used along with the NOQUEUE flag. 
Blocking AST's are not sent for failed - * NOQUEUE requests otherwise. - * - * DLM_LKF_HEADQUE - * - * Add a lock to the head of the convert or wait queue rather than the tail. - * - * DLM_LKF_NOORDER - * - * Disregard the standard grant order rules and grant a lock as soon as it - * is compatible with other granted locks. - * - * DLM_LKF_ORPHAN - * - * not yet implemented - * - * DLM_LKF_ALTPR - * - * If the requested mode cannot be granted immediately, try to grant the lock - * in PR mode instead. If this alternate mode is granted instead of the - * requested mode, DLM_SBF_ALTMODE is returned in the lksb. - * - * DLM_LKF_ALTCW - * - * The same as ALTPR, but the alternate mode is CW. - * - * DLM_LKF_FORCEUNLOCK - * - * Unlock the lock even if it is converting or waiting or has sublocks. - * Only really for use by the userland device.c code. - * - */ - -#define DLM_LKF_NOQUEUE 0x00000001 -#define DLM_LKF_CANCEL 0x00000002 -#define DLM_LKF_CONVERT 0x00000004 -#define DLM_LKF_VALBLK 0x00000008 -#define DLM_LKF_QUECVT 0x00000010 -#define DLM_LKF_IVVALBLK 0x00000020 -#define DLM_LKF_CONVDEADLK 0x00000040 -#define DLM_LKF_PERSISTENT 0x00000080 -#define DLM_LKF_NODLCKWT 0x00000100 -#define DLM_LKF_NODLCKBLK 0x00000200 -#define DLM_LKF_EXPEDITE 0x00000400 -#define DLM_LKF_NOQUEUEBAST 0x00000800 -#define DLM_LKF_HEADQUE 0x00001000 -#define DLM_LKF_NOORDER 0x00002000 -#define DLM_LKF_ORPHAN 0x00004000 -#define DLM_LKF_ALTPR 0x00008000 -#define DLM_LKF_ALTCW 0x00010000 -#define DLM_LKF_FORCEUNLOCK 0x00020000 -#define DLM_LKF_TIMEOUT 0x00040000 - -/* - * Some return codes that are not in errno.h - */ - -#define DLM_ECANCEL 0x10001 -#define DLM_EUNLOCK 0x10002 typedef void dlm_lockspace_t; diff --git a/include/linux/dlmconstants.h b/include/linux/dlmconstants.h new file mode 100644 index 000000000000..fddb3d3ff321 --- /dev/null +++ b/include/linux/dlmconstants.h @@ -0,0 +1,159 @@ +/****************************************************************************** +******************************************************************************* +** +** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. +** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. +** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. +** +******************************************************************************* +******************************************************************************/ + +#ifndef __DLMCONSTANTS_DOT_H__ +#define __DLMCONSTANTS_DOT_H__ + +/* + * Constants used by DLM interface. + */ + +/* + * Lock Modes + */ + +#define DLM_LOCK_IV (-1) /* invalid */ +#define DLM_LOCK_NL 0 /* null */ +#define DLM_LOCK_CR 1 /* concurrent read */ +#define DLM_LOCK_CW 2 /* concurrent write */ +#define DLM_LOCK_PR 3 /* protected read */ +#define DLM_LOCK_PW 4 /* protected write */ +#define DLM_LOCK_EX 5 /* exclusive */ + + +/* + * Flags to dlm_lock + * + * DLM_LKF_NOQUEUE + * + * Do not queue the lock request on the wait queue if it cannot be granted + * immediately. If the lock cannot be granted because of this flag, DLM will + * either return -EAGAIN from the dlm_lock call or will return 0 from + * dlm_lock and -EAGAIN in the lock status block when the AST is executed. + * + * DLM_LKF_CANCEL + * + * Used to cancel a pending lock request or conversion. A converting lock is + * returned to its previously granted mode. 
+ * + * DLM_LKF_CONVERT + * + * Indicates a lock conversion request. For conversions the name and namelen + * are ignored and the lock ID in the LKSB is used to identify the lock. + * + * DLM_LKF_VALBLK + * + * Requests DLM to return the current contents of the lock value block in the + * lock status block. When this flag is set in a lock conversion from PW or EX + * modes, DLM assigns the value specified in the lock status block to the lock + * value block of the lock resource. The LVB is a DLM_LVB_LEN size array + * containing application-specific information. + * + * DLM_LKF_QUECVT + * + * Force a conversion request to be queued, even if it is compatible with + * the granted modes of other locks on the same resource. + * + * DLM_LKF_IVVALBLK + * + * Invalidate the lock value block. + * + * DLM_LKF_CONVDEADLK + * + * Allows the dlm to resolve conversion deadlocks internally by demoting the + * granted mode of a converting lock to NL. The DLM_SBF_DEMOTED flag is + * returned for a conversion that's been effected by this. + * + * DLM_LKF_PERSISTENT + * + * Only relevant to locks originating in userspace. A persistent lock will not + * be removed if the process holding the lock exits. + * + * DLM_LKF_NODLCKWT + * + * Do not cancel the lock if it gets into conversion deadlock. + * Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN. + * + * DLM_LKF_NODLCKBLK + * + * net yet implemented + * + * DLM_LKF_EXPEDITE + * + * Used only with new requests for NL mode locks. Tells the lock manager + * to grant the lock, ignoring other locks in convert and wait queues. + * + * DLM_LKF_NOQUEUEBAST + * + * Send blocking AST's before returning -EAGAIN to the caller. It is only + * used along with the NOQUEUE flag. Blocking AST's are not sent for failed + * NOQUEUE requests otherwise. + * + * DLM_LKF_HEADQUE + * + * Add a lock to the head of the convert or wait queue rather than the tail. + * + * DLM_LKF_NOORDER + * + * Disregard the standard grant order rules and grant a lock as soon as it + * is compatible with other granted locks. + * + * DLM_LKF_ORPHAN + * + * not yet implemented + * + * DLM_LKF_ALTPR + * + * If the requested mode cannot be granted immediately, try to grant the lock + * in PR mode instead. If this alternate mode is granted instead of the + * requested mode, DLM_SBF_ALTMODE is returned in the lksb. + * + * DLM_LKF_ALTCW + * + * The same as ALTPR, but the alternate mode is CW. + * + * DLM_LKF_FORCEUNLOCK + * + * Unlock the lock even if it is converting or waiting or has sublocks. + * Only really for use by the userland device.c code. 
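/*
 * Illustrative sketch, not part of this patch: how a kernel caller might
 * combine the DLM_LKF_* flags documented above.  The dlm_lock() prototype
 * is assumed to match the declaration in <linux/dlm.h> of this tree; the
 * lockspace, resource name and callbacks below are hypothetical.
 */
#include <linux/dlm.h>
#include <linux/string.h>

static void example_ast(void *astarg)
{
	/* granted, or lksb status set to -EAGAIN for a failed NOQUEUE try */
}

static void example_bast(void *astarg, int mode)
{
	/* someone else wants a mode incompatible with ours */
}

static int example_trylock_ex(dlm_lockspace_t *ls, struct dlm_lksb *lksb)
{
	char name[DLM_RESNAME_MAXLEN] = "example-resource";

	/* NOQUEUE: do not wait if the lock cannot be granted right now;
	 * VALBLK: return the current lock value block in the lksb. */
	return dlm_lock(ls, DLM_LOCK_EX, lksb,
			DLM_LKF_NOQUEUE | DLM_LKF_VALBLK,
			name, strlen(name), 0,
			example_ast, lksb, example_bast);
	/* -EAGAIN, either returned here or delivered via the lksb status in
	 * the AST, means the NOQUEUE request could not be granted. */
}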
+ * + */ + +#define DLM_LKF_NOQUEUE 0x00000001 +#define DLM_LKF_CANCEL 0x00000002 +#define DLM_LKF_CONVERT 0x00000004 +#define DLM_LKF_VALBLK 0x00000008 +#define DLM_LKF_QUECVT 0x00000010 +#define DLM_LKF_IVVALBLK 0x00000020 +#define DLM_LKF_CONVDEADLK 0x00000040 +#define DLM_LKF_PERSISTENT 0x00000080 +#define DLM_LKF_NODLCKWT 0x00000100 +#define DLM_LKF_NODLCKBLK 0x00000200 +#define DLM_LKF_EXPEDITE 0x00000400 +#define DLM_LKF_NOQUEUEBAST 0x00000800 +#define DLM_LKF_HEADQUE 0x00001000 +#define DLM_LKF_NOORDER 0x00002000 +#define DLM_LKF_ORPHAN 0x00004000 +#define DLM_LKF_ALTPR 0x00008000 +#define DLM_LKF_ALTCW 0x00010000 +#define DLM_LKF_FORCEUNLOCK 0x00020000 +#define DLM_LKF_TIMEOUT 0x00040000 + +/* + * Some return codes that are not in errno.h + */ + +#define DLM_ECANCEL 0x10001 +#define DLM_EUNLOCK 0x10002 + +#endif /* __DLMCONSTANTS_DOT_H__ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index a3b6035b6c86..55c9a6952f44 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -132,7 +132,7 @@ struct dma_chan { /* sysfs */ int chan_id; - struct class_device class_dev; + struct device dev; struct kref refcount; int slow_ref; @@ -142,6 +142,7 @@ struct dma_chan { struct dma_chan_percpu *local; }; +#define to_dma_chan(p) container_of(p, struct dma_chan, dev) void dma_chan_cleanup(struct kref *kref); diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 00fc7a9c35ec..5b42a659a308 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -78,6 +78,8 @@ extern const struct dmi_device * dmi_find_device(int type, const char *name, extern void dmi_scan_machine(void); extern int dmi_get_year(int field); extern int dmi_name_in_vendors(const char *str); +extern int dmi_available; +extern char *dmi_get_slot(int slot); #else @@ -87,6 +89,8 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na const struct dmi_device *from) { return NULL; } static inline int dmi_get_year(int year) { return 0; } static inline int dmi_name_in_vendors(const char *s) { return 0; } +#define dmi_available 0 +static inline char *dmi_get_slot(int slot) { return NULL; } #endif diff --git a/include/linux/elf.h b/include/linux/elf.h index 576e83bd6d88..7ceb24d87c1a 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -355,6 +355,7 @@ typedef struct elf64_shdr { #define NT_AUXV 6 #define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ #define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ +#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ /* Note header in a PT_NOTE section */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index e831759b2fb5..278e3ef05336 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -76,7 +76,7 @@ typeof(desc) _desc \ __attribute__((aligned(sizeof(Elf##size##_Word)))); \ } _ELFNOTE_PASTE(_note_, unique) \ - __attribute_used__ \ + __used \ __attribute__((section(".note." name), \ aligned(sizeof(Elf##size##_Word)), \ unused)) = { \ diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h index 97dd409d5f4a..1852313fc7c7 100644 --- a/include/linux/ext4_fs.h +++ b/include/linux/ext4_fs.h @@ -20,6 +20,8 @@ #include <linux/blkdev.h> #include <linux/magic.h> +#include <linux/ext4_fs_i.h> + /* * The second extended filesystem constants/structures */ @@ -51,6 +53,50 @@ #define ext4_debug(f, a...) do {} while (0) #endif +#define EXT4_MULTIBLOCK_ALLOCATOR 1 + +/* prefer goal again. 
length */ +#define EXT4_MB_HINT_MERGE 1 +/* blocks already reserved */ +#define EXT4_MB_HINT_RESERVED 2 +/* metadata is being allocated */ +#define EXT4_MB_HINT_METADATA 4 +/* first blocks in the file */ +#define EXT4_MB_HINT_FIRST 8 +/* search for the best chunk */ +#define EXT4_MB_HINT_BEST 16 +/* data is being allocated */ +#define EXT4_MB_HINT_DATA 32 +/* don't preallocate (for tails) */ +#define EXT4_MB_HINT_NOPREALLOC 64 +/* allocate for locality group */ +#define EXT4_MB_HINT_GROUP_ALLOC 128 +/* allocate goal blocks or none */ +#define EXT4_MB_HINT_GOAL_ONLY 256 +/* goal is meaningful */ +#define EXT4_MB_HINT_TRY_GOAL 512 + +struct ext4_allocation_request { + /* target inode for block we're allocating */ + struct inode *inode; + /* logical block in target inode */ + ext4_lblk_t logical; + /* phys. target (a hint) */ + ext4_fsblk_t goal; + /* the closest logical allocated block to the left */ + ext4_lblk_t lleft; + /* phys. block for ^^^ */ + ext4_fsblk_t pleft; + /* the closest logical allocated block to the right */ + ext4_lblk_t lright; + /* phys. block for ^^^ */ + ext4_fsblk_t pright; + /* how many blocks we want to allocate */ + unsigned long len; + /* flags. see above EXT4_MB_HINT_* */ + unsigned long flags; +}; + /* * Special inodes numbers */ @@ -73,8 +119,8 @@ * Macro-instructions used to manage several block sizes */ #define EXT4_MIN_BLOCK_SIZE 1024 -#define EXT4_MAX_BLOCK_SIZE 4096 -#define EXT4_MIN_BLOCK_LOG_SIZE 10 +#define EXT4_MAX_BLOCK_SIZE 65536 +#define EXT4_MIN_BLOCK_LOG_SIZE 10 #ifdef __KERNEL__ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) #else @@ -118,6 +164,11 @@ struct ext4_group_desc __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */ __le32 bg_inode_bitmap_hi; /* Inodes bitmap block MSB */ __le32 bg_inode_table_hi; /* Inodes table block MSB */ + __le16 bg_free_blocks_count_hi;/* Free blocks count MSB */ + __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */ + __le16 bg_used_dirs_count_hi; /* Directories count MSB */ + __le16 bg_itable_unused_hi; /* Unused inodes count MSB */ + __u32 bg_reserved2[3]; }; #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ @@ -178,8 +229,9 @@ struct ext4_group_desc #define EXT4_NOTAIL_FL 0x00008000 /* file tail should not be merged */ #define EXT4_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ -#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ +#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ +#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */ #define EXT4_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ @@ -237,6 +289,7 @@ struct ext4_new_group_data { #endif #define EXT4_IOC_GETRSVSZ _IOR('f', 5, long) #define EXT4_IOC_SETRSVSZ _IOW('f', 6, long) +#define EXT4_IOC_MIGRATE _IO('f', 7) /* * ioctl commands in 32 bit emulation @@ -275,18 +328,18 @@ struct ext4_mount_options { struct ext4_inode { __le16 i_mode; /* File mode */ __le16 i_uid; /* Low 16 bits of Owner Uid */ - __le32 i_size; /* Size in bytes */ + __le32 i_size_lo; /* Size in bytes */ __le32 i_atime; /* Access time */ __le32 i_ctime; /* Inode Change time */ __le32 i_mtime; /* Modification time */ __le32 i_dtime; /* Deletion Time */ __le16 i_gid; /* Low 16 bits of Group Id */ __le16 i_links_count; /* Links count */ - __le32 i_blocks; /* Blocks count */ + __le32 i_blocks_lo; /* 
Blocks count */ __le32 i_flags; /* File flags */ union { struct { - __u32 l_i_reserved1; + __le32 l_i_version; } linux1; struct { __u32 h_i_translator; @@ -297,12 +350,12 @@ struct ext4_inode { } osd1; /* OS dependent 1 */ __le32 i_block[EXT4_N_BLOCKS];/* Pointers to blocks */ __le32 i_generation; /* File version (for NFS) */ - __le32 i_file_acl; /* File ACL */ - __le32 i_dir_acl; /* Directory ACL */ + __le32 i_file_acl_lo; /* File ACL */ + __le32 i_size_high; __le32 i_obso_faddr; /* Obsoleted fragment address */ union { struct { - __le16 l_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ + __le16 l_i_blocks_high; /* were l_i_reserved1 */ __le16 l_i_file_acl_high; __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ @@ -328,9 +381,9 @@ struct ext4_inode { __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */ __le32 i_crtime; /* File Creation time */ __le32 i_crtime_extra; /* extra FileCreationtime (nsec << 2 | epoch) */ + __le32 i_version_hi; /* high 32 bits for 64-bit version */ }; -#define i_size_high i_dir_acl #define EXT4_EPOCH_BITS 2 #define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1) @@ -402,9 +455,12 @@ do { \ raw_inode->xtime ## _extra); \ } while (0) +#define i_disk_version osd1.linux1.l_i_version + #if defined(__KERNEL__) || defined(__linux__) #define i_reserved1 osd1.linux1.l_i_reserved1 #define i_file_acl_high osd2.linux2.l_i_file_acl_high +#define i_blocks_high osd2.linux2.l_i_blocks_high #define i_uid_low i_uid #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high @@ -461,7 +517,10 @@ do { \ #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ #define EXT4_MOUNT_EXTENTS 0x400000 /* Extents support */ - +#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ +#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ +#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ +#define EXT4_MOUNT_MBALLOC 0x4000000 /* Buddy allocation support */ /* Compatibility, for having both ext2_fs.h and ext4_fs.h included at once */ #ifndef _LINUX_EXT2_FS_H #define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt @@ -481,6 +540,7 @@ do { \ #define ext4_test_bit ext2_test_bit #define ext4_find_first_zero_bit ext2_find_first_zero_bit #define ext4_find_next_zero_bit ext2_find_next_zero_bit +#define ext4_find_next_bit ext2_find_next_bit /* * Maximal mount counts between two filesystem checks @@ -671,6 +731,7 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 +#define EXT4_FEATURE_RO_COMPAT_HUGE_FILE 0x0008 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 @@ -682,6 +743,7 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) #define EXT4_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */ #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 +#define EXT4_FEATURE_INCOMPAT_MMP 0x0100 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR @@ -696,7 +758,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \ 
EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \ - EXT4_FEATURE_RO_COMPAT_BTREE_DIR) + EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\ + EXT4_FEATURE_RO_COMPAT_HUGE_FILE) /* * Default values for user and/or group using reserved blocks @@ -767,6 +830,26 @@ struct ext4_dir_entry_2 { #define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1) #define EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \ ~EXT4_DIR_ROUND) +#define EXT4_MAX_REC_LEN ((1<<16)-1) + +static inline unsigned ext4_rec_len_from_disk(__le16 dlen) +{ + unsigned len = le16_to_cpu(dlen); + + if (len == EXT4_MAX_REC_LEN) + return 1 << 16; + return len; +} + +static inline __le16 ext4_rec_len_to_disk(unsigned len) +{ + if (len == (1 << 16)) + return cpu_to_le16(EXT4_MAX_REC_LEN); + else if (len > (1 << 16)) + BUG(); + return cpu_to_le16(len); +} + /* * Hash Tree Directory indexing * (c) Daniel Phillips, 2001 @@ -810,7 +893,7 @@ struct ext4_iloc { struct buffer_head *bh; unsigned long offset; - unsigned long block_group; + ext4_group_t block_group; }; static inline struct ext4_inode *ext4_raw_inode(struct ext4_iloc *iloc) @@ -835,7 +918,7 @@ struct dir_private_info { /* calculate the first block number of the group */ static inline ext4_fsblk_t -ext4_group_first_block_no(struct super_block *sb, unsigned long group_no) +ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no) { return group_no * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); @@ -866,21 +949,24 @@ extern unsigned int ext4_block_group(struct super_block *sb, ext4_fsblk_t blocknr); extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb, ext4_fsblk_t blocknr); -extern int ext4_bg_has_super(struct super_block *sb, int group); -extern unsigned long ext4_bg_num_gdb(struct super_block *sb, int group); +extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group); +extern unsigned long ext4_bg_num_gdb(struct super_block *sb, + ext4_group_t group); extern ext4_fsblk_t ext4_new_block (handle_t *handle, struct inode *inode, ext4_fsblk_t goal, int *errp); extern ext4_fsblk_t ext4_new_blocks (handle_t *handle, struct inode *inode, ext4_fsblk_t goal, unsigned long *count, int *errp); +extern ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode, + ext4_fsblk_t goal, unsigned long *count, int *errp); extern void ext4_free_blocks (handle_t *handle, struct inode *inode, - ext4_fsblk_t block, unsigned long count); + ext4_fsblk_t block, unsigned long count, int metadata); extern void ext4_free_blocks_sb (handle_t *handle, struct super_block *sb, ext4_fsblk_t block, unsigned long count, unsigned long *pdquot_freed_blocks); extern ext4_fsblk_t ext4_count_free_blocks (struct super_block *); extern void ext4_check_blocks_bitmap (struct super_block *); extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, - unsigned int block_group, + ext4_group_t block_group, struct buffer_head ** bh); extern int ext4_should_retry_alloc(struct super_block *sb, int *retries); extern void ext4_init_block_alloc_info(struct inode *); @@ -911,15 +997,32 @@ extern unsigned long ext4_count_dirs (struct super_block *); extern void ext4_check_inodes_bitmap (struct super_block *); extern unsigned long ext4_count_free (struct buffer_head *, unsigned); +/* mballoc.c */ +extern long ext4_mb_stats; +extern long ext4_mb_max_to_scan; +extern int ext4_mb_init(struct super_block *, int); +extern int ext4_mb_release(struct super_block *); +extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, + struct ext4_allocation_request 
*, int *); +extern int ext4_mb_reserve_blocks(struct super_block *, int); +extern void ext4_mb_discard_inode_preallocations(struct inode *); +extern int __init init_ext4_mballoc(void); +extern void exit_ext4_mballoc(void); +extern void ext4_mb_free_blocks(handle_t *, struct inode *, + unsigned long, unsigned long, int, unsigned long *); + /* inode.c */ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr); -struct buffer_head * ext4_getblk (handle_t *, struct inode *, long, int, int *); -struct buffer_head * ext4_bread (handle_t *, struct inode *, int, int, int *); +struct buffer_head *ext4_getblk(handle_t *, struct inode *, + ext4_lblk_t, int, int *); +struct buffer_head *ext4_bread(handle_t *, struct inode *, + ext4_lblk_t, int, int *); int ext4_get_blocks_handle(handle_t *handle, struct inode *inode, - sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, - int create, int extend_disksize); + ext4_lblk_t iblock, unsigned long maxblocks, + struct buffer_head *bh_result, + int create, int extend_disksize); extern void ext4_read_inode (struct inode *); extern int ext4_write_inode (struct inode *, int); @@ -943,6 +1046,9 @@ extern int ext4_ioctl (struct inode *, struct file *, unsigned int, unsigned long); extern long ext4_compat_ioctl (struct file *, unsigned int, unsigned long); +/* migrate.c */ +extern int ext4_ext_migrate(struct inode *, struct file *, unsigned int, + unsigned long); /* namei.c */ extern int ext4_orphan_add(handle_t *, struct inode *); extern int ext4_orphan_del(handle_t *, struct inode *); @@ -965,6 +1071,12 @@ extern void ext4_abort (struct super_block *, const char *, const char *, ...) extern void ext4_warning (struct super_block *, const char *, const char *, ...) 
__attribute__ ((format (printf, 3, 4))); extern void ext4_update_dynamic_rev (struct super_block *sb); +extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, + __u32 compat); +extern int ext4_update_rocompat_feature(handle_t *handle, + struct super_block *sb, __u32 rocompat); +extern int ext4_update_incompat_feature(handle_t *handle, + struct super_block *sb, __u32 incompat); extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg); extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, @@ -1017,6 +1129,29 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es, es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32); } +static inline loff_t ext4_isize(struct ext4_inode *raw_inode) +{ + return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) | + le32_to_cpu(raw_inode->i_size_lo); +} + +static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size) +{ + raw_inode->i_size_lo = cpu_to_le32(i_size); + raw_inode->i_size_high = cpu_to_le32(i_size >> 32); +} + +static inline +struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group) +{ + struct ext4_group_info ***grp_info; + long indexv, indexh; + grp_info = EXT4_SB(sb)->s_group_info; + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); + return grp_info[indexv][indexh]; +} #define ext4_std_error(sb, errno) \ @@ -1048,7 +1183,7 @@ extern const struct inode_operations ext4_fast_symlink_inode_operations; extern int ext4_ext_tree_init(handle_t *handle, struct inode *); extern int ext4_ext_writepage_trans_blocks(struct inode *, int); extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, - ext4_fsblk_t iblock, + ext4_lblk_t iblock, unsigned long max_blocks, struct buffer_head *bh_result, int create, int extend_disksize); extern void ext4_ext_truncate(struct inode *, struct page *); @@ -1056,19 +1191,10 @@ extern void ext4_ext_init(struct super_block *); extern void ext4_ext_release(struct super_block *); extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len); -static inline int -ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, - unsigned long max_blocks, struct buffer_head *bh, - int create, int extend_disksize) -{ - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) - return ext4_ext_get_blocks(handle, inode, block, max_blocks, - bh, create, extend_disksize); - return ext4_get_blocks_handle(handle, inode, block, max_blocks, bh, - create, extend_disksize); -} - - +extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, + sector_t block, unsigned long max_blocks, + struct buffer_head *bh, int create, + int extend_disksize); #endif /* __KERNEL__ */ #endif /* _LINUX_EXT4_FS_H */ diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h index d2045a26195d..697da4bce6c5 100644 --- a/include/linux/ext4_fs_extents.h +++ b/include/linux/ext4_fs_extents.h @@ -124,20 +124,6 @@ struct ext4_ext_path { #define EXT4_EXT_CACHE_GAP 1 #define EXT4_EXT_CACHE_EXTENT 2 -/* - * to be called by ext4_ext_walk_space() - * negative retcode - error - * positive retcode - signal for ext4_ext_walk_space(), see below - * callback must return valid extent (passed or newly created) - */ -typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, - struct ext4_ext_cache *, - void *); - -#define EXT_CONTINUE 0 -#define EXT_BREAK 1 -#define EXT_REPEAT 2 - #define EXT_MAX_BLOCK 0xffffffff @@ 
-226,6 +212,8 @@ static inline int ext4_ext_get_actual_len(struct ext4_extent *ext) (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN)); } +extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *); +extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t); extern int ext4_extent_tree_init(handle_t *, struct inode *); extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *); extern int ext4_ext_try_to_merge(struct inode *inode, @@ -233,8 +221,11 @@ extern int ext4_ext_try_to_merge(struct inode *inode, struct ext4_extent *); extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *); extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *); -extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *); -extern struct ext4_ext_path * ext4_ext_find_extent(struct inode *, int, struct ext4_ext_path *); - +extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t, + struct ext4_ext_path *); +extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *, + ext4_lblk_t *, ext4_fsblk_t *); +extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *, + ext4_lblk_t *, ext4_fsblk_t *); #endif /* _LINUX_EXT4_EXTENTS */ diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h index 86ddfe2089f3..d5508d3cf290 100644 --- a/include/linux/ext4_fs_i.h +++ b/include/linux/ext4_fs_i.h @@ -27,6 +27,12 @@ typedef int ext4_grpblk_t; /* data type for filesystem-wide blocks number */ typedef unsigned long long ext4_fsblk_t; +/* data type for file logical block number */ +typedef __u32 ext4_lblk_t; + +/* data type for block group number */ +typedef unsigned long ext4_group_t; + struct ext4_reserve_window { ext4_fsblk_t _rsv_start; /* First byte reserved */ ext4_fsblk_t _rsv_end; /* Last byte reserved or 0 */ @@ -48,7 +54,7 @@ struct ext4_block_alloc_info { * most-recently-allocated block in this file. * We use this for detecting linearly ascending allocation requests. */ - __u32 last_alloc_logical_block; + ext4_lblk_t last_alloc_logical_block; /* * Was i_next_alloc_goal in ext4_inode_info * is the *physical* companion to i_next_alloc_block. @@ -67,7 +73,7 @@ struct ext4_block_alloc_info { */ struct ext4_ext_cache { ext4_fsblk_t ec_start; - __u32 ec_block; + ext4_lblk_t ec_block; __u32 ec_len; /* must be 32bit to return holes */ __u32 ec_type; }; @@ -79,7 +85,6 @@ struct ext4_inode_info { __le32 i_data[15]; /* unconverted */ __u32 i_flags; ext4_fsblk_t i_file_acl; - __u32 i_dir_acl; __u32 i_dtime; /* @@ -89,13 +94,13 @@ struct ext4_inode_info { * place a file's data blocks near its inode block, and new inodes * near to their parent directory's inode. */ - __u32 i_block_group; + ext4_group_t i_block_group; __u32 i_state; /* Dynamic state flags for ext4 */ /* block reservation info */ struct ext4_block_alloc_info *i_block_alloc_info; - __u32 i_dir_start_lookup; + ext4_lblk_t i_dir_start_lookup; #ifdef CONFIG_EXT4DEV_FS_XATTR /* * Extended attributes can be read independently of the main file @@ -134,16 +139,16 @@ struct ext4_inode_info { __u16 i_extra_isize; /* - * truncate_mutex is for serialising ext4_truncate() against + * i_data_sem is for serialising ext4_truncate() against * ext4_getblock(). In the 2.4 ext2 design, great chunks of inode's * data tree are chopped off during truncate. 
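/*
 * Illustrative sketch, not part of this patch: the locking pattern behind
 * the truncate_mutex -> i_data_sem conversion described in the comment
 * above.  Block-mapping paths may now run concurrently as readers while
 * truncate excludes them as a writer.  The structure and function names
 * are made up for illustration; this is not the actual ext4 code.
 */
#include <linux/rwsem.h>

struct example_inode_info {
	struct rw_semaphore i_data_sem;		/* set up with init_rwsem() */
};

static void example_map_blocks(struct example_inode_info *ei)
{
	down_read(&ei->i_data_sem);	/* many block lookups may run in parallel */
	/* ... walk the indirect/extent tree for the requested block ... */
	up_read(&ei->i_data_sem);
}

static void example_truncate(struct example_inode_info *ei)
{
	down_write(&ei->i_data_sem);	/* exclude all mappers while blocks are chopped off */
	/* ... free the blocks beyond the new size ... */
	up_write(&ei->i_data_sem);
}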
We can't do that in * ext4 because whenever we perform intermediate commits during * truncate, the inode and all the metadata blocks *must* be in a * consistent state which allows truncation of the orphans to restart * during recovery. Hence we must fix the get_block-vs-truncate race - * by other means, so we have truncate_mutex. + * by other means, so we have i_data_sem. */ - struct mutex truncate_mutex; + struct rw_semaphore i_data_sem; struct inode vfs_inode; unsigned long i_ext_generation; @@ -153,6 +158,10 @@ struct ext4_inode_info { * struct timespec i_{a,c,m}time in the generic inode. */ struct timespec i_crtime; + + /* mballoc */ + struct list_head i_prealloc_list; + spinlock_t i_prealloc_lock; }; #endif /* _LINUX_EXT4_FS_I */ diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h index b40e827cd495..abaae2c8cccf 100644 --- a/include/linux/ext4_fs_sb.h +++ b/include/linux/ext4_fs_sb.h @@ -35,9 +35,10 @@ struct ext4_sb_info { unsigned long s_itb_per_group; /* Number of inode table blocks per group */ unsigned long s_gdb_count; /* Number of group descriptor blocks */ unsigned long s_desc_per_block; /* Number of group descriptors per block */ - unsigned long s_groups_count; /* Number of groups in the fs */ + ext4_group_t s_groups_count; /* Number of groups in the fs */ unsigned long s_overhead_last; /* Last calculated overhead */ unsigned long s_blocks_last; /* Last seen block count */ + loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */ struct buffer_head * s_sbh; /* Buffer containing the super block */ struct ext4_super_block * s_es; /* Pointer to the super block in the buffer */ struct buffer_head ** s_group_desc; @@ -90,6 +91,58 @@ struct ext4_sb_info { unsigned long s_ext_blocks; unsigned long s_ext_extents; #endif + + /* for buddy allocator */ + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + long s_blocks_reserved; + spinlock_t s_reserve_lock; + struct list_head s_active_transaction; + struct list_head s_closed_transaction; + struct list_head s_committed_transaction; + spinlock_t s_md_lock; + tid_t s_last_transaction; + unsigned short *s_mb_offsets, *s_mb_maxs; + + /* tunables */ + unsigned long s_stripe; + unsigned long s_mb_stream_request; + unsigned long s_mb_max_to_scan; + unsigned long s_mb_min_to_scan; + unsigned long s_mb_stats; + unsigned long s_mb_order2_reqs; + unsigned long s_mb_group_prealloc; + /* where last allocation was done - for stream allocation */ + unsigned long s_mb_last_group; + unsigned long s_mb_last_start; + + /* history to debug policy */ + struct ext4_mb_history *s_mb_history; + int s_mb_history_cur; + int s_mb_history_max; + int s_mb_history_num; + struct proc_dir_entry *s_mb_proc; + spinlock_t s_mb_history_lock; + int s_mb_history_filter; + + /* stats for buddy allocator */ + spinlock_t s_mb_pa_lock; + atomic_t s_bal_reqs; /* number of reqs with len > 1 */ + atomic_t s_bal_success; /* we found long enough chunks */ + atomic_t s_bal_allocated; /* in blocks */ + atomic_t s_bal_ex_scanned; /* total extents scanned */ + atomic_t s_bal_goals; /* goal hits */ + atomic_t s_bal_breaks; /* too long searches */ + atomic_t s_bal_2orders; /* 2^order hits */ + spinlock_t s_bal_lock; + unsigned long s_mb_buddies_generated; + unsigned long long s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t s_mb_preallocated; + atomic_t s_mb_discarded; + + /* locality groups */ + struct ext4_locality_group *s_locality_groups; }; #endif /* _LINUX_EXT4_FS_SB */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 
b3ec4a496d64..a516b6716870 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -124,6 +124,7 @@ extern int dir_notify_enable; #define MS_SHARED (1<<20) /* change to shared */ #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ #define MS_ACTIVE (1<<30) #define MS_NOUSER (1<<31) @@ -173,6 +174,7 @@ extern int dir_notify_enable; ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) +#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) @@ -599,7 +601,7 @@ struct inode { uid_t i_uid; gid_t i_gid; dev_t i_rdev; - unsigned long i_version; + u64 i_version; loff_t i_size; #ifdef __NEED_I_SIZE_ORDERED seqcount_t i_size_seqcount; @@ -1394,6 +1396,21 @@ static inline void inode_dec_link_count(struct inode *inode) mark_inode_dirty(inode); } +/** + * inode_inc_iversion - increments i_version + * @inode: inode that need to be updated + * + * Every time the inode is modified, the i_version field will be incremented. + * The filesystem has to be mounted with i_version flag + */ + +static inline void inode_inc_iversion(struct inode *inode) +{ + spin_lock(&inode->i_lock); + inode->i_version++; + spin_unlock(&inode->i_lock); +} + extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry); static inline void file_accessed(struct file *file) { @@ -1476,7 +1493,7 @@ extern void drop_collected_mounts(struct vfsmount *); extern int vfs_statfs(struct dentry *, struct kstatfs *); /* /sys/fs */ -extern struct kset fs_subsys; +extern struct kobject *fs_kobj; #define FLOCK_VERIFY_READ 1 #define FLOCK_VERIFY_WRITE 2 diff --git a/include/linux/futex.h b/include/linux/futex.h index 92d420fe03f8..1a15f8e237a7 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -1,8 +1,12 @@ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H -#include <linux/sched.h> +#include <linux/compiler.h> +#include <linux/types.h> +struct inode; +struct mm_struct; +struct task_struct; union ktime; /* Second argument to futex syscall */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a47b8025d399..1dbea0ac5693 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -10,9 +10,19 @@ */ #include <linux/types.h> +#include <linux/kdev_t.h> #ifdef CONFIG_BLOCK +#define kobj_to_dev(k) container_of(k, struct device, kobj) +#define dev_to_disk(device) container_of(device, struct gendisk, dev) +#define dev_to_part(device) container_of(device, struct hd_struct, dev) + +extern struct device_type disk_type; +extern struct device_type part_type; +extern struct kobject *block_depr; +extern struct class block_class; + enum { /* These three have identical behaviour; use the second one if DOS FDISK gets confused about extended/logical partitions starting past cylinder 1023. */ @@ -84,7 +94,7 @@ struct partition { struct hd_struct { sector_t start_sect; sector_t nr_sects; - struct kobject kobj; + struct device dev; struct kobject *holder_dir; unsigned ios[2], sectors[2]; /* READs and WRITEs */ int policy, partno; @@ -117,15 +127,14 @@ struct gendisk { * disks that can't be partitioned. 
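/*
 * Illustrative sketch, not part of this patch: a filesystem using the new
 * MS_I_VERSION mount flag, the IS_I_VERSION() test and the
 * inode_inc_iversion() helper introduced in the fs.h hunk above.
 * example_update_inode() is a hypothetical helper, not a kernel function.
 */
#include <linux/fs.h>

static void example_update_inode(struct inode *inode)
{
	/* ... modify the inode's data or metadata ... */

	/* Bump the 64-bit change counter, but only when the filesystem was
	 * mounted with the i_version option; the helper takes i_lock itself. */
	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	mark_inode_dirty(inode);
}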
*/ char disk_name[32]; /* name of major driver */ struct hd_struct **part; /* [indexed by minor] */ - int part_uevent_suppress; struct block_device_operations *fops; struct request_queue *queue; void *private_data; sector_t capacity; int flags; - struct device *driverfs_dev; - struct kobject kobj; + struct device *driverfs_dev; // FIXME: remove + struct device dev; struct kobject *holder_dir; struct kobject *slave_dir; @@ -143,13 +152,6 @@ struct gendisk { struct work_struct async_notify; }; -/* Structure for sysfs attributes on block devices */ -struct disk_attribute { - struct attribute attr; - ssize_t (*show)(struct gendisk *, char *); - ssize_t (*store)(struct gendisk *, const char *, size_t); -}; - /* * Macros to operate on percpu disk statistics: * @@ -411,7 +413,8 @@ struct unixware_disklabel { #define ADDPART_FLAG_RAID 1 #define ADDPART_FLAG_WHOLEDISK 2 -char *disk_name (struct gendisk *hd, int part, char *buf); +extern dev_t blk_lookup_devt(const char *name); +extern char *disk_name (struct gendisk *hd, int part, char *buf); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); extern void add_partition(struct gendisk *, int, sector_t, sector_t, int); @@ -423,12 +426,12 @@ extern struct gendisk *alloc_disk(int minors); extern struct kobject *get_disk(struct gendisk *disk); extern void put_disk(struct gendisk *disk); extern void genhd_media_change_notify(struct gendisk *disk); -extern void blk_register_region(dev_t dev, unsigned long range, +extern void blk_register_region(dev_t devt, unsigned long range, struct module *module, struct kobject *(*probe)(dev_t, int *, void *), int (*lock)(dev_t, void *), void *data); -extern void blk_unregister_region(dev_t dev, unsigned long range); +extern void blk_unregister_region(dev_t devt, unsigned long range); static inline struct block_device *bdget_disk(struct gendisk *disk, int index) { @@ -441,6 +444,12 @@ static inline struct block_device *bdget_disk(struct gendisk *disk, int index) static inline void printk_all_partitions(void) { } +static inline dev_t blk_lookup_devt(const char *name) +{ + dev_t devt = MKDEV(0, 0); + return devt; +} + #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 8d302298a161..2961ec788046 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -72,11 +72,7 @@ #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) -# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) -#else -# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) -#endif +#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) #ifdef CONFIG_PREEMPT # define PREEMPT_CHECK_OFFSET 1 diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index 818c6afc1091..ff43f8d6b5b3 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h @@ -44,7 +44,9 @@ /* Bits for HD_ERROR */ #define MARK_ERR 0x01 /* Bad address mark */ +#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ #define TRK0_ERR 0x02 /* couldn't find track 0 */ +#define EOM_ERR 0x02 /* End Of Media (ATAPI) */ #define ABRT_ERR 0x04 /* Command aborted */ #define MCR_ERR 0x08 /* media change request */ #define ID_ERR 0x10 /* ID field not found */ @@ -52,6 +54,7 @@ #define ECC_ERR 0x40 /* Uncorrectable ECC error */ #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ +#define LFS_ERR 0xf0 /* Last Failed 
Sense (ATAPI) */ /* Bits of HD_NSECTOR */ #define CD 0x01 @@ -70,13 +73,13 @@ #define HDIO_DRIVE_HOB_HDR_SIZE (8 * sizeof(__u8)) #define HDIO_DRIVE_TASK_HDR_SIZE (8 * sizeof(__u8)) -#define IDE_DRIVE_TASK_INVALID -1 #define IDE_DRIVE_TASK_NO_DATA 0 +#ifndef __KERNEL__ +#define IDE_DRIVE_TASK_INVALID -1 #define IDE_DRIVE_TASK_SET_XFER 1 - #define IDE_DRIVE_TASK_IN 2 - #define IDE_DRIVE_TASK_OUT 3 +#endif #define IDE_DRIVE_TASK_RAW_WRITE 4 /* @@ -87,10 +90,10 @@ #ifndef __KERNEL__ #define IDE_TASKFILE_STD_OUT_FLAGS 0xFE #define IDE_HOB_STD_OUT_FLAGS 0x3C -#endif typedef unsigned char task_ioreg_t; typedef unsigned long sata_ioreg_t; +#endif typedef union ide_reg_valid_s { unsigned all : 16; @@ -116,8 +119,8 @@ typedef union ide_reg_valid_s { } ide_reg_valid_t; typedef struct ide_task_request_s { - task_ioreg_t io_ports[8]; - task_ioreg_t hob_ports[8]; + __u8 io_ports[8]; + __u8 hob_ports[8]; /* bytes 6 and 7 are unused */ ide_reg_valid_t out_flags; ide_reg_valid_t in_flags; int data_phase; @@ -133,36 +136,35 @@ typedef struct ide_ioctl_request_s { } ide_ioctl_request_t; struct hd_drive_cmd_hdr { - task_ioreg_t command; - task_ioreg_t sector_number; - task_ioreg_t feature; - task_ioreg_t sector_count; + __u8 command; + __u8 sector_number; + __u8 feature; + __u8 sector_count; }; +#ifndef __KERNEL__ typedef struct hd_drive_task_hdr { - task_ioreg_t data; - task_ioreg_t feature; - task_ioreg_t sector_count; - task_ioreg_t sector_number; - task_ioreg_t low_cylinder; - task_ioreg_t high_cylinder; - task_ioreg_t device_head; - task_ioreg_t command; + __u8 data; + __u8 feature; + __u8 sector_count; + __u8 sector_number; + __u8 low_cylinder; + __u8 high_cylinder; + __u8 device_head; + __u8 command; } task_struct_t; typedef struct hd_drive_hob_hdr { - task_ioreg_t data; - task_ioreg_t feature; - task_ioreg_t sector_count; - task_ioreg_t sector_number; - task_ioreg_t low_cylinder; - task_ioreg_t high_cylinder; - task_ioreg_t device_head; - task_ioreg_t control; + __u8 data; + __u8 feature; + __u8 sector_count; + __u8 sector_number; + __u8 low_cylinder; + __u8 high_cylinder; + __u8 device_head; + __u8 control; } hob_struct_t; - -#define TASKFILE_INVALID 0x7fff -#define TASKFILE_48 0x8000 +#endif #define TASKFILE_NO_DATA 0x0000 @@ -178,12 +180,16 @@ typedef struct hd_drive_hob_hdr { #define TASKFILE_IN_DMAQ 0x0080 #define TASKFILE_OUT_DMAQ 0x0100 +#ifndef __KERNEL__ #define TASKFILE_P_IN 0x0200 #define TASKFILE_P_OUT 0x0400 #define TASKFILE_P_IN_DMA 0x0800 #define TASKFILE_P_OUT_DMA 0x1000 #define TASKFILE_P_IN_DMAQ 0x2000 #define TASKFILE_P_OUT_DMAQ 0x4000 +#define TASKFILE_48 0x8000 +#define TASKFILE_INVALID 0x7fff +#endif /* ATA/ATAPI Commands pre T13 Spec */ #define WIN_NOP 0x00 diff --git a/include/linux/hid.h b/include/linux/hid.h index 6e35b92b1d2c..3902690647b0 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -267,10 +267,10 @@ struct hid_item { #define HID_QUIRK_2WHEEL_MOUSE_HACK_5 0x00000100 #define HID_QUIRK_2WHEEL_MOUSE_HACK_ON 0x00000200 #define HID_QUIRK_MIGHTYMOUSE 0x00000400 -#define HID_QUIRK_POWERBOOK_HAS_FN 0x00000800 -#define HID_QUIRK_POWERBOOK_FN_ON 0x00001000 +#define HID_QUIRK_APPLE_HAS_FN 0x00000800 +#define HID_QUIRK_APPLE_FN_ON 0x00001000 #define HID_QUIRK_INVERT_HWHEEL 0x00002000 -#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00004000 +#define HID_QUIRK_APPLE_ISO_KEYBOARD 0x00004000 #define HID_QUIRK_BAD_RELATIVE_KEYS 0x00008000 #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 #define HID_QUIRK_IGNORE_MOUSE 0x00020000 @@ -281,6 +281,9 @@ struct hid_item { #define 
HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL 0x00400000 #define HID_QUIRK_LOGITECH_EXPANDED_KEYMAP 0x00800000 #define HID_QUIRK_IGNORE_HIDINPUT 0x01000000 +#define HID_QUIRK_2WHEEL_MOUSE_HACK_B8 0x02000000 +#define HID_QUIRK_HWHEEL_WHEEL_INVERT 0x04000000 +#define HID_QUIRK_MICROSOFT_KEYS 0x08000000 /* * Separate quirks for runtime report descriptor fixup @@ -291,6 +294,8 @@ struct hid_item { #define HID_QUIRK_RDESC_SWAPPED_MIN_MAX 0x00000004 #define HID_QUIRK_RDESC_PETALYNX 0x00000008 #define HID_QUIRK_RDESC_MACBOOK_JIS 0x00000010 +#define HID_QUIRK_RDESC_BUTTON_CONSUMER 0x00000020 +#define HID_QUIRK_RDESC_SAMSUNG_REMOTE 0x00000040 /* * This is the global environment of the parser. This information is @@ -456,6 +461,8 @@ struct hid_device { /* device report descriptor */ void *driver_data; + __s32 delayed_value; /* For A4 Tech mice hwheel quirk */ + /* device-specific function pointers */ int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int); int (*hid_open) (struct hid_device *); @@ -469,7 +476,7 @@ struct hid_device { /* device report descriptor */ /* handler for raw output data, used by hidraw */ int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t); #ifdef CONFIG_USB_HIDINPUT_POWERBOOK - unsigned long pb_pressed_fn[BITS_TO_LONGS(KEY_CNT)]; + unsigned long apple_pressed_fn[BITS_TO_LONGS(KEY_CNT)]; unsigned long pb_pressed_numlock[BITS_TO_LONGS(KEY_CNT)]; #endif }; @@ -520,6 +527,9 @@ extern void hidinput_disconnect(struct hid_device *); int hid_set_field(struct hid_field *, unsigned, __s32); int hid_input_report(struct hid_device *, int type, u8 *, int, int); int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); +int hidinput_mapping_quirks(struct hid_usage *, struct input_dev *, unsigned long **, int *); +void hidinput_event_quirks(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); +int hidinput_apple_event(struct hid_device *, struct input_dev *, struct hid_usage *, __s32); void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt); void hid_output_report(struct hid_report *report, __u8 *data); void hid_free_device(struct hid_device *device); diff --git a/include/linux/hpet.h b/include/linux/hpet.h index 707f7cb9e795..9cd94bfd07e5 100644 --- a/include/linux/hpet.h +++ b/include/linux/hpet.h @@ -64,7 +64,7 @@ struct hpet { */ #define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) -#define Tn_INI_ROUTE_CAP_SHIFT (32UL) +#define Tn_INT_ROUTE_CAP_SHIFT (32UL) #define Tn_FSB_INT_DELCAP_MASK (0x8000UL) #define Tn_FSB_INT_DELCAP_SHIFT (15) #define Tn_FSB_EN_CNF_MASK (0x4000UL) @@ -115,9 +115,6 @@ static inline void hpet_reserve_timer(struct hpet_data *hd, int timer) } int hpet_alloc(struct hpet_data *); -int hpet_register(struct hpet_task *, int); -int hpet_unregister(struct hpet_task *); -int hpet_control(struct hpet_task *, unsigned int, unsigned long); #endif /* __KERNEL__ */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 7a9398e19704..49067f14fac1 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -115,10 +115,8 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; -#ifdef CONFIG_HIGH_RES_TIMERS enum hrtimer_cb_mode cb_mode; struct list_head cb_entry; -#endif #ifdef CONFIG_TIMER_STATS void *start_site; char start_comm[16]; @@ -194,10 +192,10 @@ struct hrtimer_cpu_base { spinlock_t lock; struct lock_class_key lock_key; struct 
hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + struct list_head cb_pending; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; int hres_active; - struct list_head cb_pending; unsigned long nr_events; #endif }; @@ -217,6 +215,11 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) return timer->base->get_time(); } +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return timer->base->cpu_base->hres_active; +} + /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an @@ -248,6 +251,10 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) return timer->base->softirq_time; } +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return 0; +} #endif extern ktime_t ktime_get(void); @@ -310,6 +317,7 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, /* Soft interrupt function to run the hrtimer queues: */ extern void hrtimer_run_queues(void); +extern void hrtimer_run_pending(void); /* Bootup initialization: */ extern void __init hrtimers_init(void); diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 21ea7610e177..85d11916e9ea 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -33,7 +33,7 @@ struct hwrng { const char *name; int (*init)(struct hwrng *rng); void (*cleanup)(struct hwrng *rng); - int (*data_present)(struct hwrng *rng); + int (*data_present)(struct hwrng *rng, int wait); int (*data_read)(struct hwrng *rng, u32 *data); unsigned long priv; diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index e18017d45758..f922b060158b 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -33,23 +33,13 @@ #define I2C_DRIVERID_MSP3400 1 #define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_VIDEOTEX 3 /* please rename */ #define I2C_DRIVERID_TDA8425 4 /* stereo sound processor */ #define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */ #define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */ #define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ -#define I2C_DRIVERID_SAA5281 9 /* videotext decoder */ -#define I2C_DRIVERID_SAA7112 10 /* video decoder, image scaler */ -#define I2C_DRIVERID_SAA7120 11 /* video encoder */ -#define I2C_DRIVERID_SAA7121 12 /* video encoder */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ -#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */ -#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */ -#define I2C_DRIVERID_PCF8582C 16 /* eeprom */ -#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */ #define I2C_DRIVERID_TEA6300 18 /* audio mixer */ -#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */ #define I2C_DRIVERID_TDA9850 20 /* audio mixer */ #define I2C_DRIVERID_TDA9855 21 /* audio mixer */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ @@ -60,42 +50,19 @@ #define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ #define I2C_DRIVERID_TVMIXER 28 /* Mixer driver for tv cards */ #define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ -#define I2C_DRIVERID_DPL3518 30 /* Dolby decoder chip */ #define I2C_DRIVERID_TDA9873 31 /* TV sound decoder chip */ #define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */ #define I2C_DRIVERID_PIC16C54_PV9 33 /* Audio mux/ir receiver */ - -#define I2C_DRIVERID_SBATT 34 /* Smart Battery Device */ -#define I2C_DRIVERID_SBS 35 /* SB System Manager */ -#define I2C_DRIVERID_VES1893 36 /* VLSI DVB-S 
decoder */ -#define I2C_DRIVERID_VES1820 37 /* VLSI DVB-C decoder */ -#define I2C_DRIVERID_SAA7113 38 /* video decoder */ -#define I2C_DRIVERID_TDA8444 39 /* octuple 6-bit DAC */ #define I2C_DRIVERID_BT819 40 /* video decoder */ #define I2C_DRIVERID_BT856 41 /* video encoder */ #define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */ -#define I2C_DRIVERID_DRP3510 43 /* ADR decoder (Astra Radio) */ -#define I2C_DRIVERID_SP5055 44 /* Satellite tuner */ -#define I2C_DRIVERID_STV0030 45 /* Multipurpose switch */ -#define I2C_DRIVERID_SAA7108 46 /* video decoder, image scaler */ -#define I2C_DRIVERID_DS1307 47 /* DS1307 real time clock */ #define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */ #define I2C_DRIVERID_SAA7114 49 /* video decoder */ -#define I2C_DRIVERID_ZR36120 50 /* Zoran 36120 video encoder */ -#define I2C_DRIVERID_24LC32A 51 /* Microchip 24LC32A 32k EEPROM */ -#define I2C_DRIVERID_STM41T00 52 /* real time clock */ -#define I2C_DRIVERID_UDA1342 53 /* UDA1342 audio codec */ #define I2C_DRIVERID_ADV7170 54 /* video encoder */ -#define I2C_DRIVERID_MAX1617 56 /* temp sensor */ #define I2C_DRIVERID_SAA7191 57 /* video decoder */ #define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */ -#define I2C_DRIVERID_BT832 59 /* CMOS camera video processor */ -#define I2C_DRIVERID_TDA9887 60 /* TDA988x IF-PLL demodulator */ #define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */ -#define I2C_DRIVERID_TDA7313 62 /* TDA7313 audio processor */ #define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */ -#define I2C_DRIVERID_SAA7114H 64 /* video decoder */ -#define I2C_DRIVERID_DS1374 65 /* DS1374 real time clock */ #define I2C_DRIVERID_TDA9874 66 /* TV sound decoder */ #define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */ #define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */ @@ -114,7 +81,6 @@ #define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */ #define I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */ #define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */ -#define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */ #define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */ #define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */ #define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */ @@ -125,10 +91,10 @@ #define I2C_DRIVERID_LM4857 92 /* LM4857 Audio Amplifier */ #define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */ #define I2C_DRIVERID_CS4270 94 /* Cirrus Logic 4270 audio codec */ +#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */ +#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */ #define I2C_DRIVERID_I2CDEV 900 -#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ -#define I2C_DRIVERID_ALERT 903 /* SMBus Alert Responder Client */ /* IDs -- Use DRIVERIDs 1000-1999 for sensors. 
These were originally in sensors.h in the lm_sensors package */ @@ -174,24 +140,16 @@ /* --- Bit algorithm adapters */ #define I2C_HW_B_LP 0x010000 /* Parallel port Philips style */ -#define I2C_HW_B_SER 0x010002 /* Serial line interface */ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ -#define I2C_HW_B_WNV 0x010006 /* Winnov Videums */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ #define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ -#define I2C_HW_B_PPORT 0x01000c /* Primitive parallel port adapter */ -#define I2C_HW_B_SAVG 0x01000d /* Savage 4 */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ #define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ #define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */ -#define I2C_HW_B_TSUNA 0x010012 /* DEC Tsunami chipset */ -#define I2C_HW_B_OMAHA 0x010014 /* Omaha I2C interface (ARM) */ -#define I2C_HW_B_GUIDE 0x010015 /* Guide bit-basher */ #define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */ -#define I2C_HW_B_IXP4XX 0x010017 /* GPIO on IXP4XX systems */ #define I2C_HW_B_S3VIA 0x010018 /* S3Via ProSavage adapter */ #define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ #define I2C_HW_B_PCILYNX 0x01001a /* TI PCILynx I2C adapter */ @@ -205,22 +163,11 @@ #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ /* --- PCF 8584 based algorithms */ -#define I2C_HW_P_LP 0x020000 /* Parallel port interface */ -#define I2C_HW_P_ISA 0x020001 /* generic ISA Bus inteface card */ #define I2C_HW_P_ELEK 0x020002 /* Elektor ISA Bus inteface card */ /* --- PCA 9564 based algorithms */ #define I2C_HW_A_ISA 0x1a0000 /* generic ISA Bus interface card */ -/* --- ACPI Embedded controller algorithms */ -#define I2C_HW_ACPI_EC 0x1f0000 - -/* --- MPC824x PowerPC adapters */ -#define I2C_HW_MPC824X 0x100001 /* Motorola 8240 / 8245 */ - -/* --- MPC8xx PowerPC adapters */ -#define I2C_HW_MPC8XX_EPON 0x110000 /* Eponymous MPC8xx I2C adapter */ - /* --- PowerPC on-chip adapters */ #define I2C_HW_OCP 0x120000 /* IBM on-chip I2C adapter */ @@ -229,7 +176,6 @@ /* --- SGI adapters */ #define I2C_HW_SGI_VINO 0x160000 -#define I2C_HW_SGI_MACE 0x160001 /* --- XSCALE on-chip adapters */ #define I2C_HW_IOP3XX 0x140000 @@ -253,17 +199,10 @@ #define I2C_HW_SMBUS_W9968CF 0x04000d #define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ #define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ -#define I2C_HW_SMBUS_OV519 0x040010 /* OV519 USB 1.1 webcam IC */ #define I2C_HW_SMBUS_OVFX2 0x040011 /* Cypress/OmniVision FX2 webcam */ #define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ #define I2C_HW_SMBUS_ALI1563 0x040013 -/* --- ISA pseudo-adapter */ -#define I2C_HW_ISA 0x050000 - -/* --- IPMB adapter */ -#define I2C_HW_IPMB 0x0c0000 - /* --- MCP107 adapter */ #define I2C_HW_MPC107 0x0d0000 diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a100c9f8eb7c..76014f8f3c60 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -140,7 +140,6 @@ struct i2c_driver { int (*command)(struct i2c_client *client,unsigned int cmd, void *arg); struct device_driver driver; - struct list_head list; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -155,12 +154,11 @@ struct i2c_driver { * generic enough to hide second-sourcing and compatible revisions. 
* @adapter: manages the bus segment hosting this I2C device * @driver: device's driver, hence pointer to access routines - * @usage_count: counts current number of users of this client * @dev: Driver model device node for the slave. * @irq: indicates the IRQ generated by this device (if any) * @driver_name: Identifies new-style driver used with this device; also * used as the module name for hotplug/coldplug modprobe support. - * @list: list of active/busy clients + * @list: list of active/busy clients (DEPRECATED) * @released: used to synchronize client releases & detaches and references * * An i2c_client identifies a single device (i.e. chip) connected to an @@ -175,16 +173,16 @@ struct i2c_client { char name[I2C_NAME_SIZE]; struct i2c_adapter *adapter; /* the adapter we sit on */ struct i2c_driver *driver; /* and our access routines */ - int usage_count; /* How many accesses currently */ - /* to the client */ struct device dev; /* the device structure */ int irq; /* irq issued by device (or -1) */ char driver_name[KOBJ_NAME_LEN]; - struct list_head list; + struct list_head list; /* DEPRECATED */ struct completion released; }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) +extern struct i2c_client *i2c_verify_client(struct device *dev); + static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) { struct device * const dev = container_of(kobj, struct device, kobj); @@ -261,6 +259,12 @@ i2c_new_probed_device(struct i2c_adapter *adap, struct i2c_board_info *info, unsigned short const *addr_list); +/* For devices that use several addresses, use i2c_new_dummy() to make + * client handles for the extra addresses. + */ +extern struct i2c_client * +i2c_new_dummy(struct i2c_adapter *adap, u16 address, const char *type); + extern void i2c_unregister_device(struct i2c_client *); /* Mainboard arch_initcall() code should register all its I2C devices. @@ -319,8 +323,7 @@ struct i2c_adapter { struct device dev; /* the adapter device */ int nr; - struct list_head clients; - struct list_head list; + struct list_head clients; /* DEPRECATED */ char name[48]; struct completion dev_released; }; @@ -357,10 +360,10 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data) * command line */ struct i2c_client_address_data { - unsigned short *normal_i2c; - unsigned short *probe; - unsigned short *ignore; - unsigned short **forces; + const unsigned short *normal_i2c; + const unsigned short *probe; + const unsigned short *ignore; + const unsigned short * const *forces; }; /* Internal numbers to terminate lists */ @@ -389,11 +392,8 @@ static inline int i2c_add_driver(struct i2c_driver *driver) extern int i2c_attach_client(struct i2c_client *); extern int i2c_detach_client(struct i2c_client *); -/* Should be used to make sure that client-struct is valid and that it - is okay to access the i2c-client. 
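/*
 * Illustrative sketch, not part of this patch: using the i2c_new_dummy()
 * hook declared above for a chip that also answers on a second slave
 * address.  The "example" type string and the 0x69 address are invented
 * for illustration.
 */
#include <linux/i2c.h>

static struct i2c_client *example_attach_second_address(struct i2c_client *client)
{
	struct i2c_client *second;

	/* Reserve the extra address on the same adapter as the main client. */
	second = i2c_new_dummy(client->adapter, 0x69, "example");
	if (!second)
		return NULL;	/* address already in use or allocation failed */

	/* ... transfers for the second address go through "second" ... */
	return second;
}

static void example_detach_second_address(struct i2c_client *second)
{
	i2c_unregister_device(second);	/* release the dummy handle again */
}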
- returns -ENODEV if client has gone in the meantime */ -extern int i2c_use_client(struct i2c_client *); -extern int i2c_release_client(struct i2c_client *); +extern struct i2c_client *i2c_use_client(struct i2c_client *client); +extern void i2c_release_client(struct i2c_client *client); /* call the i2c_client->command() of all attached clients with * the given arguments */ @@ -405,7 +405,7 @@ extern void i2c_clients_command(struct i2c_adapter *adap, * specific address (unless a 'force' matched); */ extern int i2c_probe(struct i2c_adapter *adapter, - struct i2c_client_address_data *address_data, + const struct i2c_client_address_data *address_data, int (*found_proc) (struct i2c_adapter *, int, int)); extern struct i2c_adapter* i2c_get_adapter(int id); @@ -598,104 +598,93 @@ I2C_CLIENT_MODULE_PARM(probe, "List of adapter,address pairs to scan " \ "additionally"); \ I2C_CLIENT_MODULE_PARM(ignore, "List of adapter,address pairs not to " \ "scan"); \ -static struct i2c_client_address_data addr_data = { \ +const static struct i2c_client_address_data addr_data = { \ .normal_i2c = normal_i2c, \ .probe = probe, \ .ignore = ignore, \ .forces = forces, \ } +#define I2C_CLIENT_FORCE_TEXT \ + "List of adapter,address pairs to boldly assume to be present" + /* These are the ones you want to use in your own drivers. Pick the one which matches the number of devices the driver differenciates between. */ -#define I2C_CLIENT_INSMOD \ - I2C_CLIENT_MODULE_PARM(force, \ - "List of adapter,address pairs to boldly assume " \ - "to be present"); \ - static unsigned short *forces[] = { \ - force, \ - NULL \ - }; \ +#define I2C_CLIENT_INSMOD \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ +static const unsigned short * const forces[] = { force, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_1(chip1) \ enum chips { any_chip, chip1 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ -static unsigned short *forces[] = { force, force_##chip1, NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_2(chip1, chip2) \ enum chips { any_chip, chip1, chip2 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \ enum chips { any_chip, chip1, chip2, chip3 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ I2C_CLIENT_MODULE_PARM_FORCE(chip3); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, force_##chip3, \ - NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, force_##chip3, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \ enum chips { any_chip, chip1, chip2, chip3, chip4 }; \ -I2C_CLIENT_MODULE_PARM(force, 
"List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ I2C_CLIENT_MODULE_PARM_FORCE(chip3); \ I2C_CLIENT_MODULE_PARM_FORCE(chip4); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, force_##chip3, \ - force_##chip4, NULL}; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, force_##chip3, \ + force_##chip4, NULL}; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ I2C_CLIENT_MODULE_PARM_FORCE(chip3); \ I2C_CLIENT_MODULE_PARM_FORCE(chip4); \ I2C_CLIENT_MODULE_PARM_FORCE(chip5); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, force_##chip3, \ - force_##chip4, force_##chip5, \ - NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, force_##chip3, \ + force_##chip4, force_##chip5, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ I2C_CLIENT_MODULE_PARM_FORCE(chip3); \ I2C_CLIENT_MODULE_PARM_FORCE(chip4); \ I2C_CLIENT_MODULE_PARM_FORCE(chip5); \ I2C_CLIENT_MODULE_PARM_FORCE(chip6); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, force_##chip3, \ - force_##chip4, force_##chip5, \ - force_##chip6, NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, force_##chip3, \ + force_##chip4, force_##chip5, force_##chip6, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ chip7 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ I2C_CLIENT_MODULE_PARM_FORCE(chip3); \ @@ -703,18 +692,16 @@ I2C_CLIENT_MODULE_PARM_FORCE(chip4); \ I2C_CLIENT_MODULE_PARM_FORCE(chip5); \ I2C_CLIENT_MODULE_PARM_FORCE(chip6); \ I2C_CLIENT_MODULE_PARM_FORCE(chip7); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, force_##chip3, \ - force_##chip4, force_##chip5, \ - force_##chip6, force_##chip7, \ - NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, force_##chip3, \ + force_##chip4, force_##chip5, force_##chip6, \ + force_##chip7, NULL }; \ I2C_CLIENT_INSMOD_COMMON #define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ chip7, chip8 }; \ -I2C_CLIENT_MODULE_PARM(force, "List of adapter,address pairs to " \ - "boldly assume to be present"); \ +I2C_CLIENT_MODULE_PARM(force, 
I2C_CLIENT_FORCE_TEXT); \ I2C_CLIENT_MODULE_PARM_FORCE(chip1); \ I2C_CLIENT_MODULE_PARM_FORCE(chip2); \ I2C_CLIENT_MODULE_PARM_FORCE(chip3); \ @@ -723,11 +710,10 @@ I2C_CLIENT_MODULE_PARM_FORCE(chip5); \ I2C_CLIENT_MODULE_PARM_FORCE(chip6); \ I2C_CLIENT_MODULE_PARM_FORCE(chip7); \ I2C_CLIENT_MODULE_PARM_FORCE(chip8); \ -static unsigned short *forces[] = { force, force_##chip1, \ - force_##chip2, force_##chip3, \ - force_##chip4, force_##chip5, \ - force_##chip6, force_##chip7, \ - force_##chip8, NULL }; \ +static const unsigned short * const forces[] = { force, \ + force_##chip1, force_##chip2, force_##chip3, \ + force_##chip4, force_##chip5, force_##chip6, \ + force_##chip7, force_##chip8, NULL }; \ I2C_CLIENT_INSMOD_COMMON #endif /* __KERNEL__ */ #endif /* _LINUX_I2C_H */ diff --git a/include/asm-arm/arch-omap/tps65010.h b/include/linux/i2c/tps65010.h index b9aa2b3a3909..7021635ed6a0 100644 --- a/include/asm-arm/arch-omap/tps65010.h +++ b/include/linux/i2c/tps65010.h @@ -1,4 +1,4 @@ -/* linux/include/asm-arm/arch-omap/tps65010.h +/* linux/i2c/tps65010.h * * Functions to access TPS65010 power management device. * @@ -25,8 +25,8 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#ifndef __ASM_ARCH_TPS65010_H -#define __ASM_ARCH_TPS65010_H +#ifndef __LINUX_I2C_TPS65010_H +#define __LINUX_I2C_TPS65010_H /* * ---------------------------------------------------------------------------- @@ -152,5 +152,5 @@ extern int tps65010_config_vregs1(unsigned value); */ extern int tps65013_set_low_pwr(unsigned mode); -#endif /* __ASM_ARCH_TPS65010_H */ +#endif /* __LINUX_I2C_TPS65010_H */ diff --git a/include/linux/ide.h b/include/linux/ide.h index 9a6a41e7079f..27cb39de2ae2 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -27,25 +27,10 @@ #include <asm/semaphore.h> #include <asm/mutex.h> -/****************************************************************************** - * IDE driver configuration options (play with these as desired): - * - * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary - */ -#define INITIAL_MULT_COUNT 0 /* off=0; on=2,4,8,16,32, etc.. 
*/ - -#ifndef SUPPORT_SLOW_DATA_PORTS /* 1 to support slow data ports */ -#define SUPPORT_SLOW_DATA_PORTS 1 /* 0 to reduce kernel size */ -#endif -#ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */ -#define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */ -#endif -#ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */ -#define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */ -#endif - -#ifndef DISABLE_IRQ_NOSYNC -#define DISABLE_IRQ_NOSYNC 0 +#if defined(CRIS) || defined(FRV) +# define SUPPORT_VLB_SYNC 0 +#else +# define SUPPORT_VLB_SYNC 1 #endif /* @@ -55,10 +40,6 @@ #define IDE_NO_IRQ (-1) -/* - * "No user-serviceable parts" beyond this point :) - *****************************************************************************/ - typedef unsigned char byte; /* used everywhere */ /* @@ -103,8 +84,6 @@ typedef unsigned char byte; /* used everywhere */ #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET -#define IDE_CONTROL_OFFSET_HOB (7) - #define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET]) #define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET]) #define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET]) @@ -128,7 +107,6 @@ typedef unsigned char byte; /* used everywhere */ #define BAD_W_STAT (BAD_R_STAT | WRERR_STAT) #define BAD_STAT (BAD_R_STAT | DRQ_STAT) #define DRIVE_READY (READY_STAT | SEEK_STAT) -#define DATA_READY (DRQ_STAT) #define BAD_CRC (ABRT_ERR | ICRC_ERR) @@ -219,8 +197,11 @@ typedef struct hw_regs_s { } hw_regs_t; struct hwif_s * ide_find_port(unsigned long); +void ide_init_port_data(struct hwif_s *, unsigned int); +void ide_init_port_hw(struct hwif_s *, hw_regs_t *); -int ide_register_hw(hw_regs_t *, void (*)(struct hwif_s *), int, +struct ide_drive_s; +int ide_register_hw(hw_regs_t *, void (*)(struct ide_drive_s *), struct hwif_s **); void ide_setup_ports( hw_regs_t *hw, @@ -327,47 +308,16 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, typedef union { unsigned all : 8; struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) unsigned set_geometry : 1; unsigned recalibrate : 1; unsigned set_multmode : 1; unsigned set_tune : 1; unsigned serviced : 1; unsigned reserved : 3; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned reserved : 3; - unsigned serviced : 1; - unsigned set_tune : 1; - unsigned set_multmode : 1; - unsigned recalibrate : 1; - unsigned set_geometry : 1; -#else -#error "Please fix <asm/byteorder.h>" -#endif } b; } special_t; /* - * ATA DATA Register Special. - * ATA NSECTOR Count Register(). - * ATAPI Byte Count Register. - */ -typedef union { - unsigned all :16; - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned low :8; /* LSB */ - unsigned high :8; /* MSB */ -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned high :8; /* MSB */ - unsigned low :8; /* LSB */ -#else -#error "Please fix <asm/byteorder.h>" -#endif - } b; -} ata_nsector_t, ata_data_t, atapi_bcount_t; - -/* * ATA-IDE Select Register, aka Device-Head * * head : always zeros here @@ -398,131 +348,6 @@ typedef union { } select_t, ata_select_t; /* - * The ATA-IDE Status Register. - * The ATAPI Status Register. 
- * - * check : Error occurred - * idx : Index Error - * corr : Correctable error occurred - * drq : Data is request by the device - * dsc : Disk Seek Complete : ata - * : Media access command finished : atapi - * df : Device Fault : ata - * : Reserved : atapi - * drdy : Ready, Command Mode Capable : ata - * : Ignored for ATAPI commands : atapi - * bsy : Disk is Busy - * : The device has access to the command block - */ -typedef union { - unsigned all :8; - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned check :1; - unsigned idx :1; - unsigned corr :1; - unsigned drq :1; - unsigned dsc :1; - unsigned df :1; - unsigned drdy :1; - unsigned bsy :1; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned bsy :1; - unsigned drdy :1; - unsigned df :1; - unsigned dsc :1; - unsigned drq :1; - unsigned corr :1; - unsigned idx :1; - unsigned check :1; -#else -#error "Please fix <asm/byteorder.h>" -#endif - } b; -} ata_status_t, atapi_status_t; - -/* - * ATAPI Feature Register - * - * dma : Using DMA or PIO - * reserved321 : Reserved - * reserved654 : Reserved (Tag Type) - * reserved7 : Reserved - */ -typedef union { - unsigned all :8; - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned dma :1; - unsigned reserved321 :3; - unsigned reserved654 :3; - unsigned reserved7 :1; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned reserved7 :1; - unsigned reserved654 :3; - unsigned reserved321 :3; - unsigned dma :1; -#else -#error "Please fix <asm/byteorder.h>" -#endif - } b; -} atapi_feature_t; - -/* - * ATAPI Interrupt Reason Register. - * - * cod : Information transferred is command (1) or data (0) - * io : The device requests us to read (1) or write (0) - * reserved : Reserved - */ -typedef union { - unsigned all :8; - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned cod :1; - unsigned io :1; - unsigned reserved :6; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned reserved :6; - unsigned io :1; - unsigned cod :1; -#else -#error "Please fix <asm/byteorder.h>" -#endif - } b; -} atapi_ireason_t; - -/* - * The ATAPI error register. 
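The endian-dependent ata_status_t/atapi_status_t bitfield unions being deleted here leave drivers testing the status register with plain bit masks instead. A minimal userspace sketch of such tests, assuming the bit positions documented in the deleted comment (check/ERR in bit 0, DRQ in bit 3, DRDY in bit 6, BSY in bit 7); the STAT_* names below are illustrative stand-ins, not kernel symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions follow the deleted bitfield comment (LSB first:
 * check, idx, corr, drq, dsc, df, drdy, bsy). */
#define STAT_ERR  0x01  /* check: error occurred */
#define STAT_DRQ  0x08  /* data requested by the device */
#define STAT_DRDY 0x40  /* device ready */
#define STAT_BSY  0x80  /* device busy */

static bool drive_ready(uint8_t status)
{
        /* Not busy, ready, and no error pending. */
        return !(status & STAT_BSY) && (status & STAT_DRDY) &&
               !(status & STAT_ERR);
}

int main(void)
{
        uint8_t status = STAT_DRDY | STAT_DRQ;  /* ready, in data phase */

        printf("ready=%d drq=%d\n", drive_ready(status),
               !!(status & STAT_DRQ));
        return 0;
}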
- * - * ili : Illegal Length Indication - * eom : End Of Media Detected - * abrt : Aborted command - As defined by ATA - * mcr : Media Change Requested - As defined by ATA - * sense_key : Sense key of the last failed packet command - */ -typedef union { - unsigned all :8; - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned ili :1; - unsigned eom :1; - unsigned abrt :1; - unsigned mcr :1; - unsigned sense_key :4; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned sense_key :4; - unsigned mcr :1; - unsigned abrt :1; - unsigned eom :1; - unsigned ili :1; -#else -#error "Please fix <asm/byteorder.h>" -#endif - } b; -} atapi_error_t; - -/* * Status returned from various ide_ functions */ typedef enum { @@ -568,7 +393,6 @@ typedef struct ide_drive_s { u8 state; /* retry state */ u8 waiting_for_dma; /* dma currently in progress */ u8 unmask; /* okay to unmask other irqs */ - u8 bswap; /* byte swap data */ u8 noflush; /* don't attempt flushes */ u8 dsc_overlap; /* DSC overlap */ u8 nice1; /* give potential excess bandwidth */ @@ -701,36 +525,29 @@ typedef struct hwif_s { void (*pre_reset)(ide_drive_t *); /* routine to reset controller after a disk reset */ void (*resetproc)(ide_drive_t *); - /* special interrupt handling for shared pci interrupts */ - void (*intrproc)(ide_drive_t *); /* special host masking for drive selection */ void (*maskproc)(ide_drive_t *, int); /* check host's drive quirk list */ - int (*quirkproc)(ide_drive_t *); + void (*quirkproc)(ide_drive_t *); /* driver soft-power interface */ int (*busproc)(ide_drive_t *, int); #endif u8 (*mdma_filter)(ide_drive_t *); u8 (*udma_filter)(ide_drive_t *); - void (*fixup)(struct hwif_s *); - void (*ata_input_data)(ide_drive_t *, void *, u32); void (*ata_output_data)(ide_drive_t *, void *, u32); void (*atapi_input_bytes)(ide_drive_t *, void *, u32); void (*atapi_output_bytes)(ide_drive_t *, void *, u32); + void (*dma_host_set)(ide_drive_t *, int); int (*dma_setup)(ide_drive_t *); void (*dma_exec_cmd)(ide_drive_t *, u8); void (*dma_start)(ide_drive_t *); int (*ide_dma_end)(ide_drive_t *drive); - int (*ide_dma_on)(ide_drive_t *drive); - void (*dma_off_quietly)(ide_drive_t *drive); int (*ide_dma_test_irq)(ide_drive_t *drive); void (*ide_dma_clear_irq)(ide_drive_t *drive); - void (*dma_host_on)(ide_drive_t *drive); - void (*dma_host_off)(ide_drive_t *drive); void (*dma_lost_irq)(ide_drive_t *drive); void (*dma_timeout)(ide_drive_t *drive); @@ -766,7 +583,6 @@ typedef struct hwif_s { int rqsize; /* max sectors per request */ int irq; /* our irq number */ - unsigned long dma_master; /* reference base addr dmabase */ unsigned long dma_base; /* base addr for dma ports */ unsigned long dma_command; /* dma command register */ unsigned long dma_vendor1; /* dma vendor 1 register */ @@ -806,7 +622,6 @@ typedef struct hwif_s { /* * internal ide interrupt handler type */ -typedef ide_startstop_t (ide_pre_handler_t)(ide_drive_t *, struct request *); typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); typedef int (ide_expiry_t)(ide_drive_t *); @@ -1020,7 +835,8 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry); -extern void ide_execute_command(ide_drive_t *, task_ioreg_t cmd, ide_handler_t *, unsigned int, ide_expiry_t *); +void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, + ide_expiry_t *); ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); @@ -1054,60 +870,126 @@ 
extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t); extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); -/* - * Issue ATA command and wait for completion. - * Use for implementing commands in kernel - * - * (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf) - */ -extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *); +enum { + IDE_TFLAG_LBA48 = (1 << 0), + IDE_TFLAG_NO_SELECT_MASK = (1 << 1), + IDE_TFLAG_FLAGGED = (1 << 2), + IDE_TFLAG_OUT_DATA = (1 << 3), + IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), + IDE_TFLAG_OUT_HOB_NSECT = (1 << 5), + IDE_TFLAG_OUT_HOB_LBAL = (1 << 6), + IDE_TFLAG_OUT_HOB_LBAM = (1 << 7), + IDE_TFLAG_OUT_HOB_LBAH = (1 << 8), + IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE | + IDE_TFLAG_OUT_HOB_NSECT | + IDE_TFLAG_OUT_HOB_LBAL | + IDE_TFLAG_OUT_HOB_LBAM | + IDE_TFLAG_OUT_HOB_LBAH, + IDE_TFLAG_OUT_FEATURE = (1 << 9), + IDE_TFLAG_OUT_NSECT = (1 << 10), + IDE_TFLAG_OUT_LBAL = (1 << 11), + IDE_TFLAG_OUT_LBAM = (1 << 12), + IDE_TFLAG_OUT_LBAH = (1 << 13), + IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE | + IDE_TFLAG_OUT_NSECT | + IDE_TFLAG_OUT_LBAL | + IDE_TFLAG_OUT_LBAM | + IDE_TFLAG_OUT_LBAH, + IDE_TFLAG_OUT_DEVICE = (1 << 14), + IDE_TFLAG_WRITE = (1 << 15), + IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16), + IDE_TFLAG_IN_DATA = (1 << 17), + IDE_TFLAG_CUSTOM_HANDLER = (1 << 18), + IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19), + IDE_TFLAG_IN_HOB_FEATURE = (1 << 20), + IDE_TFLAG_IN_HOB_NSECT = (1 << 21), + IDE_TFLAG_IN_HOB_LBAL = (1 << 22), + IDE_TFLAG_IN_HOB_LBAM = (1 << 23), + IDE_TFLAG_IN_HOB_LBAH = (1 << 24), + IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL | + IDE_TFLAG_IN_HOB_LBAM | + IDE_TFLAG_IN_HOB_LBAH, + IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | + IDE_TFLAG_IN_HOB_NSECT | + IDE_TFLAG_IN_HOB_LBA, + IDE_TFLAG_IN_NSECT = (1 << 25), + IDE_TFLAG_IN_LBAL = (1 << 26), + IDE_TFLAG_IN_LBAM = (1 << 27), + IDE_TFLAG_IN_LBAH = (1 << 28), + IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL | + IDE_TFLAG_IN_LBAM | + IDE_TFLAG_IN_LBAH, + IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT | + IDE_TFLAG_IN_LBA, + IDE_TFLAG_IN_DEVICE = (1 << 29), + IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB | + IDE_TFLAG_IN_HOB, + IDE_TFLAG_TF = IDE_TFLAG_OUT_TF | + IDE_TFLAG_IN_TF, + IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE | + IDE_TFLAG_IN_DEVICE, + /* force 16-bit I/O operations */ + IDE_TFLAG_IO_16BIT = (1 << 30), +}; + +struct ide_taskfile { + u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ + + u8 hob_feature; /* 1-5: additional data to support LBA48 */ + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + + u8 data; /* 6: low data byte (for TASKFILE IOCTL) */ + + union { /*  7: */ + u8 error; /* read: error */ + u8 feature; /* write: feature */ + }; + + u8 nsect; /* 8: number of sectors */ + u8 lbal; /* 9: LBA low */ + u8 lbam; /* 10: LBA mid */ + u8 lbah; /* 11: LBA high */ + + u8 device; /* 12: device select */ + + union { /* 13: */ + u8 status; /*  read: status  */ + u8 command; /* write: command */ + }; +}; typedef struct ide_task_s { -/* - * struct hd_drive_task_hdr tf; - * task_struct_t tf; - * struct hd_drive_hob_hdr hobf; - * hob_struct_t hobf; - */ - task_ioreg_t tfRegister[8]; - task_ioreg_t hobRegister[8]; - ide_reg_valid_t tf_out_flags; - ide_reg_valid_t tf_in_flags; + union { + struct ide_taskfile tf; + u8 tf_array[14]; + }; + u32 tf_flags; int data_phase; - int command_type; - ide_pre_handler_t *prehandler; - ide_handler_t *handler; struct request *rq; /* copy of request */ void *special; /* valid_t generally */ } ide_task_t; -extern u32 
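The IDE_TFLAG_* enum and struct ide_taskfile added above replace the old tfRegister[]/hobRegister[] arrays with named fields plus a flags word saying which registers to load. A userspace sketch of filling such a taskfile for a 28-bit read; the struct here is a reduced local mirror of the fields shown in the hunk, and the READ SECTOR(S) opcode (0x20) and LBA-mode device bit (0x40) are illustrative assumptions, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Reduced local mirror of the taskfile fields used for a 28-bit command. */
struct taskfile {
        uint8_t feature;
        uint8_t nsect;
        uint8_t lbal, lbam, lbah;
        uint8_t device;
        uint8_t command;
};

/* Flag values mirror IDE_TFLAG_OUT_TF (bits 9-13) and
 * IDE_TFLAG_OUT_DEVICE (bit 14) from the enum above. */
#define TFLAG_OUT_TF     ((1u << 9) | (1u << 10) | (1u << 11) | \
                          (1u << 12) | (1u << 13))
#define TFLAG_OUT_DEVICE (1u << 14)

int main(void)
{
        uint32_t lba = 0x12345;          /* illustrative 28-bit sector */
        struct taskfile tf = {
                .nsect   = 1,
                .lbal    = lba & 0xff,
                .lbam    = (lba >> 8) & 0xff,
                .lbah    = (lba >> 16) & 0xff,
                .device  = 0x40 | ((lba >> 24) & 0x0f),  /* assumed LBA bit */
                .command = 0x20,         /* assumed READ SECTOR(S) opcode */
        };
        uint32_t tf_flags = TFLAG_OUT_TF | TFLAG_OUT_DEVICE;

        printf("cmd=0x%02x flags=0x%08x nsect=%u\n",
               tf.command, tf_flags, tf.nsect);
        return 0;
}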
ide_read_24(ide_drive_t *); +void ide_tf_load(ide_drive_t *, ide_task_t *); +void ide_tf_read(ide_drive_t *, ide_task_t *); extern void SELECT_DRIVE(ide_drive_t *); -extern void SELECT_INTERRUPT(ide_drive_t *); extern void SELECT_MASK(ide_drive_t *, int); -extern void QUIRK_LIST(ide_drive_t *); extern int drive_is_ready(ide_drive_t *); -/* - * taskfile io for disks for now...and builds request from ide_ioctl - */ -extern ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); +void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); -/* - * Special Flagged Register Validation Caller - */ -extern ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *); +ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); + +void task_end_request(ide_drive_t *, struct request *, u8); -extern ide_startstop_t set_multmode_intr(ide_drive_t *); -extern ide_startstop_t set_geometry_intr(ide_drive_t *); -extern ide_startstop_t recal_intr(ide_drive_t *); -extern ide_startstop_t task_no_data_intr(ide_drive_t *); -extern ide_startstop_t task_in_intr(ide_drive_t *); -extern ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *); +u8 wait_drive_not_busy(ide_drive_t *); -extern int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *); +int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); +int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long); @@ -1133,10 +1015,9 @@ extern void do_ide_request(struct request_queue *); void ide_init_disk(struct gendisk *, ide_drive_t *); -extern int ideprobe_init(void); - #ifdef CONFIG_IDEPCI_PCIBUS_ORDER -extern void ide_scan_pcibus(int scan_direction) __init; +extern int ide_scan_direction; +int __init ide_scan_pcibus(void); extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) #else @@ -1212,6 +1093,9 @@ enum { IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ IDE_HFLAG_UNMASK_IRQS = (1 << 25), + IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), + /* host is CY82C693 */ + IDE_HFLAG_CY82C693 = (1 << 27), }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1226,10 +1110,9 @@ struct ide_port_info { void (*init_iops)(ide_hwif_t *); void (*init_hwif)(ide_hwif_t *); void (*init_dma)(ide_hwif_t *, unsigned long); - void (*fixup)(ide_hwif_t *); ide_pci_enablebit_t enablebits[2]; hwif_chipset_t chipset; - unsigned int extra; + u8 extra; u32 host_flags; u8 pio_mask; u8 swdma_mask; @@ -1264,7 +1147,9 @@ static inline u8 ide_max_dma_mode(ide_drive_t *drive) return ide_find_dma_mode(drive, XFER_UDMA_6); } +void ide_dma_off_quietly(ide_drive_t *); void ide_dma_off(ide_drive_t *); +void ide_dma_on(ide_drive_t *); int ide_set_dma(ide_drive_t *); ide_startstop_t ide_dma_intr(ide_drive_t *); @@ -1275,10 +1160,7 @@ extern void ide_destroy_dmatable(ide_drive_t *); extern int ide_release_dma(ide_hwif_t *); extern void ide_setup_dma(ide_hwif_t *, unsigned long, unsigned int); -void ide_dma_host_off(ide_drive_t *); -void ide_dma_off_quietly(ide_drive_t *); -void ide_dma_host_on(ide_drive_t *); -extern int __ide_dma_on(ide_drive_t *); +void ide_dma_host_set(ide_drive_t *, int); extern int ide_dma_setup(ide_drive_t *); extern void ide_dma_start(ide_drive_t *); extern int __ide_dma_end(ide_drive_t *); @@ -1290,7 +1172,9 @@ extern void ide_dma_timeout(ide_drive_t *); static inline int ide_id_dma_bug(ide_drive_t *drive) { 
return 0; } static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; } static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } +static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; } static inline void ide_dma_off(ide_drive_t *drive) { ; } +static inline void ide_dma_on(ide_drive_t *drive) { ; } static inline void ide_dma_verbose(ide_drive_t *drive) { ; } static inline int ide_set_dma(ide_drive_t *drive) { return 1; } #endif /* CONFIG_BLK_DEV_IDEDMA */ @@ -1320,8 +1204,9 @@ extern void ide_unregister (unsigned int index); void ide_register_region(struct gendisk *); void ide_unregister_region(struct gendisk *); -void ide_undecoded_slave(ide_hwif_t *); +void ide_undecoded_slave(ide_drive_t *); +int ide_device_add_all(u8 *idx); int ide_device_add(u8 idx[4]); static inline void *ide_get_hwifdata (ide_hwif_t * hwif) @@ -1356,6 +1241,7 @@ static inline int ide_dev_is_sata(struct hd_driveid *id) return 0; } +u64 ide_get_lba_addr(struct ide_taskfile *, int); u8 ide_dump_status(ide_drive_t *, const char *, u8); typedef struct ide_pio_timings_s { @@ -1418,4 +1304,9 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) return &hwif->drives[(drive->dn ^ 1) & 1]; } +static inline void ide_set_irq(ide_drive_t *drive, int on) +{ + drive->hwif->OUTB(drive->ctl | (on ? 0 : 2), IDE_CONTROL_REG); +} + #endif /* _IDE_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 30621c27159f..5de6d911cdf7 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -54,6 +54,8 @@ #define IEEE80211_STYPE_ACTION 0x00D0 /* control */ +#define IEEE80211_STYPE_BACK_REQ 0x0080 +#define IEEE80211_STYPE_BACK 0x0090 #define IEEE80211_STYPE_PSPOLL 0x00A0 #define IEEE80211_STYPE_RTS 0x00B0 #define IEEE80211_STYPE_CTS 0x00C0 @@ -81,18 +83,18 @@ /* miscellaneous IEEE 802.11 constants */ -#define IEEE80211_MAX_FRAG_THRESHOLD 2346 -#define IEEE80211_MAX_RTS_THRESHOLD 2347 +#define IEEE80211_MAX_FRAG_THRESHOLD 2352 +#define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 #define IEEE80211_MAX_TIM_LEN 251 -#define IEEE80211_MAX_DATA_LEN 2304 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2. - The figure in section 7.1.2 suggests a body size of up to 2312 - bytes is allowed, which is a bit confusing, I suspect this - represents the 2304 bytes of real data, plus a possible 8 bytes of - WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */ + 802.11e clarifies the figure in section 7.1.2. The frame body is + up to 2304 octets long (maximum MSDU size) plus any crypt overhead. 
*/ +#define IEEE80211_MAX_DATA_LEN 2304 +/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ +#define IEEE80211_MAX_FRAME_LEN 2352 #define IEEE80211_MAX_SSID_LEN 32 @@ -185,6 +187,25 @@ struct ieee80211_mgmt { u8 new_chan; u8 switch_count; } __attribute__((packed)) chan_switch; + struct{ + u8 action_code; + u8 dialog_token; + __le16 capab; + __le16 timeout; + __le16 start_seq_num; + } __attribute__((packed)) addba_req; + struct{ + u8 action_code; + u8 dialog_token; + __le16 status; + __le16 capab; + __le16 timeout; + } __attribute__((packed)) addba_resp; + struct{ + u8 action_code; + __le16 params; + __le16 reason_code; + } __attribute__((packed)) delba; } u; } __attribute__ ((packed)) action; } u; @@ -205,6 +226,66 @@ struct ieee80211_cts { u8 ra[6]; } __attribute__ ((packed)); +/** + * struct ieee80211_bar - HT Block Ack Request + * + * This structure refers to "HT BlockAckReq" as + * described in 802.11n draft section 7.2.1.7.1 + */ +struct ieee80211_bar { + __le16 frame_control; + __le16 duration; + __u8 ra[6]; + __u8 ta[6]; + __le16 control; + __le16 start_seq_num; +} __attribute__((packed)); + +/** + * struct ieee80211_ht_cap - HT capabilities + * + * This structure refers to "HT capabilities element" as + * described in 802.11n draft section 7.3.2.52 + */ +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + u8 supp_mcs_set[16]; + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__ ((packed)); + +/** + * struct ieee80211_ht_cap - HT additional information + * + * This structure refers to "HT information element" as + * described in 802.11n draft section 7.3.2.53 + */ +struct ieee80211_ht_addt_info { + u8 control_chan; + u8 ht_param; + __le16 operation_mode; + __le16 stbc_param; + u8 basic_set[16]; +} __attribute__ ((packed)); + +/* 802.11n HT capabilities masks */ +#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 +#define IEEE80211_HT_CAP_MIMO_PS 0x000C +#define IEEE80211_HT_CAP_GRN_FLD 0x0010 +#define IEEE80211_HT_CAP_SGI_20 0x0020 +#define IEEE80211_HT_CAP_SGI_40 0x0040 +#define IEEE80211_HT_CAP_DELAY_BA 0x0400 +#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 +#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C +/* 802.11n HT IE masks */ +#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03 +#define IEEE80211_HT_IE_CHA_WIDTH 0x04 +#define IEEE80211_HT_IE_HT_PROTECTION 0x0003 +#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 +#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 @@ -271,6 +352,18 @@ enum ieee80211_statuscode { WLAN_STATUS_UNSUPP_RSN_VERSION = 44, WLAN_STATUS_INVALID_RSN_IE_CAP = 45, WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, + /* 802.11e */ + WLAN_STATUS_UNSPECIFIED_QOS = 32, + WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33, + WLAN_STATUS_ASSOC_DENIED_LOWACK = 34, + WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35, + WLAN_STATUS_REQUEST_DECLINED = 37, + WLAN_STATUS_INVALID_QOS_PARAM = 38, + WLAN_STATUS_CHANGE_TSPEC = 39, + WLAN_STATUS_WAIT_TS_DELAY = 47, + WLAN_STATUS_NO_DIRECT_LINK = 48, + WLAN_STATUS_STA_NOT_PRESENT = 49, + WLAN_STATUS_STA_NOT_QSTA = 50, }; @@ -301,6 +394,16 @@ enum ieee80211_reasoncode { WLAN_REASON_INVALID_RSN_IE_CAP = 22, WLAN_REASON_IEEE8021X_FAILED = 23, WLAN_REASON_CIPHER_SUITE_REJECTED = 24, + /* 802.11e */ + WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, + WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, + WLAN_REASON_DISASSOC_LOW_ACK = 34, + WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35, + 
WLAN_REASON_QSTA_LEAVE_QBSS = 36, + WLAN_REASON_QSTA_NOT_USE = 37, + WLAN_REASON_QSTA_REQUIRE_SETUP = 38, + WLAN_REASON_QSTA_TIMEOUT = 39, + WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, }; @@ -319,6 +422,15 @@ enum ieee80211_eid { WLAN_EID_HP_PARAMS = 8, WLAN_EID_HP_TABLE = 9, WLAN_EID_REQUEST = 10, + /* 802.11e */ + WLAN_EID_QBSS_LOAD = 11, + WLAN_EID_EDCA_PARAM_SET = 12, + WLAN_EID_TSPEC = 13, + WLAN_EID_TCLAS = 14, + WLAN_EID_SCHEDULE = 15, + WLAN_EID_TS_DELAY = 43, + WLAN_EID_TCLAS_PROCESSING = 44, + WLAN_EID_QOS_CAPA = 46, /* 802.11h */ WLAN_EID_PWR_CONSTRAINT = 32, WLAN_EID_PWR_CAPABILITY = 33, @@ -333,6 +445,9 @@ enum ieee80211_eid { /* 802.11g */ WLAN_EID_ERP_INFO = 42, WLAN_EID_EXT_SUPP_RATES = 50, + /* 802.11n */ + WLAN_EID_HT_CAPABILITY = 45, + WLAN_EID_HT_EXTRA_INFO = 61, /* 802.11i */ WLAN_EID_RSN = 48, WLAN_EID_WPA = 221, @@ -341,6 +456,32 @@ enum ieee80211_eid { WLAN_EID_QOS_PARAMETER = 222 }; +/* Action category code */ +enum ieee80211_category { + WLAN_CATEGORY_SPECTRUM_MGMT = 0, + WLAN_CATEGORY_QOS = 1, + WLAN_CATEGORY_DLS = 2, + WLAN_CATEGORY_BACK = 3, + WLAN_CATEGORY_WMM = 17, +}; + +/* BACK action code */ +enum ieee80211_back_actioncode { + WLAN_ACTION_ADDBA_REQ = 0, + WLAN_ACTION_ADDBA_RESP = 1, + WLAN_ACTION_DELBA = 2, +}; + +/* BACK (block-ack) parties */ +enum ieee80211_back_parties { + WLAN_BACK_RECIPIENT = 0, + WLAN_BACK_INITIATOR = 1, + WLAN_BACK_TIMER = 2, +}; + +/* A-MSDU 802.11n */ +#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080 + /* cipher suite selectors */ #define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 #define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 diff --git a/include/linux/if.h b/include/linux/if.h index 32bf419351f1..5c9d1fa93fef 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -50,7 +50,9 @@ #define IFF_LOWER_UP 0x10000 /* driver signals L1 up */ #define IFF_DORMANT 0x20000 /* driver signals dormant */ -#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|\ +#define IFF_ECHO 0x40000 /* echo sent packets */ + +#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) /* Private (from user) interface flags (netdevice->priv_flags). */ @@ -61,6 +63,7 @@ #define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */ #define IFF_BONDING 0x20 /* bonding master or slave */ #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ +#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_addrlabel.h b/include/linux/if_addrlabel.h new file mode 100644 index 000000000000..9fe79c95dd28 --- /dev/null +++ b/include/linux/if_addrlabel.h @@ -0,0 +1,32 @@ +/* + * if_addrlabel.h - netlink interface for address labels + * + * Copyright (C)2007 USAGI/WIDE Project, All Rights Reserved. 
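The block-ack additions above (struct addba_req in ieee80211_mgmt, WLAN_CATEGORY_BACK and WLAN_ACTION_ADDBA_REQ) describe the body of an ADDBA request action frame: a category byte, an action code, then the little-endian fields of the struct. A small userspace sketch of serializing that body, assuming placeholder capability/timeout/sequence values; the on-air MAC header is omitted.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Values from the enums added above. */
#define WLAN_CATEGORY_BACK    3
#define WLAN_ACTION_ADDBA_REQ 0

static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
}

/* Serialize category, action code, then the addba_req fields; returns
 * the number of bytes written. */
static size_t build_addba_req(uint8_t *buf, uint8_t dialog_token,
                              uint16_t capab, uint16_t timeout,
                              uint16_t start_seq_num)
{
        buf[0] = WLAN_CATEGORY_BACK;
        buf[1] = WLAN_ACTION_ADDBA_REQ;
        buf[2] = dialog_token;
        put_le16(buf + 3, capab);
        put_le16(buf + 5, timeout);
        put_le16(buf + 7, start_seq_num);
        return 9;
}

int main(void)
{
        uint8_t body[9];
        size_t n = build_addba_req(body, 1, 0x1002, 0, 0x0010);

        for (size_t i = 0; i < n; i++)
                printf("%02x ", body[i]);
        printf("\n");
        return 0;
}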
+ * + * Authors: + * YOSHIFUJI Hideaki @ USAGI/WIDE <yoshfuji@linux-ipv6.org> + */ + +#ifndef __LINUX_IF_ADDRLABEL_H +#define __LINUX_IF_ADDRLABEL_H + +struct ifaddrlblmsg +{ + __u8 ifal_family; /* Address family */ + __u8 __ifal_reserved; /* Reserved */ + __u8 ifal_prefixlen; /* Prefix length */ + __u8 ifal_flags; /* Flags */ + __u32 ifal_index; /* Link index */ + __u32 ifal_seq; /* sequence number */ +}; + +enum +{ + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX +}; + +#define IFAL_MAX (__IFAL_MAX - 1) + +#endif diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index ed7b93c3083a..296e8e86e91d 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -52,6 +52,7 @@ #define ARPHRD_ROSE 270 #define ARPHRD_X25 271 /* CCITT X.25 */ #define ARPHRD_HWX25 272 /* Boards with X.25 in firmware */ +#define ARPHRD_CAN 280 /* Controller Area Network */ #define ARPHRD_PPP 512 #define ARPHRD_CISCO 513 /* Cisco HDLC */ #define ARPHRD_HDLC ARPHRD_CISCO diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 5f9297793661..e157c1399b61 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -90,6 +90,7 @@ #define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/ #define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */ #define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */ +#define ETH_P_CAN 0x000C /* Controller Area Network */ #define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/ #define ETH_P_TR_802_2 0x0011 /* 802.2 frames */ #define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */ @@ -123,12 +124,15 @@ int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); extern struct ctl_table ether_table[]; #endif +extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); + /* * Display a 6 byte device address (MAC) in a readable format. */ +extern char *print_mac(char *buf, const unsigned char *addr); #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" -extern char *print_mac(char *buf, const u8 *addr); -#define DECLARE_MAC_BUF(var) char var[18] __maybe_unused +#define MAC_BUF_SIZE 18 +#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] __maybe_unused #endif diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index f272a80caa3e..5c34240de746 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -137,7 +137,7 @@ struct frhdr unsigned char NLPID; unsigned char OUI[3]; - unsigned short PID; + __be16 PID; #define IP_NLPID pad } __attribute__((packed)); diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h deleted file mode 100644 index 3b1b7ba19825..000000000000 --- a/include/linux/if_shaper.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef __LINUX_SHAPER_H -#define __LINUX_SHAPER_H - -#ifdef __KERNEL__ - -#define SHAPER_QLEN 10 -/* - * This is a bit speed dependent (read it shouldn't be a constant!) - * - * 5 is about right for 28.8 upwards. Below that double for every - * halving of speed or so. - ie about 20 for 9600 baud. 
- */ -#define SHAPER_LATENCY (5*HZ) -#define SHAPER_MAXSLIP 2 -#define SHAPER_BURST (HZ/50) /* Good for >128K then */ - -struct shaper -{ - struct sk_buff_head sendq; - __u32 bytespertick; - __u32 bitspersec; - __u32 shapelatency; - __u32 shapeclock; - unsigned long recovery; /* Time we can next clock a packet out on - an empty queue */ - spinlock_t lock; - struct net_device *dev; - struct net_device_stats* (*get_stats)(struct net_device *dev); - struct timer_list timer; -}; - -#endif - -#define SHAPER_SET_DEV 0x0001 -#define SHAPER_SET_SPEED 0x0002 -#define SHAPER_GET_DEV 0x0003 -#define SHAPER_GET_SPEED 0x0004 - -struct shaperconf -{ - __u16 ss_cmd; - union - { - char ssu_name[14]; - __u32 ssu_speed; - } ss_u; -#define ss_speed ss_u.ssu_speed -#define ss_name ss_u.ssu_name -}; - -#endif diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h index 046e9d95ba9a..5bcec8b2c5e2 100644 --- a/include/linux/if_tr.h +++ b/include/linux/if_tr.h @@ -49,9 +49,6 @@ static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb) { return (struct trh_hdr *)skb_mac_header(skb); } -#ifdef CONFIG_SYSCTL -extern struct ctl_table tr_table[]; -#endif #endif /* This is an Token-Ring LLC structure */ diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 33e489d5bb33..72f1c5f47be3 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -21,6 +21,8 @@ /* Uncomment to enable debugging */ /* #define TUN_DEBUG 1 */ +#include <linux/types.h> + #ifdef __KERNEL__ #ifdef TUN_DEBUG @@ -88,7 +90,7 @@ struct tun_struct { struct tun_pi { unsigned short flags; - unsigned short proto; + __be16 proto; }; #define TUN_PKT_STRIP 0x0001 diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 660b5010c2d9..228eb4eb3129 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -17,6 +17,9 @@ #define GRE_FLAGS __constant_htons(0x00F8) #define GRE_VERSION __constant_htons(0x0007) +/* i_flags values for SIT mode */ +#define SIT_ISATAP 0x0001 + struct ip_tunnel_parm { char name[IFNAMSIZ]; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 976d4b1067d1..34f40efc7607 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -16,11 +16,6 @@ #ifdef __KERNEL__ /* externally defined structs */ -struct vlan_group; -struct net_device; -struct packet_type; -struct vlan_collection; -struct vlan_dev_info; struct hlist_node; #include <linux/netdevice.h> @@ -39,12 +34,30 @@ struct hlist_node; #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. 
octets in frame sans FCS */ +/* + * struct vlan_hdr - vlan header + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +/** + * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) + * @h_dest: destination ethernet address + * @h_source: source ethernet address + * @h_vlan_proto: ethernet protocol (always 0x8100) + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ struct vlan_ethhdr { - unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ - unsigned char h_source[ETH_ALEN]; /* source ether addr */ - __be16 h_vlan_proto; /* Should always be 0x8100 */ - __be16 h_vlan_TCI; /* Encapsulates priority and VLAN ID */ - __be16 h_vlan_encapsulated_proto; /* packet type ID field (or len) */ + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; }; #include <linux/skbuff.h> @@ -54,18 +67,11 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) return (struct vlan_ethhdr *)skb_mac_header(skb); } -struct vlan_hdr { - __be16 h_vlan_TCI; /* Encapsulates priority and VLAN ID */ - __be16 h_vlan_encapsulated_proto; /* packet type ID field (or len) */ -}; - #define VLAN_VID_MASK 0xfff /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -#define VLAN_NAME "vlan" - /* if this changes, algorithm will have to be reworked because this * depends on completely exhausting the VLAN identifier space. Thus * it gives constant time look-up, but in many cases it wastes memory. @@ -76,19 +82,22 @@ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); struct vlan_group { int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */ + unsigned int nr_vlans; struct hlist_node hlist; /* linked list */ struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; struct rcu_head rcu; }; -static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id) +static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, + unsigned int vlan_id) { struct net_device **array; array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN]; } -static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, +static inline void vlan_group_set_device(struct vlan_group *vg, + unsigned int vlan_id, struct net_device *dev) { struct net_device **array; @@ -132,22 +141,18 @@ struct vlan_dev_info { struct proc_dir_entry *dent; /* Holds the proc data */ unsigned long cnt_inc_headroom_on_tx; /* How many times did we have to grow the skb on TX. */ unsigned long cnt_encap_on_xmit; /* How many times did we have to encapsulate the skb on TX. */ - struct net_device_stats dev_stats; /* Device stats (rx-bytes, tx-pkts, etc...) 
*/ }; -#define VLAN_DEV_INFO(x) ((struct vlan_dev_info *)(x->priv)) - -/* inline functions */ - -static inline struct net_device_stats *vlan_dev_get_stats(struct net_device *dev) +static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) { - return &(VLAN_DEV_INFO(dev)->dev_stats); + return netdev_priv(dev); } +/* inline functions */ static inline __u32 vlan_get_ingress_priority(struct net_device *dev, unsigned short vlan_tag) { - struct vlan_dev_info *vip = VLAN_DEV_INFO(dev); + struct vlan_dev_info *vip = vlan_dev_info(dev); return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7]; } @@ -188,7 +193,7 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, skb->dev->last_rx = jiffies; - stats = vlan_dev_get_stats(skb->dev); + stats = &skb->dev->stats; stats->rx_packets++; stats->rx_bytes += skb->len; @@ -266,12 +271,12 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN); /* first, the ethernet type */ - veth->h_vlan_proto = __constant_htons(ETH_P_8021Q); + veth->h_vlan_proto = htons(ETH_P_8021Q); /* now, the tag */ veth->h_vlan_TCI = htons(tag); - skb->protocol = __constant_htons(ETH_P_8021Q); + skb->protocol = htons(ETH_P_8021Q); skb->mac_header -= VLAN_HLEN; skb->network_header -= VLAN_HLEN; @@ -326,7 +331,7 @@ static inline int __vlan_get_tag(struct sk_buff *skb, unsigned short *tag) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; - if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) { + if (veth->h_vlan_proto != htons(ETH_P_8021Q)) { return -EINVAL; } diff --git a/include/linux/in.h b/include/linux/in.h index 3975cbf52f20..70c6df882694 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -246,13 +246,69 @@ struct sockaddr_in { #include <asm/byteorder.h> #ifdef __KERNEL__ -/* Some random defines to make it easier in the kernel.. 
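The if_vlan.h hunks above pack priority and VLAN ID into one 16-bit TCI: vlan_get_ingress_priority() takes bits 15-13 and VLAN_VID_MASK keeps the low 12 bits, with the protocol field set to ETH_P_8021Q. A minimal sketch of that packing; leaving the CFI bit clear is an assumption, since the header above does not touch it.

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0xfff      /* as in if_vlan.h above */
#define ETH_P_8021Q   0x8100

/* Pack the TCI the way the helpers above read it:
 * bits 15-13 priority, bits 11-0 VLAN ID, CFI left clear (assumption). */
static uint16_t vlan_tci(uint8_t prio, uint16_t vid)
{
        return (uint16_t)(((prio & 0x7) << 13) | (vid & VLAN_VID_MASK));
}

int main(void)
{
        uint16_t tci = vlan_tci(5, 100);

        printf("proto=0x%04x tci=0x%04x prio=%u vid=%u\n",
               ETH_P_8021Q, tci, (tci >> 13) & 0x7, tci & VLAN_VID_MASK);
        return 0;
}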
*/ -#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000)) -#define MULTICAST(x) (((x) & htonl(0xf0000000)) == htonl(0xe0000000)) -#define BADCLASS(x) (((x) & htonl(0xf0000000)) == htonl(0xf0000000)) -#define ZERONET(x) (((x) & htonl(0xff000000)) == htonl(0x00000000)) -#define LOCAL_MCAST(x) (((x) & htonl(0xFFFFFF00)) == htonl(0xE0000000)) +static inline bool ipv4_is_loopback(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x7f000000); +} + +static inline bool ipv4_is_multicast(__be32 addr) +{ + return (addr & htonl(0xf0000000)) == htonl(0xe0000000); +} + +static inline bool ipv4_is_local_multicast(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xe0000000); +} + +static inline bool ipv4_is_lbcast(__be32 addr) +{ + /* limited broadcast */ + return addr == INADDR_BROADCAST; +} + +static inline bool ipv4_is_zeronet(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x00000000); +} + +/* Special-Use IPv4 Addresses (RFC3330) */ + +static inline bool ipv4_is_private_10(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x0a000000); +} + +static inline bool ipv4_is_private_172(__be32 addr) +{ + return (addr & htonl(0xfff00000)) == htonl(0xac100000); +} + +static inline bool ipv4_is_private_192(__be32 addr) +{ + return (addr & htonl(0xffff0000)) == htonl(0xc0a80000); +} + +static inline bool ipv4_is_linklocal_169(__be32 addr) +{ + return (addr & htonl(0xffff0000)) == htonl(0xa9fe0000); +} + +static inline bool ipv4_is_anycast_6to4(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xc0586300); +} + +static inline bool ipv4_is_test_192(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xc0000200); +} + +static inline bool ipv4_is_test_198(__be32 addr) +{ + return (addr & htonl(0xfffe0000)) == htonl(0xc6120000); +} #endif #endif /* _LINUX_IN_H */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index d83fee2dc643..8d9eaaebded7 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -44,7 +44,8 @@ struct in_device }; #define IPV4_DEVCONF(cnf, attr) ((cnf).data[NET_IPV4_CONF_ ## attr - 1]) -#define IPV4_DEVCONF_ALL(attr) IPV4_DEVCONF(ipv4_devconf, attr) +#define IPV4_DEVCONF_ALL(net, attr) \ + IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr) static inline int ipv4_devconf_get(struct in_device *in_dev, int index) { @@ -71,16 +72,17 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) ipv4_devconf_set((in_dev), NET_IPV4_CONF_ ## attr, (val)) #define IN_DEV_ANDCONF(in_dev, attr) \ - (IPV4_DEVCONF_ALL(attr) && IN_DEV_CONF_GET((in_dev), attr)) + (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) && \ + IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_ORCONF(in_dev, attr) \ - (IPV4_DEVCONF_ALL(attr) || IN_DEV_CONF_GET((in_dev), attr)) + (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) || \ + IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_MAXCONF(in_dev, attr) \ - (max(IPV4_DEVCONF_ALL(attr), IN_DEV_CONF_GET((in_dev), attr))) + (max(IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr), \ + IN_DEV_CONF_GET((in_dev), attr))) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) -#define IN_DEV_MFORWARD(in_dev) (IPV4_DEVCONF_ALL(MC_FORWARDING) && \ - IPV4_DEVCONF((in_dev)->cnf, \ - MC_FORWARDING)) +#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) #define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ ACCEPT_SOURCE_ROUTE) @@ -127,15 +129,14 @@ struct in_ifaddr extern int 
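The new ipv4_is_*() inlines above replace the old LOOPBACK()/MULTICAST()/ZERONET() macros with typed, bool-returning helpers built on the same masked comparisons. A small userspace sketch using two of those bit tests verbatim; the function names here are local stand-ins, not the kernel symbols.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit tests as ipv4_is_loopback()/ipv4_is_multicast() above,
 * operating on a network-byte-order address. */
static bool is_loopback(uint32_t addr_be)
{
        return (addr_be & htonl(0xff000000)) == htonl(0x7f000000);
}

static bool is_multicast(uint32_t addr_be)
{
        return (addr_be & htonl(0xf0000000)) == htonl(0xe0000000);
}

int main(void)
{
        struct in_addr a;

        inet_pton(AF_INET, "127.0.0.1", &a);
        printf("127.0.0.1: loopback=%d multicast=%d\n",
               is_loopback(a.s_addr), is_multicast(a.s_addr));

        inet_pton(AF_INET, "224.0.0.1", &a);
        printf("224.0.0.1: loopback=%d multicast=%d\n",
               is_loopback(a.s_addr), is_multicast(a.s_addr));
        return 0;
}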
register_inetaddr_notifier(struct notifier_block *nb); extern int unregister_inetaddr_notifier(struct notifier_block *nb); -extern struct net_device *ip_dev_find(__be32 addr); +extern struct net_device *ip_dev_find(struct net *net, __be32 addr); extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); extern int devinet_ioctl(unsigned int cmd, void __user *); extern void devinet_init(void); -extern struct in_device *inetdev_by_index(int); +extern struct in_device *inetdev_by_index(struct net *, int); extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); -extern __be32 inet_confirm_addr(const struct net_device *dev, __be32 dst, __be32 local, int scope); +extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope); extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); -extern void inet_forward_change(void); static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) { diff --git a/include/linux/init.h b/include/linux/init.h index 5141381a7527..2efbda016741 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -40,10 +40,10 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __attribute__ ((__section__ (".init.text"))) __cold -#define __initdata __attribute__ ((__section__ (".init.data"))) -#define __exitdata __attribute__ ((__section__(".exit.data"))) -#define __exit_call __attribute_used__ __attribute__ ((__section__ (".exitcall.exit"))) +#define __init __section(.init.text) __cold +#define __initdata __section(.init.data) +#define __exitdata __section(.exit.data) +#define __exit_call __used __section(.exitcall.exit) /* modpost check for section mismatches during the kernel build. * A section mismatch happens when there are references from a @@ -52,25 +52,81 @@ * when early init has completed so all such references are potential bugs. * For exit sections the same issue exists. * The following markers are used for the cases where the reference to - * the init/exit section (code or data) is valid and will teach modpost - * not to issue a warning. + * the *init / *exit section (code or data) is valid and will teach + * modpost not to issue a warning. * The markers follow same syntax rules as __init / __initdata. 
*/ -#define __init_refok noinline __attribute__ ((__section__ (".text.init.refok"))) -#define __initdata_refok __attribute__ ((__section__ (".data.init.refok"))) -#define __exit_refok noinline __attribute__ ((__section__ (".exit.text.refok"))) +#define __ref __section(.ref.text) noinline +#define __refdata __section(.ref.data) +#define __refconst __section(.ref.rodata) + +/* backward compatibility note + * A few places hardcode the old section names: + * .text.init.refok + * .data.init.refok + * .exit.text.refok + * They should be converted to use the defines from this file + */ + +/* compatibility defines */ +#define __init_refok __ref +#define __initdata_refok __refdata +#define __exit_refok __ref + #ifdef MODULE -#define __exit __attribute__ ((__section__(".exit.text"))) __cold +#define __exitused #else -#define __exit __attribute_used__ __attribute__ ((__section__(".exit.text"))) __cold +#define __exitused __used #endif +#define __exit __section(.exit.text) __exitused __cold + +/* Used for HOTPLUG */ +#define __devinit __section(.devinit.text) __cold +#define __devinitdata __section(.devinit.data) +#define __devinitconst __section(.devinit.rodata) +#define __devexit __section(.devexit.text) __exitused __cold +#define __devexitdata __section(.devexit.data) +#define __devexitconst __section(.devexit.rodata) + +/* Used for HOTPLUG_CPU */ +#define __cpuinit __section(.cpuinit.text) __cold +#define __cpuinitdata __section(.cpuinit.data) +#define __cpuinitconst __section(.cpuinit.rodata) +#define __cpuexit __section(.cpuexit.text) __exitused __cold +#define __cpuexitdata __section(.cpuexit.data) +#define __cpuexitconst __section(.cpuexit.rodata) + +/* Used for MEMORY_HOTPLUG */ +#define __meminit __section(.meminit.text) __cold +#define __meminitdata __section(.meminit.data) +#define __meminitconst __section(.meminit.rodata) +#define __memexit __section(.memexit.text) __exitused __cold +#define __memexitdata __section(.memexit.data) +#define __memexitconst __section(.memexit.rodata) + /* For assembly routines */ #define __INIT .section ".init.text","ax" -#define __INIT_REFOK .section ".text.init.refok","ax" #define __FINIT .previous + #define __INITDATA .section ".init.data","aw" -#define __INITDATA_REFOK .section ".data.init.refok","aw" + +#define __DEVINIT .section ".devinit.text", "ax" +#define __DEVINITDATA .section ".devinit.data", "aw" + +#define __CPUINIT .section ".cpuinit.text", "ax" +#define __CPUINITDATA .section ".cpuinit.data", "aw" + +#define __MEMINIT .section ".meminit.text", "ax" +#define __MEMINITDATA .section ".meminit.data", "aw" + +/* silence warnings when references are OK */ +#define __REF .section ".ref.text", "ax" +#define __REFDATA .section ".ref.data", "aw" +#define __REFCONST .section ".ref.rodata", "aw" +/* backward compatibility */ +#define __INIT_REFOK .section __REF +#define __INITDATA_REFOK .section __REFDATA #ifndef __ASSEMBLY__ /* @@ -108,7 +164,7 @@ void prepare_namespace(void); */ #define __define_initcall(level,fn,id) \ - static initcall_t __initcall_##fn##id __attribute_used__ \ + static initcall_t __initcall_##fn##id __used \ __attribute__((__section__(".initcall" level ".init"))) = fn /* @@ -142,11 +198,11 @@ void prepare_namespace(void); #define console_initcall(fn) \ static initcall_t __initcall_##fn \ - __attribute_used__ __attribute__((__section__(".con_initcall.init")))=fn + __used __section(.con_initcall.init) = fn #define security_initcall(fn) \ static initcall_t __initcall_##fn \ - __attribute_used__ 
__attribute__((__section__(".security_initcall.init"))) = fn + __used __section(.security_initcall.init) = fn struct obs_kernel_param { const char *str; @@ -163,8 +219,7 @@ struct obs_kernel_param { #define __setup_param(str, unique_id, fn, early) \ static char __setup_str_##unique_id[] __initdata __aligned(1) = str; \ static struct obs_kernel_param __setup_##unique_id \ - __attribute_used__ \ - __attribute__((__section__(".init.setup"))) \ + __used __section(.init.setup) \ __attribute__((aligned((sizeof(long))))) \ = { __setup_str_##unique_id, fn, early } @@ -242,7 +297,7 @@ void __init parse_early_param(void); #endif /* Data marked not to be saved by software suspend */ -#define __nosavedata __attribute__ ((__section__ (".data.nosave"))) +#define __nosavedata __section(.data.nosave) /* This means "can be init if no module support, otherwise module load may call it." */ @@ -254,43 +309,6 @@ void __init parse_early_param(void); #define __initdata_or_module __initdata #endif /*CONFIG_MODULES*/ -#ifdef CONFIG_HOTPLUG -#define __devinit -#define __devinitdata -#define __devexit -#define __devexitdata -#else -#define __devinit __init -#define __devinitdata __initdata -#define __devexit __exit -#define __devexitdata __exitdata -#endif - -#ifdef CONFIG_HOTPLUG_CPU -#define __cpuinit -#define __cpuinitdata -#define __cpuexit -#define __cpuexitdata -#else -#define __cpuinit __init -#define __cpuinitdata __initdata -#define __cpuexit __exit -#define __cpuexitdata __exitdata -#endif - -#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \ - || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE) -#define __meminit -#define __meminitdata -#define __memexit -#define __memexitdata -#else -#define __meminit __init -#define __meminitdata __initdata -#define __memexit __exit -#define __memexitdata __exitdata -#endif - /* Functions marked as __devexit may be discarded at kernel link time, depending on config options. Newer versions of binutils detect references from retained sections to discarded sections and flag an error. 
Pointers to diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h new file mode 100644 index 000000000000..3c03a4bba5e4 --- /dev/null +++ b/include/linux/init_ohci1394_dma.h @@ -0,0 +1,4 @@ +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT +extern int __initdata init_ohci1394_dma_early; +extern void __init init_ohci1394_dma_on_all_controllers(void); +#endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h index cae35b6b9aec..e6b3f7080679 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -132,9 +132,11 @@ extern struct group_info init_groups; .cpus_allowed = CPU_MASK_ALL, \ .mm = NULL, \ .active_mm = &init_mm, \ - .run_list = LIST_HEAD_INIT(tsk.run_list), \ - .ioprio = 0, \ - .time_slice = HZ, \ + .rt = { \ + .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ + .time_slice = HZ, \ + .nr_cpus_allowed = NR_CPUS, \ + }, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 2306920fa388..c3db4a00f1fa 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -256,6 +256,7 @@ enum #ifdef CONFIG_HIGH_RES_TIMERS HRTIMER_SOFTIRQ, #endif + RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ }; /* softirq mask and active fields moved to irq_cpustat_t in diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h new file mode 100644 index 000000000000..593b222d9dcc --- /dev/null +++ b/include/linux/iocontext.h @@ -0,0 +1,95 @@ +#ifndef IOCONTEXT_H +#define IOCONTEXT_H + +#include <linux/radix-tree.h> + +/* + * This is the per-process anticipatory I/O scheduler state. + */ +struct as_io_context { + spinlock_t lock; + + void (*dtor)(struct as_io_context *aic); /* destructor */ + void (*exit)(struct as_io_context *aic); /* called on task exit */ + + unsigned long state; + atomic_t nr_queued; /* queued reads & sync writes */ + atomic_t nr_dispatched; /* number of requests gone to the drivers */ + + /* IO History tracking */ + /* Thinktime */ + unsigned long last_end_request; + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; + /* Layout pattern */ + unsigned int seek_samples; + sector_t last_request_pos; + u64 seek_total; + sector_t seek_mean; +}; + +struct cfq_queue; +struct cfq_io_context { + void *key; + unsigned long dead_key; + + struct cfq_queue *cfqq[2]; + + struct io_context *ioc; + + unsigned long last_end_request; + sector_t last_request_pos; + + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; + + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + + struct list_head queue_list; + + void (*dtor)(struct io_context *); /* destructor */ + void (*exit)(struct io_context *); /* called on task exit */ +}; + +/* + * I/O subsystem state of the associated processes. It is refcounted + * and kmalloc'ed. These could be shared between processes. 
+ */ +struct io_context { + atomic_t refcount; + atomic_t nr_tasks; + + /* all the fields below are protected by this lock */ + spinlock_t lock; + + unsigned short ioprio; + unsigned short ioprio_changed; + + /* + * For request batching + */ + unsigned long last_waited; /* Time last woken after wait for request */ + int nr_batch_requests; /* Number of requests left in the batch */ + + struct as_io_context *aic; + struct radix_tree_root radix_root; + void *ioc_data; +}; + +static inline struct io_context *ioc_task_link(struct io_context *ioc) +{ + /* + * if ref count is zero, don't allow sharing (ioc is going away, it's + * a race). + */ + if (ioc && atomic_inc_not_zero(&ioc->refcount)) + return ioc; + + return NULL; +} + +#endif diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 6187a8567bc7..605d237364d2 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -8,6 +8,7 @@ #ifndef _LINUX_IOPORT_H #define _LINUX_IOPORT_H +#ifndef __ASSEMBLY__ #include <linux/compiler.h> #include <linux/types.h> /* @@ -153,4 +154,5 @@ extern struct resource * __devm_request_region(struct device *dev, extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n); +#endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index baf29387cab4..2a3bb1bb7433 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -2,6 +2,7 @@ #define IOPRIO_H #include <linux/sched.h> +#include <linux/iocontext.h> /* * Gives us 8 prio classes with 13-bits of data for each class @@ -45,18 +46,18 @@ enum { * the cpu scheduler nice value to an io priority */ #define IOPRIO_NORM (4) -static inline int task_ioprio(struct task_struct *task) +static inline int task_ioprio(struct io_context *ioc) { - if (ioprio_valid(task->ioprio)) - return IOPRIO_PRIO_DATA(task->ioprio); + if (ioprio_valid(ioc->ioprio)) + return IOPRIO_PRIO_DATA(ioc->ioprio); return IOPRIO_NORM; } -static inline int task_ioprio_class(struct task_struct *task) +static inline int task_ioprio_class(struct io_context *ioc) { - if (ioprio_valid(task->ioprio)) - return IOPRIO_PRIO_CLASS(task->ioprio); + if (ioprio_valid(ioc->ioprio)) + return IOPRIO_PRIO_CLASS(ioc->ioprio); return IOPRIO_CLASS_BE; } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 06ef11457051..2cbf6fdb1799 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -149,6 +149,28 @@ typedef struct journal_header_s __be32 h_sequence; } journal_header_t; +/* + * Checksum types. + */ +#define JBD2_CRC32_CHKSUM 1 +#define JBD2_MD5_CHKSUM 2 +#define JBD2_SHA1_CHKSUM 3 + +#define JBD2_CRC32_CHKSUM_SIZE 4 + +#define JBD2_CHECKSUM_BYTES (32 / sizeof(u32)) +/* + * Commit block header for storing transactional checksums: + */ +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[JBD2_CHECKSUM_BYTES]; +}; /* * The block tag: used to describe a single buffer in the journal. 
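ioc_task_link() in the new iocontext.h above only shares an io_context when atomic_inc_not_zero() succeeds: once the refcount has dropped to zero the context is being torn down and must not gain new users. A userspace sketch of that inc-not-zero pattern using C11 atomics, assuming a plain integer refcount; it is an illustration of the idea, not the kernel primitive.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the count is not already zero. */
static bool ref_get_not_zero(atomic_int *refcount)
{
        int old = atomic_load(refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                        return true;    /* reference taken */
                /* old was reloaded by the failed CAS; retry */
        }
        return false;                   /* object already going away */
}

int main(void)
{
        atomic_int refs = 1;

        printf("first get:  %d (refs=%d)\n",
               ref_get_not_zero(&refs), atomic_load(&refs));
        atomic_store(&refs, 0);
        printf("after zero: %d (refs=%d)\n",
               ref_get_not_zero(&refs), atomic_load(&refs));
        return 0;
}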
@@ -242,31 +264,25 @@ typedef struct journal_superblock_s ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) -#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 -#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 +#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 + +#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 +#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 +#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 /* Features known to this kernel version: */ -#define JBD2_KNOWN_COMPAT_FEATURES 0 +#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ - JBD2_FEATURE_INCOMPAT_64BIT) + JBD2_FEATURE_INCOMPAT_64BIT | \ + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) #ifdef __KERNEL__ #include <linux/fs.h> #include <linux/sched.h> -#define JBD2_ASSERTIONS -#ifdef JBD2_ASSERTIONS -#define J_ASSERT(assert) \ -do { \ - if (!(assert)) { \ - printk (KERN_EMERG \ - "Assertion failure in %s() at %s:%d: \"%s\"\n", \ - __FUNCTION__, __FILE__, __LINE__, # assert); \ - BUG(); \ - } \ -} while (0) +#define J_ASSERT(assert) BUG_ON(!(assert)) #if defined(CONFIG_BUFFER_DEBUG) void buffer_assertion_failure(struct buffer_head *bh); @@ -282,10 +298,6 @@ void buffer_assertion_failure(struct buffer_head *bh); #define J_ASSERT_JH(jh, expr) J_ASSERT(expr) #endif -#else -#define J_ASSERT(assert) do { } while (0) -#endif /* JBD2_ASSERTIONS */ - #if defined(JBD2_PARANOID_IOFAIL) #define J_EXPECT(expr, why...) J_ASSERT(expr) #define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr) @@ -406,9 +418,23 @@ struct handle_s unsigned int h_sync: 1; /* sync-on-close */ unsigned int h_jdata: 1; /* force data journaling */ unsigned int h_aborted: 1; /* fatal error on handle */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map h_lockdep_map; +#endif }; +/* + * Some stats for checkpoint phase + */ +struct transaction_chp_stats_s { + unsigned long cs_chp_time; + unsigned long cs_forced_to_close; + unsigned long cs_written; + unsigned long cs_dropped; +}; + /* The transaction_t type is the guts of the journaling mechanism. 
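The new JBD2 feature bits follow the usual compat/incompat convention, and the JBD2_HAS_INCOMPAT_FEATURE() body quoted above is just a big-endian test against the on-disk superblock mask. A sketch of how a filesystem might probe for, and then enable, checksummed asynchronous commits; the surrounding function is hypothetical, only the jbd2 calls come from the header:

/* sketch: journal obtained elsewhere from jbd2_journal_init_*() */
static int demo_wants_async_commit(journal_t *journal)
{
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		return 1;	/* already enabled on disk */

	/* try to turn it on together with checksummed commit blocks */
	return jbd2_journal_set_features(journal,
					 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
					 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
}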
It * tracks a compound transaction through its various states: * @@ -456,6 +482,8 @@ struct transaction_s /* * Transaction's current state * [no locking - only kjournald2 alters this] + * [j_list_lock] guards transition of a transaction into T_FINISHED + * state and subsequent call of __jbd2_journal_drop_transaction() * FIXME: needs barriers * KLUDGE: [use j_state_lock] */ @@ -544,6 +572,21 @@ struct transaction_s spinlock_t t_handle_lock; /* + * Longest time some handle had to wait for running transaction + */ + unsigned long t_max_wait; + + /* + * When transaction started + */ + unsigned long t_start; + + /* + * Checkpointing stats [j_checkpoint_sem] + */ + struct transaction_chp_stats_s t_chp_stats; + + /* * Number of outstanding updates running on this transaction * [t_handle_lock] */ @@ -574,6 +617,39 @@ struct transaction_s }; +struct transaction_run_stats_s { + unsigned long rs_wait; + unsigned long rs_running; + unsigned long rs_locked; + unsigned long rs_flushing; + unsigned long rs_logging; + + unsigned long rs_handle_count; + unsigned long rs_blocks; + unsigned long rs_blocks_logged; +}; + +struct transaction_stats_s { + int ts_type; + unsigned long ts_tid; + union { + struct transaction_run_stats_s run; + struct transaction_chp_stats_s chp; + } u; +}; + +#define JBD2_STATS_RUN 1 +#define JBD2_STATS_CHECKPOINT 2 + +static inline unsigned long +jbd2_time_diff(unsigned long start, unsigned long end) +{ + if (end >= start) + return end - start; + + return end + (MAX_JIFFY_OFFSET - start); +} + /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. @@ -635,6 +711,12 @@ struct transaction_s * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the * number that will fit in j_blocksize * @j_last_sync_writer: most recent pid which did a synchronous write + * @j_history: Buffer storing the transactions statistics history + * @j_history_max: Maximum number of transactions in the statistics history + * @j_history_cur: Current number of transactions in the statistics history + * @j_history_lock: Protect the transactions statistics history + * @j_proc_entry: procfs entry for the jbd statistics directory + * @j_stats: Overall statistics * @j_private: An opaque pointer to fs-private information. */ @@ -827,6 +909,19 @@ struct journal_s pid_t j_last_sync_writer; /* + * Journal statistics + */ + struct transaction_stats_s *j_history; + int j_history_max; + int j_history_cur; + /* + * Protect the transactions statistics history + */ + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + + /* * An opaque pointer to fs-private information. 
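jbd2_time_diff() only has to survive a single jiffies wraparound, which is enough for the per-transaction statistics it feeds. An illustrative sketch of filling transaction_run_stats_s with it; how the commit code actually slices the phases is inferred from the field names, not quoted from jbd2:

/* sketch: timestamps are jiffies captured at each phase boundary */
static void demo_record_phases(struct transaction_run_stats_s *rs,
			       unsigned long t_locked,
			       unsigned long t_flushing,
			       unsigned long t_logging,
			       unsigned long t_done)
{
	rs->rs_locked   = jbd2_time_diff(t_locked, t_flushing);
	rs->rs_flushing = jbd2_time_diff(t_flushing, t_logging);
	rs->rs_logging  = jbd2_time_diff(t_logging, t_done);
}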
ext3 puts its * superblock pointer here */ @@ -932,6 +1027,8 @@ extern int jbd2_journal_check_available_features (journal_t *, unsigned long, unsigned long, unsigned long); extern int jbd2_journal_set_features (journal_t *, unsigned long, unsigned long, unsigned long); +extern void jbd2_journal_clear_features + (journal_t *, unsigned long, unsigned long, unsigned long); extern int jbd2_journal_create (journal_t *); extern int jbd2_journal_load (journal_t *journal); extern void jbd2_journal_destroy (journal_t *); diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 8b080024bbc1..7ba9e47bf061 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -29,6 +29,12 @@ # define SHIFT_HZ 9 #elif HZ >= 768 && HZ < 1536 # define SHIFT_HZ 10 +#elif HZ >= 1536 && HZ < 3072 +# define SHIFT_HZ 11 +#elif HZ >= 3072 && HZ < 6144 +# define SHIFT_HZ 12 +#elif HZ >= 6144 && HZ < 12288 +# define SHIFT_HZ 13 #else # error You lose. #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 94bc99656963..ff356b2ee478 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -105,8 +105,8 @@ struct user; * supposed to. */ #ifdef CONFIG_PREEMPT_VOLUNTARY -extern int cond_resched(void); -# define might_resched() cond_resched() +extern int _cond_resched(void); +# define might_resched() _cond_resched() #else # define might_resched() do { } while (0) #endif @@ -194,6 +194,9 @@ static inline int log_buf_read(int idx) { return 0; } static inline int log_buf_copy(char *dest, int idx, int len) { return 0; } #endif +extern void __attribute__((format(printf, 1, 2))) + early_printk(const char *fmt, ...); + unsigned long int_sqrt(unsigned long); extern int printk_ratelimit(void); diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 4a0d27f475d7..caa3f411f15d 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -3,15 +3,14 @@ * * Copyright (c) 2002-2003 Patrick Mochel * Copyright (c) 2002-2003 Open Source Development Labs - * Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com> - * Copyright (c) 2006-2007 Novell Inc. + * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (c) 2006-2008 Novell Inc. * * This file is released under the GPLv2. * - * * Please read Documentation/kobject.txt before using the kobject * interface, ESPECIALLY the parts about reference counts and object - * destructors. + * destructors. */ #ifndef _KOBJECT_H_ @@ -61,48 +60,54 @@ enum kobject_action { }; struct kobject { - const char * k_name; + const char *name; struct kref kref; struct list_head entry; - struct kobject * parent; - struct kset * kset; - struct kobj_type * ktype; - struct sysfs_dirent * sd; + struct kobject *parent; + struct kset *kset; + struct kobj_type *ktype; + struct sysfs_dirent *sd; + unsigned int state_initialized:1; + unsigned int state_in_sysfs:1; + unsigned int state_add_uevent_sent:1; + unsigned int state_remove_uevent_sent:1; }; -extern int kobject_set_name(struct kobject *, const char *, ...) - __attribute__((format(printf,2,3))); +extern int kobject_set_name(struct kobject *kobj, const char *name, ...) 
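The extra SHIFT_HZ cases simply extend the "nearest power of two" approximation to very high HZ values: SHIFT_HZ is the n for which 2^n best matches HZ (1000 -> 10, 2048 -> 11, 4096 -> 12, and so on). A small self-contained check of that mapping; the helper below mirrors the #elif ladder, it is not kernel code:

#include <stdio.h>

/* pick n such that HZ lies in [3*2^(n-2), 3*2^(n-1)), as the ladder does */
static int shift_hz(unsigned int hz)
{
	int n = 4;		/* ladder starts at HZ >= 12 -> SHIFT_HZ 4 */
	unsigned int lo = 12;

	if (hz < 12)
		return -1;	/* the header #errors out here too */
	while (!(hz >= lo && hz < 2 * lo)) {
		lo *= 2;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int hz[] = { 100, 250, 1000, 1024, 2048, 4096, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(hz) / sizeof(hz[0]); i++)
		printf("HZ=%u -> SHIFT_HZ=%d\n", hz[i], shift_hz(hz[i]));
	return 0;
}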
+ __attribute__((format(printf, 2, 3))); -static inline const char * kobject_name(const struct kobject * kobj) +static inline const char *kobject_name(const struct kobject *kobj) { - return kobj->k_name; + return kobj->name; } -extern void kobject_init(struct kobject *); -extern void kobject_cleanup(struct kobject *); +extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); +extern int __must_check kobject_add(struct kobject *kobj, + struct kobject *parent, + const char *fmt, ...); +extern int __must_check kobject_init_and_add(struct kobject *kobj, + struct kobj_type *ktype, + struct kobject *parent, + const char *fmt, ...); + +extern void kobject_del(struct kobject *kobj); -extern int __must_check kobject_add(struct kobject *); -extern void kobject_del(struct kobject *); +extern struct kobject * __must_check kobject_create(void); +extern struct kobject * __must_check kobject_create_and_add(const char *name, + struct kobject *parent); extern int __must_check kobject_rename(struct kobject *, const char *new_name); extern int __must_check kobject_move(struct kobject *, struct kobject *); -extern int __must_check kobject_register(struct kobject *); -extern void kobject_unregister(struct kobject *); - -extern struct kobject * kobject_get(struct kobject *); -extern void kobject_put(struct kobject *); - -extern struct kobject *kobject_kset_add_dir(struct kset *kset, - struct kobject *, const char *); -extern struct kobject *kobject_add_dir(struct kobject *, const char *); +extern struct kobject *kobject_get(struct kobject *kobj); +extern void kobject_put(struct kobject *kobj); -extern char * kobject_get_path(struct kobject *, gfp_t); +extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); struct kobj_type { - void (*release)(struct kobject *); - struct sysfs_ops * sysfs_ops; - struct attribute ** default_attrs; + void (*release)(struct kobject *kobj); + struct sysfs_ops *sysfs_ops; + struct attribute **default_attrs; }; struct kobj_uevent_env { @@ -119,6 +124,16 @@ struct kset_uevent_ops { struct kobj_uevent_env *env); }; +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); + ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); +}; + +extern struct sysfs_ops kobj_sysfs_ops; + /** * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. * @@ -128,7 +143,6 @@ struct kset_uevent_ops { * define the attribute callbacks and other common events that happen to * a kobject. * - * @ktype: the struct kobj_type for this specific kset * @list: the list of all kobjects for this kset * @list_lock: a lock for iterating over the kobjects * @kobj: the embedded kobject for this kset (recursion, isn't it fun...) @@ -138,99 +152,49 @@ struct kset_uevent_ops { * desired. 
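The reworked kobject API replaces kobject_register()/unregister() with explicit init/add steps (or the combined create/add helpers) and introduces kobj_attribute for simple sysfs files. A hedged module-style sketch of the dynamic path; the directory and attribute names are invented for illustration:

/* sketch: creates /sys/kernel/example_dir/value with the new helpers */
static struct kobject *example_kobj;
static int value;

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", value);
}

static ssize_t value_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	sscanf(buf, "%d", &value);
	return count;
}

static struct kobj_attribute value_attr =
	__ATTR(value, 0644, value_show, value_store);

static int __init example_init(void)
{
	example_kobj = kobject_create_and_add("example_dir", kernel_kobj);
	if (!example_kobj)
		return -ENOMEM;
	return sysfs_create_file(example_kobj, &value_attr.attr);
}

static void __exit example_exit(void)
{
	kobject_put(example_kobj);	/* drops the ref, removes the dir */
}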
*/ struct kset { - struct kobj_type *ktype; - struct list_head list; - spinlock_t list_lock; - struct kobject kobj; - struct kset_uevent_ops *uevent_ops; + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + struct kset_uevent_ops *uevent_ops; }; +extern void kset_init(struct kset *kset); +extern int __must_check kset_register(struct kset *kset); +extern void kset_unregister(struct kset *kset); +extern struct kset * __must_check kset_create_and_add(const char *name, + struct kset_uevent_ops *u, + struct kobject *parent_kobj); -extern void kset_init(struct kset * k); -extern int __must_check kset_add(struct kset * k); -extern int __must_check kset_register(struct kset * k); -extern void kset_unregister(struct kset * k); - -static inline struct kset * to_kset(struct kobject * kobj) +static inline struct kset *to_kset(struct kobject *kobj) { - return kobj ? container_of(kobj,struct kset,kobj) : NULL; + return kobj ? container_of(kobj, struct kset, kobj) : NULL; } -static inline struct kset * kset_get(struct kset * k) +static inline struct kset *kset_get(struct kset *k) { return k ? to_kset(kobject_get(&k->kobj)) : NULL; } -static inline void kset_put(struct kset * k) +static inline void kset_put(struct kset *k) { kobject_put(&k->kobj); } -static inline struct kobj_type * get_ktype(struct kobject * k) +static inline struct kobj_type *get_ktype(struct kobject *kobj) { - if (k->kset && k->kset->ktype) - return k->kset->ktype; - else - return k->ktype; + return kobj->ktype; } -extern struct kobject * kset_find_obj(struct kset *, const char *); - - -/* - * Use this when initializing an embedded kset with no other - * fields to initialize. - */ -#define set_kset_name(str) .kset = { .kobj = { .k_name = str } } - - -#define decl_subsys(_name,_type,_uevent_ops) \ -struct kset _name##_subsys = { \ - .kobj = { .k_name = __stringify(_name) }, \ - .ktype = _type, \ - .uevent_ops =_uevent_ops, \ -} -#define decl_subsys_name(_varname,_name,_type,_uevent_ops) \ -struct kset _varname##_subsys = { \ - .kobj = { .k_name = __stringify(_name) }, \ - .ktype = _type, \ - .uevent_ops =_uevent_ops, \ -} - -/* The global /sys/kernel/ subsystem for people to chain off of */ -extern struct kset kernel_subsys; -/* The global /sys/hypervisor/ subsystem */ -extern struct kset hypervisor_subsys; - -/* - * Helpers for setting the kset of registered objects. - * Often, a registered object belongs to a kset embedded in a - * subsystem. These do no magic, just make the resulting code - * easier to follow. - */ - -/** - * kobj_set_kset_s(obj,subsys) - set kset for embedded kobject. - * @obj: ptr to some object type. - * @subsys: a subsystem object (not a ptr). - * - * Can be used for any object type with an embedded ->kobj. 
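With ktype removed from struct kset, the type now comes from each member kobject, and kset_create_and_add() builds the container. A sketch of putting a dynamically allocated kobject into such a kset; names are illustrative and the ktype is reduced to the release/sysfs_ops essentials:

/* sketch: /sys/kernel/example_set/<name> via the new kset API */
struct example_obj {
	struct kobject kobj;
	int payload;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
	.release   = example_release,
	.sysfs_ops = &kobj_sysfs_ops,	/* exported by the new kobject core */
};

static struct kset *example_kset;

static struct example_obj *example_obj_create(const char *name)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->kobj.kset = example_kset;	/* membership set before adding */
	if (kobject_init_and_add(&obj->kobj, &example_ktype, NULL, "%s", name)) {
		kobject_put(&obj->kobj);
		return NULL;
	}
	kobject_uevent(&obj->kobj, KOBJ_ADD);
	return obj;
}

static int __init example_init(void)
{
	example_kset = kset_create_and_add("example_set", NULL, kernel_kobj);
	return example_kset ? 0 : -ENOMEM;
}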
- */ - -#define kobj_set_kset_s(obj,subsys) \ - (obj)->kobj.kset = &(subsys) - -extern int __must_check subsystem_register(struct kset *); -extern void subsystem_unregister(struct kset *); - -struct subsys_attribute { - struct attribute attr; - ssize_t (*show)(struct kset *, char *); - ssize_t (*store)(struct kset *, const char *, size_t); -}; +extern struct kobject *kset_find_obj(struct kset *, const char *); -extern int __must_check subsys_create_file(struct kset *, - struct subsys_attribute *); +/* The global /sys/kernel/ kobject for people to chain off of */ +extern struct kobject *kernel_kobj; +/* The global /sys/hypervisor/ kobject for people to chain off of */ +extern struct kobject *hypervisor_kobj; +/* The global /sys/power/ kobject for people to chain off of */ +extern struct kobject *power_kobj; +/* The global /sys/firmware/ kobject for people to chain off of */ +extern struct kobject *firmware_kobj; #if defined(CONFIG_HOTPLUG) int kobject_uevent(struct kobject *kobj, enum kobject_action action); @@ -243,18 +207,20 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) int kobject_action_type(const char *buf, size_t count, enum kobject_action *type); #else -static inline int kobject_uevent(struct kobject *kobj, enum kobject_action action) +static inline int kobject_uevent(struct kobject *kobj, + enum kobject_action action) { return 0; } static inline int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp[]) { return 0; } -static inline int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) +static inline int add_uevent_var(struct kobj_uevent_env *env, + const char *format, ...) { return 0; } static inline int kobject_action_type(const char *buf, size_t count, - enum kobject_action *type) + enum kobject_action *type) { return -EINVAL; } #endif diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 81891581e89b..6168c0a44172 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -182,6 +182,15 @@ static inline void kretprobe_assert(struct kretprobe_instance *ri, } } +#ifdef CONFIG_KPROBES_SANITY_TEST +extern int init_test_probes(void); +#else +static inline int init_test_probes(void) +{ + return 0; +} +#endif /* CONFIG_KPROBES_SANITY_TEST */ + extern spinlock_t kretprobe_lock; extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); @@ -227,6 +236,7 @@ void unregister_kretprobe(struct kretprobe *rp); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); + #else /* CONFIG_KPROBES */ #define __kprobes /**/ diff --git a/include/linux/kref.h b/include/linux/kref.h index 6fee3539893f..5d185635786e 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -24,6 +24,7 @@ struct kref { atomic_t refcount; }; +void kref_set(struct kref *kref, int num); void kref_init(struct kref *kref); void kref_get(struct kref *kref); int kref_put(struct kref *kref, void (*release) (struct kref *kref)); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 057a7f34ee36..4de4fd2d8607 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -9,12 +9,10 @@ #include <asm/types.h> #include <linux/ioctl.h> +#include <asm/kvm.h> #define KVM_API_VERSION 12 -/* Architectural interrupt line count. 
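kref_set() joins the existing init/get/put trio; the usual pattern embeds a kref in the refcounted object and frees it from the release callback passed to kref_put(). A minimal sketch with an invented widget type:

/* sketch: generic kref lifecycle for an embedded reference count */
struct widget {
	struct kref ref;
	char name[16];
};

static void widget_release(struct kref *kref)
{
	kfree(container_of(kref, struct widget, ref));
}

static struct widget *widget_create(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (w)
		kref_init(&w->ref);	/* count starts at 1 */
	return w;
}

static void widget_use(struct widget *w)
{
	kref_get(&w->ref);		/* each user holds its own reference */
	/* ... */
	kref_put(&w->ref, widget_release);
}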
*/ -#define KVM_NR_INTERRUPTS 256 - /* for KVM_CREATE_MEMORY_REGION */ struct kvm_memory_region { __u32 slot; @@ -23,17 +21,19 @@ struct kvm_memory_region { __u64 memory_size; /* bytes */ }; -/* for kvm_memory_region::flags */ -#define KVM_MEM_LOG_DIRTY_PAGES 1UL - -struct kvm_memory_alias { - __u32 slot; /* this has a different namespace than memory slots */ +/* for KVM_SET_USER_MEMORY_REGION */ +struct kvm_userspace_memory_region { + __u32 slot; __u32 flags; __u64 guest_phys_addr; - __u64 memory_size; - __u64 target_phys_addr; + __u64 memory_size; /* bytes */ + __u64 userspace_addr; /* start of the userspace allocated memory */ }; +/* for kvm_memory_region::flags */ +#define KVM_MEM_LOG_DIRTY_PAGES 1UL + + /* for KVM_IRQ_LINE */ struct kvm_irq_level { /* @@ -45,62 +45,18 @@ struct kvm_irq_level { __u32 level; }; -/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ -struct kvm_pic_state { - __u8 last_irr; /* edge detection */ - __u8 irr; /* interrupt request register */ - __u8 imr; /* interrupt mask register */ - __u8 isr; /* interrupt service register */ - __u8 priority_add; /* highest irq priority */ - __u8 irq_base; - __u8 read_reg_select; - __u8 poll; - __u8 special_mask; - __u8 init_state; - __u8 auto_eoi; - __u8 rotate_on_auto_eoi; - __u8 special_fully_nested_mode; - __u8 init4; /* true if 4 byte init */ - __u8 elcr; /* PIIX edge/trigger selection */ - __u8 elcr_mask; -}; - -#define KVM_IOAPIC_NUM_PINS 24 -struct kvm_ioapic_state { - __u64 base_address; - __u32 ioregsel; - __u32 id; - __u32 irr; - __u32 pad; - union { - __u64 bits; - struct { - __u8 vector; - __u8 delivery_mode:3; - __u8 dest_mode:1; - __u8 delivery_status:1; - __u8 polarity:1; - __u8 remote_irr:1; - __u8 trig_mode:1; - __u8 mask:1; - __u8 reserve:7; - __u8 reserved[4]; - __u8 dest_id; - } fields; - } redirtbl[KVM_IOAPIC_NUM_PINS]; -}; - -#define KVM_IRQCHIP_PIC_MASTER 0 -#define KVM_IRQCHIP_PIC_SLAVE 1 -#define KVM_IRQCHIP_IOAPIC 2 struct kvm_irqchip { __u32 chip_id; __u32 pad; union { char dummy[512]; /* reserving space */ +#ifdef CONFIG_X86 struct kvm_pic_state pic; +#endif +#if defined(CONFIG_X86) || defined(CONFIG_IA64) struct kvm_ioapic_state ioapic; +#endif } chip; }; @@ -116,6 +72,7 @@ struct kvm_irqchip { #define KVM_EXIT_FAIL_ENTRY 9 #define KVM_EXIT_INTR 10 #define KVM_EXIT_SET_TPR 11 +#define KVM_EXIT_TPR_ACCESS 12 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { @@ -174,90 +131,17 @@ struct kvm_run { __u32 longmode; __u32 pad; } hypercall; + /* KVM_EXIT_TPR_ACCESS */ + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; /* Fix the size of the union. 
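kvm_userspace_memory_region replaces kernel-allocated slots with guest RAM backed by an ordinary userspace mapping. A rough userspace sketch of wiring one slot into a freshly created VM; error handling is trimmed, and /dev/kvm plus KVM_CREATE_VM come from the wider KVM API rather than this hunk:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
	size_t size = 0x100000;			/* 1 MiB of guest RAM */

	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.flags           = 0,		/* or KVM_MEM_LOG_DIRTY_PAGES */
		.guest_phys_addr = 0,
		.memory_size     = size,
		.userspace_addr  = (unsigned long)mem,
	};

	return ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) ? 1 : 0;
}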
*/ char padding[256]; }; }; -/* for KVM_GET_REGS and KVM_SET_REGS */ -struct kvm_regs { - /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ - __u64 rax, rbx, rcx, rdx; - __u64 rsi, rdi, rsp, rbp; - __u64 r8, r9, r10, r11; - __u64 r12, r13, r14, r15; - __u64 rip, rflags; -}; - -/* for KVM_GET_FPU and KVM_SET_FPU */ -struct kvm_fpu { - __u8 fpr[8][16]; - __u16 fcw; - __u16 fsw; - __u8 ftwx; /* in fxsave format */ - __u8 pad1; - __u16 last_opcode; - __u64 last_ip; - __u64 last_dp; - __u8 xmm[16][16]; - __u32 mxcsr; - __u32 pad2; -}; - -/* for KVM_GET_LAPIC and KVM_SET_LAPIC */ -#define KVM_APIC_REG_SIZE 0x400 -struct kvm_lapic_state { - char regs[KVM_APIC_REG_SIZE]; -}; - -struct kvm_segment { - __u64 base; - __u32 limit; - __u16 selector; - __u8 type; - __u8 present, dpl, db, s, l, g, avl; - __u8 unusable; - __u8 padding; -}; - -struct kvm_dtable { - __u64 base; - __u16 limit; - __u16 padding[3]; -}; - -/* for KVM_GET_SREGS and KVM_SET_SREGS */ -struct kvm_sregs { - /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */ - struct kvm_segment cs, ds, es, fs, gs, ss; - struct kvm_segment tr, ldt; - struct kvm_dtable gdt, idt; - __u64 cr0, cr2, cr3, cr4, cr8; - __u64 efer; - __u64 apic_base; - __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; -}; - -struct kvm_msr_entry { - __u32 index; - __u32 reserved; - __u64 data; -}; - -/* for KVM_GET_MSRS and KVM_SET_MSRS */ -struct kvm_msrs { - __u32 nmsrs; /* number of msrs in entries */ - __u32 pad; - - struct kvm_msr_entry entries[0]; -}; - -/* for KVM_GET_MSR_INDEX_LIST */ -struct kvm_msr_list { - __u32 nmsrs; /* number of msrs in entries */ - __u32 indices[0]; -}; - /* for KVM_TRANSLATE */ struct kvm_translation { /* in */ @@ -302,28 +186,24 @@ struct kvm_dirty_log { }; }; -struct kvm_cpuid_entry { - __u32 function; - __u32 eax; - __u32 ebx; - __u32 ecx; - __u32 edx; - __u32 padding; -}; - -/* for KVM_SET_CPUID */ -struct kvm_cpuid { - __u32 nent; - __u32 padding; - struct kvm_cpuid_entry entries[0]; -}; - /* for KVM_SET_SIGNAL_MASK */ struct kvm_signal_mask { __u32 len; __u8 sigset[0]; }; +/* for KVM_TPR_ACCESS_REPORTING */ +struct kvm_tpr_access_ctl { + __u32 enabled; + __u32 flags; + __u32 reserved[8]; +}; + +/* for KVM_SET_VAPIC_ADDR */ +struct kvm_vapic_addr { + __u64 vapic_addr; +}; + #define KVMIO 0xAE /* @@ -347,11 +227,21 @@ struct kvm_signal_mask { */ #define KVM_CAP_IRQCHIP 0 #define KVM_CAP_HLT 1 +#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 +#define KVM_CAP_USER_MEMORY 3 +#define KVM_CAP_SET_TSS_ADDR 4 +#define KVM_CAP_EXT_CPUID 5 +#define KVM_CAP_VAPIC 6 /* * ioctls for VM fds */ #define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) +#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) +#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) +#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\ + struct kvm_userspace_memory_region) +#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) /* * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns * a vcpu fd. 
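KVM_CAP_VAPIC, KVM_TPR_ACCESS_REPORTING and the new KVM_EXIT_TPR_ACCESS exit work together: userspace opts in on a vcpu and then sees guest TPR reads/writes surface in kvm_run. A hedged sketch of the userspace side; the vcpu fd and the mmap'ed run structure are assumed to be set up already:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* sketch: enable TPR access reporting, then consume the new exit reason */
static int enable_tpr_reporting(int vcpu_fd)
{
	struct kvm_tpr_access_ctl ctl = { .enabled = 1 };

	return ioctl(vcpu_fd, KVM_TPR_ACCESS_REPORTING, &ctl);
}

static void handle_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_TPR_ACCESS:
		fprintf(stderr, "TPR %s at rip 0x%llx\n",
			run->tpr_access.is_write ? "write" : "read",
			(unsigned long long)run->tpr_access.rip);
		break;
	default:
		break;
	}
}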
@@ -359,6 +249,7 @@ struct kvm_signal_mask { #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) +#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x48, struct kvm_cpuid2) /* Device model IOC */ #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) @@ -384,5 +275,11 @@ struct kvm_signal_mask { #define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) #define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state) #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state) +#define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2) +#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2) +/* Available with KVM_CAP_VAPIC */ +#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) +/* Available with KVM_CAP_VAPIC */ +#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h new file mode 100644 index 000000000000..ea4764b0a2f4 --- /dev/null +++ b/include/linux/kvm_host.h @@ -0,0 +1,299 @@ +#ifndef __KVM_HOST_H +#define __KVM_HOST_H + +/* + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include <linux/types.h> +#include <linux/hardirq.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/preempt.h> +#include <asm/signal.h> + +#include <linux/kvm.h> +#include <linux/kvm_para.h> + +#include <linux/kvm_types.h> + +#include <asm/kvm_host.h> + +#define KVM_MAX_VCPUS 4 +#define KVM_MEMORY_SLOTS 8 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + +#define KVM_PIO_PAGE_OFFSET 1 + +/* + * vcpu->requests bit members + */ +#define KVM_REQ_TLB_FLUSH 0 +#define KVM_REQ_MIGRATE_TIMER 1 +#define KVM_REQ_REPORT_TPR_ACCESS 2 + +struct kvm_vcpu; +extern struct kmem_cache *kvm_vcpu_cache; + +struct kvm_guest_debug { + int enabled; + unsigned long bp[4]; + int singlestep; +}; + +/* + * It would be nice to use something smarter than a linear search, TBD... + * Thankfully we dont expect many devices to register (famous last words :), + * so until then it will suffice. At least its abstracted so we can change + * in one place. 
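kvm_io_bus is the "linear search" the comment above apologises for: a small fixed array of in-kernel devices matched against a guest physical address. A sketch of what such a scan looks like; the demo_* types and the base/len matching are assumptions for illustration, not the declared kvm_io_device interface:

/* sketch: how a flat device array can be searched by gpa */
struct demo_io_device {
	gpa_t base;
	gpa_t len;
};

static struct demo_io_device *
demo_bus_find_dev(struct demo_io_device **devs, int dev_count, gpa_t addr)
{
	int i;

	for (i = 0; i < dev_count; i++)		/* NR_IOBUS_DEVS is tiny */
		if (addr >= devs[i]->base &&
		    addr < devs[i]->base + devs[i]->len)
			return devs[i];
	return NULL;				/* unhandled: exit to userspace */
}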
+ */ +struct kvm_io_bus { + int dev_count; +#define NR_IOBUS_DEVS 6 + struct kvm_io_device *devs[NR_IOBUS_DEVS]; +}; + +void kvm_io_bus_init(struct kvm_io_bus *bus); +void kvm_io_bus_destroy(struct kvm_io_bus *bus); +struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr); +void kvm_io_bus_register_dev(struct kvm_io_bus *bus, + struct kvm_io_device *dev); + +struct kvm_vcpu { + struct kvm *kvm; + struct preempt_notifier preempt_notifier; + int vcpu_id; + struct mutex mutex; + int cpu; + struct kvm_run *run; + int guest_mode; + unsigned long requests; + struct kvm_guest_debug guest_debug; + int fpu_active; + int guest_fpu_loaded; + wait_queue_head_t wq; + int sigset_active; + sigset_t sigset; + struct kvm_vcpu_stat stat; + +#ifdef CONFIG_HAS_IOMEM + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_size; + unsigned char mmio_data[8]; + gpa_t mmio_phys_addr; +#endif + + struct kvm_vcpu_arch arch; +}; + +struct kvm_memory_slot { + gfn_t base_gfn; + unsigned long npages; + unsigned long flags; + unsigned long *rmap; + unsigned long *dirty_bitmap; + unsigned long userspace_addr; + int user_alloc; +}; + +struct kvm { + struct mutex lock; /* protects the vcpus array and APIC accesses */ + spinlock_t mmu_lock; + struct mm_struct *mm; /* userspace tied to this vm */ + int nmemslots; + struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + + KVM_PRIVATE_MEM_SLOTS]; + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + struct list_head vm_list; + struct file *filp; + struct kvm_io_bus mmio_bus; + struct kvm_io_bus pio_bus; + struct kvm_vm_stat stat; + struct kvm_arch arch; +}; + +/* The guest did something we don't support. */ +#define pr_unimpl(vcpu, fmt, ...) \ + do { \ + if (printk_ratelimit()) \ + printk(KERN_ERR "kvm: %i: cpu%i " fmt, \ + current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \ + } while (0) + +#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) +#define vcpu_printf(vcpu, fmt...) 
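The memslot array in struct kvm is likewise searched linearly: a gfn belongs to the slot whose [base_gfn, base_gfn + npages) range contains it. A sketch of that lookup; the declared gfn_to_memslot() is the real entry point, this only shows the range test:

/* sketch: resolve a guest frame number to its backing memory slot */
static struct kvm_memory_slot *demo_gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *slot = &kvm->memslots[i];

		if (gfn >= slot->base_gfn &&
		    gfn < slot->base_gfn + slot->npages)
			return slot;
	}
	return NULL;	/* not RAM: MMIO or a hole in the guest map */
}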
kvm_printf(vcpu->kvm, fmt) + +int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); +void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); + +void vcpu_load(struct kvm_vcpu *vcpu); +void vcpu_put(struct kvm_vcpu *vcpu); + +void decache_vcpus_on_cpu(int cpu); + + +int kvm_init(void *opaque, unsigned int vcpu_size, + struct module *module); +void kvm_exit(void); + +#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) +#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) +static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } +struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); + +extern struct page *bad_page; + +int is_error_page(struct page *page); +int kvm_is_error_hva(unsigned long addr); +int kvm_set_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + int user_alloc); +int __kvm_set_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + int user_alloc); +int kvm_arch_set_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc); +gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); +struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); +void kvm_release_page_clean(struct page *page); +void kvm_release_page_dirty(struct page *page); +int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, + int len); +int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, + unsigned long len); +int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); +int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, + int offset, int len); +int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, + unsigned long len); +int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); +int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); +int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +void mark_page_dirty(struct kvm *kvm, gfn_t gfn); + +void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_resched(struct kvm_vcpu *vcpu); +void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); +void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); +void kvm_flush_remote_tlbs(struct kvm *kvm); + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); + +int kvm_dev_ioctl_check_extension(long ext); + +int kvm_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log, int *is_dirty); +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log); + +int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, + struct + kvm_userspace_memory_region *mem, + int user_alloc); +long kvm_arch_vm_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +void kvm_arch_destroy_vm(struct kvm *kvm); + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr); + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu 
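kvm_read_guest()/kvm_write_guest() copy between kernel buffers and guest physical memory, splitting the access across pages as needed. A hedged sketch of a caller publishing a small structure at a fixed guest physical address; the descriptor layout and the gpa are invented for the example:

/* sketch: push a descriptor into guest RAM, read back an acknowledgement */
struct demo_desc {
	u32 version;
	u32 flags;
};

static int demo_publish(struct kvm *kvm, gpa_t gpa)
{
	struct demo_desc desc = { .version = 1, .flags = 0 };
	u32 ack;
	int r;

	r = kvm_write_guest(kvm, gpa, &desc, sizeof(desc));
	if (r)
		return r;

	return kvm_read_guest(kvm, gpa + sizeof(desc), &ack, sizeof(ack));
}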
*vcpu, + struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, + struct kvm_debug_guest *dbg); +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +int kvm_arch_init(void *opaque); +void kvm_arch_exit(void); + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); + +int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_arch_hardware_enable(void *garbage); +void kvm_arch_hardware_disable(void *garbage); +int kvm_arch_hardware_setup(void); +void kvm_arch_hardware_unsetup(void); +void kvm_arch_check_processor_compat(void *rtn); +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); + +void kvm_free_physmem(struct kvm *kvm); + +struct kvm *kvm_arch_create_vm(void); +void kvm_arch_destroy_vm(struct kvm *kvm); + +int kvm_cpu_get_interrupt(struct kvm_vcpu *v); +int kvm_cpu_has_interrupt(struct kvm_vcpu *v); +void kvm_vcpu_kick(struct kvm_vcpu *vcpu); + +static inline void kvm_guest_enter(void) +{ + account_system_vtime(current); + current->flags |= PF_VCPU; +} + +static inline void kvm_guest_exit(void) +{ + account_system_vtime(current); + current->flags &= ~PF_VCPU; +} + +static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + return slot - kvm->memslots; +} + +static inline gpa_t gfn_to_gpa(gfn_t gfn) +{ + return (gpa_t)gfn << PAGE_SHIFT; +} + +static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) +{ + set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); +} + +enum kvm_stat_kind { + KVM_STAT_VM, + KVM_STAT_VCPU, +}; + +struct kvm_stats_debugfs_item { + const char *name; + int offset; + enum kvm_stat_kind kind; + struct dentry *dentry; +}; +extern struct kvm_stats_debugfs_item debugfs_entries[]; + +#endif diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 3b292565a693..5497aac0d2f8 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -2,72 +2,30 @@ #define __LINUX_KVM_PARA_H /* - * Guest OS interface for KVM paravirtualization - * - * Note: this interface is totally experimental, and is certain to change - * as we make progress. + * This header file provides a method for making a hypercall to the host + * Architectures should define: + * - kvm_hypercall0, kvm_hypercall1... + * - kvm_arch_para_features + * - kvm_para_available */ -/* - * Per-VCPU descriptor area shared between guest and host. Writable to - * both guest and host. Registered with the host by the guest when - * a guest acknowledges paravirtual mode. - * - * NOTE: all addresses are guest-physical addresses (gpa), to make it - * easier for the hypervisor to map between the various addresses. - */ -struct kvm_vcpu_para_state { - /* - * API version information for compatibility. If there's any support - * mismatch (too old host trying to execute too new guest) then - * the host will deny entry into paravirtual mode. Any other - * combination (new host + old guest and new host + new guest) - * is supposed to work - new host versions will support all old - * guest API versions. 
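vcpu->requests is a plain bitmask polled on the way back into the guest; kvm_migrate_apic_timer() above just sets one of the KVM_REQ_* bits. A sketch of both halves of that protocol; the consumer side mirrors what an arch run loop does, not its exact code:

/* producer: ask a vcpu to flush its TLB before re-entering the guest */
static void demo_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
}

/* consumer: run-loop side, drains bits that were set asynchronously */
static void demo_handle_requests(struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		; /* arch code flushes this vcpu's TLB here */
	if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
		; /* arch code moves the APIC timer to vcpu->cpu here */
}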
- */ - u32 guest_version; - u32 host_version; - u32 size; - u32 ret; - - /* - * The address of the vm exit instruction (VMCALL or VMMCALL), - * which the host will patch according to the CPU model the - * VM runs on: - */ - u64 hypercall_gpa; - -} __attribute__ ((aligned(PAGE_SIZE))); - -#define KVM_PARA_API_VERSION 1 - -/* - * This is used for an RDMSR's ECX parameter to probe for a KVM host. - * Hopefully no CPU vendor will use up this number. This is placed well - * out of way of the typical space occupied by CPU vendors' MSR indices, - * and we think (or at least hope) it wont be occupied in the future - * either. - */ -#define MSR_KVM_API_MAGIC 0x87655678 +/* Return values for hypercalls */ +#define KVM_ENOSYS 1000 -#define KVM_EINVAL 1 +#define KVM_HC_VAPIC_POLL_IRQ 1 /* - * Hypercall calling convention: - * - * Each hypercall may have 0-6 parameters. - * - * 64-bit hypercall index is in RAX, goes from 0 to __NR_hypercalls-1 - * - * 64-bit parameters 1-6 are in the standard gcc x86_64 calling convention - * order: RDI, RSI, RDX, RCX, R8, R9. - * - * 32-bit index is EBX, parameters are: EAX, ECX, EDX, ESI, EDI, EBP. - * (the first 3 are according to the gcc regparm calling convention) - * - * No registers are clobbered by the hypercall, except that the - * return value is in RAX. + * hypercalls use architecture specific */ -#define __NR_hypercalls 0 +#include <asm/kvm_para.h> + +#ifdef __KERNEL__ +static inline int kvm_para_has_feature(unsigned int feature) +{ + if (kvm_arch_para_features() & (1UL << feature)) + return 1; + return 0; +} +#endif /* __KERNEL__ */ +#endif /* __LINUX_KVM_PARA_H */ -#endif diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h new file mode 100644 index 000000000000..1c4e46decb22 --- /dev/null +++ b/include/linux/kvm_types.h @@ -0,0 +1,54 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
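The slimmed-down kvm_para.h leaves discovery to the architecture: kvm_para_available() says whether we run under KVM at all, and kvm_para_has_feature() tests one bit of kvm_arch_para_features(). A sketch of a guest-side probe; KVM_FEATURE_DEMO is a placeholder bit number, not a real KVM feature:

#define KVM_FEATURE_DEMO 0	/* placeholder bit for illustration only */

/* sketch: guest driver deciding whether to use a paravirtual facility */
static int demo_pv_init(void)
{
	if (!kvm_para_available())
		return -ENODEV;		/* not running under KVM */

	if (!kvm_para_has_feature(KVM_FEATURE_DEMO))
		return -ENODEV;		/* host does not advertise it */

	return 0;			/* safe to take the paravirt path */
}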
+ * + */ + +#ifndef __KVM_TYPES_H__ +#define __KVM_TYPES_H__ + +#include <asm/types.h> + +/* + * Address types: + * + * gva - guest virtual address + * gpa - guest physical address + * gfn - guest frame number + * hva - host virtual address + * hpa - host physical address + * hfn - host frame number + */ + +typedef unsigned long gva_t; +typedef u64 gpa_t; +typedef unsigned long gfn_t; + +typedef unsigned long hva_t; +typedef u64 hpa_t; +typedef unsigned long hfn_t; + +struct kvm_pio_request { + unsigned long count; + int cur_count; + struct page *guest_pages[2]; + unsigned guest_page_offset; + int in; + int port; + int size; + int string; + int down; + int rep; +}; + +#endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h new file mode 100644 index 000000000000..901c2d6377a8 --- /dev/null +++ b/include/linux/latencytop.h @@ -0,0 +1,44 @@ +/* + * latencytop.h: Infrastructure for displaying latency + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + */ + +#ifndef _INCLUDE_GUARD_LATENCYTOP_H_ +#define _INCLUDE_GUARD_LATENCYTOP_H_ + +#ifdef CONFIG_LATENCYTOP + +#define LT_SAVECOUNT 32 +#define LT_BACKTRACEDEPTH 12 + +struct latency_record { + unsigned long backtrace[LT_BACKTRACEDEPTH]; + unsigned int count; + unsigned long time; + unsigned long max; +}; + + +struct task_struct; + +void account_scheduler_latency(struct task_struct *task, int usecs, int inter); + +void clear_all_latency_tracing(struct task_struct *p); + +#else + +static inline void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ +} + +static inline void clear_all_latency_tracing(struct task_struct *p) +{ +} + +#endif + +#endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 124033cb5e9b..4374c4277780 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -35,6 +35,7 @@ #include <linux/workqueue.h> #include <scsi/scsi_host.h> #include <linux/acpi.h> +#include <linux/cdrom.h> /* * Define if arch has non-standard setup. This is a _PCI_ standard @@ -143,10 +144,11 @@ enum { ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ - ATA_DFLAG_INIT_MASK = (1 << 16) - 1, + ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ + ATA_DFLAG_INIT_MASK = (1 << 24) - 1, - ATA_DFLAG_DETACH = (1 << 16), - ATA_DFLAG_DETACHED = (1 << 17), + ATA_DFLAG_DETACH = (1 << 24), + ATA_DFLAG_DETACHED = (1 << 25), ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ @@ -217,9 +219,7 @@ enum { /* struct ata_queued_cmd flags */ ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ - ATA_QCFLAG_SG = (1 << 1), /* have s/g table? 
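The typedefs above make the translation chain explicit: gva -> gpa through the guest page tables, gpa -> gfn by a shift, gfn -> hva through the memslot's userspace_addr, hva -> hpa through the host page tables. A sketch of the two purely arithmetic steps, using the memslot fields introduced earlier; the demo_* helpers are illustrative:

/* sketch: the arithmetic links in the KVM address chain */
static inline gfn_t demo_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hva_t demo_gfn_to_hva(struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* caller must already have checked that gfn falls inside this slot */
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}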
*/ - ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */ - ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, + ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ @@ -266,19 +266,15 @@ enum { PORT_DISABLED = 2, /* encoding various smaller bitmaps into a single - * unsigned int bitmap + * unsigned long bitmap */ - ATA_BITS_PIO = 7, - ATA_BITS_MWDMA = 5, - ATA_BITS_UDMA = 8, + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, ATA_SHIFT_PIO = 0, - ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO, - ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA, - - ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO, - ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA, - ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA, + ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, + ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, /* size of buffer to pad xfers ending on unaligned boundaries */ ATA_DMA_PAD_SZ = 4, @@ -349,6 +345,21 @@ enum { ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */ + + /* ATAPI command types */ + ATAPI_READ = 0, /* READs */ + ATAPI_WRITE = 1, /* WRITEs */ + ATAPI_READ_CD = 2, /* READ CD [MSF] */ + ATAPI_MISC = 3, /* the rest */ +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) + << ATA_SHIFT_PIO, + ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) + << ATA_SHIFT_MWDMA, + ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) + << ATA_SHIFT_UDMA, }; enum hsm_task_states { @@ -447,7 +458,7 @@ struct ata_queued_cmd { unsigned int tag; unsigned int n_elem; unsigned int n_iter; - unsigned int orig_n_elem; + unsigned int mapped_n_elem; int dma_dir; @@ -455,17 +466,18 @@ struct ata_queued_cmd { unsigned int sect_size; unsigned int nbytes; + unsigned int raw_nbytes; unsigned int curbytes; struct scatterlist *cursg; unsigned int cursg_ofs; + struct scatterlist *last_sg; + struct scatterlist saved_last_sg; struct scatterlist sgent; - struct scatterlist pad_sgent; - void *buf_virt; + struct scatterlist extra_sg[2]; - /* DO NOT iterate over __sg manually, use ata_for_each_sg() */ - struct scatterlist *__sg; + struct scatterlist *sg; unsigned int err_mask; struct ata_taskfile result_tf; @@ -482,7 +494,7 @@ struct ata_port_stats { }; struct ata_ering_entry { - int is_io; + unsigned int eflags; unsigned int err_mask; u64 timestamp; }; @@ -522,9 +534,9 @@ struct ata_device { unsigned int cdb_len; /* per-dev xfer mask */ - unsigned int pio_mask; - unsigned int mwdma_mask; - unsigned int udma_mask; + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; /* for CHS addressing */ u16 cylinders; /* Number of cylinders */ @@ -560,6 +572,8 @@ struct ata_eh_context { int tries[ATA_MAX_DEVICES]; unsigned int classes[ATA_MAX_DEVICES]; unsigned int did_probe_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[ATA_MAX_DEVICES]; }; struct ata_acpi_drive @@ -686,7 +700,8 @@ struct ata_port_operations { void (*bmdma_setup) (struct ata_queued_cmd *qc); void (*bmdma_start) (struct ata_queued_cmd *qc); - void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int); + unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw); int (*qc_defer) (struct 
ata_queued_cmd *qc); void (*qc_prep) (struct ata_queued_cmd *qc); @@ -832,8 +847,6 @@ extern int ata_busy_sleep(struct ata_port *ap, unsigned long timeout_pat, unsigned long timeout); extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline); extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline); -extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, - void *data, unsigned long delay); extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, unsigned long interval_msec, unsigned long timeout_msec); @@ -848,6 +861,16 @@ extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis); extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); +extern unsigned long ata_pack_xfermask(unsigned long pio_mask, + unsigned long mwdma_mask, unsigned long udma_mask); +extern void ata_unpack_xfermask(unsigned long xfer_mask, + unsigned long *pio_mask, unsigned long *mwdma_mask, + unsigned long *udma_mask); +extern u8 ata_xfer_mask2mode(unsigned long xfer_mask); +extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); +extern int ata_xfer_mode2shift(unsigned long xfer_mode); +extern const char *ata_mode_string(unsigned long xfer_mask); +extern unsigned long ata_id_xfermask(const u16 *id); extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device); extern void ata_std_dev_select(struct ata_port *ap, unsigned int device); extern u8 ata_check_status(struct ata_port *ap); @@ -856,17 +879,15 @@ extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) extern int ata_port_start(struct ata_port *ap); extern int ata_sff_port_start(struct ata_port *ap); extern irqreturn_t ata_interrupt(int irq, void *dev_instance); -extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf, - unsigned int buflen, int write_data); -extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, - unsigned int buflen, int write_data); +extern unsigned int ata_data_xfer(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_data_xfer_noirq(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc); extern void ata_qc_prep(struct ata_queued_cmd *qc); extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); -extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, - unsigned int buflen); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); @@ -875,7 +896,6 @@ extern void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); -extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown); extern void ata_bmdma_setup(struct ata_queued_cmd *qc); extern void ata_bmdma_start(struct ata_queued_cmd *qc); extern void ata_bmdma_stop(struct ata_queued_cmd *qc); @@ -910,6 +930,7 @@ extern u8 ata_irq_on(struct ata_port *ap); extern int ata_cable_40wire(struct ata_port *ap); extern int ata_cable_80wire(struct ata_port *ap); extern int ata_cable_sata(struct ata_port *ap); +extern int ata_cable_ignore(struct ata_port *ap); extern int 
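The newly exported helpers keep drivers away from that bit layout: ata_pack_xfermask()/ata_unpack_xfermask() convert between the three per-class masks and the combined one, and ata_xfer_mask2mode() picks the best mode a mask allows. A usage sketch inside a hypothetical host driver:

/* sketch: derive the best mode a port supports from its three class masks */
static u8 demo_best_mode(unsigned long pio, unsigned long mwdma,
			 unsigned long udma)
{
	unsigned long xfer_mask = ata_pack_xfermask(pio, mwdma, udma);

	return ata_xfer_mask2mode(xfer_mask);	/* e.g. an XFER_UDMA_* value */
}

/* and back again, e.g. after filtering the combined mask */
static void demo_split(unsigned long xfer_mask, struct ata_device *dev)
{
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}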
ata_cable_unknown(struct ata_port *ap); /* @@ -917,11 +938,13 @@ extern int ata_cable_unknown(struct ata_port *ap); */ extern unsigned int ata_pio_need_iordy(const struct ata_device *); +extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); extern int ata_timing_compute(struct ata_device *, unsigned short, struct ata_timing *, int, int); extern void ata_timing_merge(const struct ata_timing *, const struct ata_timing *, struct ata_timing *, unsigned int); +extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); enum { ATA_TIMING_SETUP = (1 << 0), @@ -948,15 +971,40 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) return &ap->__acpi_init_gtm; return NULL; } -extern int ata_acpi_cbl_80wire(struct ata_port *ap); int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); +unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm); +int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); #else static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) { return NULL; } -static inline int ata_acpi_cbl_80wire(struct ata_port *ap) { return 0; } + +static inline int ata_acpi_stm(const struct ata_port *ap, + struct ata_acpi_gtm *stm) +{ + return -ENOSYS; +} + +static inline int ata_acpi_gtm(const struct ata_port *ap, + struct ata_acpi_gtm *stm) +{ + return -ENOSYS; +} + +static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm) +{ + return 0; +} + +static inline int ata_acpi_cbl_80wire(struct ata_port *ap, + const struct ata_acpi_gtm *gtm) +{ + return 0; +} #endif #ifdef CONFIG_PCI @@ -985,8 +1033,12 @@ extern int ata_pci_init_bmdma(struct ata_host *host); extern int ata_pci_prepare_sff_host(struct pci_dev *pdev, const struct ata_port_info * const * ppi, struct ata_host **r_host); +extern int ata_pci_activate_sff_host(struct ata_host *host, + irq_handler_t irq_handler, + struct scsi_host_template *sht); extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); -extern unsigned long ata_pci_default_filter(struct ata_device *, unsigned long); +extern unsigned long ata_pci_default_filter(struct ata_device *dev, + unsigned long xfer_mask); #endif /* CONFIG_PCI */ /* @@ -1074,35 +1126,6 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name); #endif -/* - * qc helpers - */ -static inline struct scatterlist * -ata_qc_first_sg(struct ata_queued_cmd *qc) -{ - qc->n_iter = 0; - if (qc->n_elem) - return qc->__sg; - if (qc->pad_len) - return &qc->pad_sgent; - return NULL; -} - -static inline struct scatterlist * -ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc) -{ - if (sg == &qc->pad_sgent) - return NULL; - if (++qc->n_iter < qc->n_elem) - return sg_next(sg); - if (qc->pad_len) - return &qc->pad_sgent; - return NULL; -} - -#define ata_for_each_sg(sg, qc) \ - for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc)) - static inline unsigned int ata_tag_valid(unsigned int tag) { return (tag < ATA_MAX_QUEUE) ? 
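With the pad buffer and ata_for_each_sg() removed, a queued command carries a standard scatterlist in qc->sg, so walkers can use the generic iterator from linux/scatterlist.h. A sketch of how a data-transfer path might now visit the table; the byte-counting body is only an example:

#include <linux/scatterlist.h>

/* sketch: visit every segment of a command's S/G table */
static unsigned int demo_count_bytes(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int i, bytes = 0;

	for_each_sg(qc->sg, sg, qc->n_elem, i)
		bytes += sg->length;

	return bytes;	/* total length of the mapped segments */
}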
1 : 0; @@ -1337,15 +1360,17 @@ static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) static inline void ata_qc_reinit(struct ata_queued_cmd *qc) { qc->dma_dir = DMA_NONE; - qc->__sg = NULL; + qc->sg = NULL; qc->flags = 0; qc->cursg = NULL; qc->cursg_ofs = 0; - qc->nbytes = qc->curbytes = 0; + qc->nbytes = qc->raw_nbytes = qc->curbytes = 0; qc->n_elem = 0; + qc->mapped_n_elem = 0; qc->n_iter = 0; qc->err_mask = 0; qc->pad_len = 0; + qc->last_sg = NULL; qc->sect_size = ATA_SECT_SIZE; ata_tf_init(qc->dev, &qc->tf); @@ -1362,6 +1387,27 @@ static inline int ata_try_flush_cache(const struct ata_device *dev) ata_id_has_flush_ext(dev->id); } +static inline int atapi_cmd_type(u8 opcode) +{ + switch (opcode) { + case GPCMD_READ_10: + case GPCMD_READ_12: + return ATAPI_READ; + + case GPCMD_WRITE_10: + case GPCMD_WRITE_12: + case GPCMD_WRITE_AND_VERIFY_10: + return ATAPI_WRITE; + + case GPCMD_READ_CD: + case GPCMD_READ_CD_MSF: + return ATAPI_READ_CD; + + default: + return ATAPI_MISC; + } +} + static inline unsigned int ac_err_mask(u8 status) { if (status & (ATA_BUSY | ATA_DRQ)) diff --git a/include/linux/linkage.h b/include/linux/linkage.h index ff203dd02919..3faf599ea58e 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -13,6 +13,10 @@ #define asmlinkage CPP_ASMLINKAGE #endif +#ifndef asmregparm +# define asmregparm +#endif + #ifndef prevent_tail_call # define prevent_tail_call(ret) do { } while (0) #endif @@ -53,6 +57,10 @@ .size name, .-name #endif +/* If symbol 'name' is treated as a subroutine (gets called, and returns) + * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of + * static analysis tools such as stack depth analyzer. + */ #ifndef ENDPROC #define ENDPROC(name) \ .type name, @function; \ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 6f1637c61e10..3d25bcd139d1 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -33,9 +33,26 @@ struct nlmsvc_binding { extern struct nlmsvc_binding * nlmsvc_ops; /* + * Similar to nfs_client_initdata, but without the NFS-specific + * rpc_ops field. + */ +struct nlmclnt_initdata { + const char *hostname; + const struct sockaddr *address; + size_t addrlen; + unsigned short protocol; + u32 nfs_version; +}; + +/* * Functions exported by the lockd module */ -extern int nlmclnt_proc(struct inode *, int, struct file_lock *); + +extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); +extern void nlmclnt_done(struct nlm_host *host); + +extern int nlmclnt_proc(struct nlm_host *host, int cmd, + struct file_lock *fl); extern int lockd_up(int proto); extern void lockd_down(void); diff --git a/include/linux/m41t00.h b/include/linux/m41t00.h deleted file mode 100644 index b423360ca38e..000000000000 --- a/include/linux/m41t00.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Definitions for the ST M41T00 family of i2c rtc chips. - * - * Author: Mark A. Greer <mgreer@mvista.com> - * - * 2005, 2006 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. 
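The lockd interface change means the NFS client no longer hands an inode to nlmclnt_proc(); it first binds to the lock manager with nlmclnt_init() and then issues calls against the returned nlm_host. A sketch of that sequence; the field values and the demo_* wrappers are illustrative:

/* sketch: obtain an nlm_host at mount time, use it per lock request */
static struct nlm_host *demo_bind_lockd(const struct sockaddr *addr,
					size_t addrlen, const char *hostname)
{
	struct nlmclnt_initdata init = {
		.hostname	= hostname,
		.address	= addr,
		.addrlen	= addrlen,
		.protocol	= IPPROTO_TCP,	/* illustrative choice */
		.nfs_version	= 3,
	};

	return nlmclnt_init(&init);
}

static int demo_lock(struct nlm_host *host, int cmd, struct file_lock *fl)
{
	return nlmclnt_proc(host, cmd, fl);	/* e.g. cmd = F_SETLK */
}

/* at unmount time: nlmclnt_done(host); */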
- */ - -#ifndef _M41T00_H -#define _M41T00_H - -#define M41T00_DRV_NAME "m41t00" -#define M41T00_I2C_ADDR 0x68 - -#define M41T00_TYPE_M41T00 0 -#define M41T00_TYPE_M41T81 81 -#define M41T00_TYPE_M41T85 85 - -struct m41t00_platform_data { - u8 type; - u8 i2c_addr; - u8 sqw_freq; -}; - -/* SQW output disabled, this is default value by power on */ -#define M41T00_SQW_DISABLE (0) - -#define M41T00_SQW_32KHZ (1<<4) /* 32.768 KHz */ -#define M41T00_SQW_8KHZ (2<<4) /* 8.192 KHz */ -#define M41T00_SQW_4KHZ (3<<4) /* 4.096 KHz */ -#define M41T00_SQW_2KHZ (4<<4) /* 2.048 KHz */ -#define M41T00_SQW_1KHZ (5<<4) /* 1.024 KHz */ -#define M41T00_SQW_512HZ (6<<4) /* 512 Hz */ -#define M41T00_SQW_256HZ (7<<4) /* 256 Hz */ -#define M41T00_SQW_128HZ (8<<4) /* 128 Hz */ -#define M41T00_SQW_64HZ (9<<4) /* 64 Hz */ -#define M41T00_SQW_32HZ (10<<4) /* 32 Hz */ -#define M41T00_SQW_16HZ (11<<4) /* 16 Hz */ -#define M41T00_SQW_8HZ (12<<4) /* 8 Hz */ -#define M41T00_SQW_4HZ (13<<4) /* 4 Hz */ -#define M41T00_SQW_2HZ (14<<4) /* 2 Hz */ -#define M41T00_SQW_1HZ (15<<4) /* 1 Hz */ - -extern ulong m41t00_get_rtc_time(void); -extern int m41t00_set_rtc_time(ulong nowtime); - -#endif /* _M41T00_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 1b7b95c67aca..1bba6789a50a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -12,7 +12,6 @@ #include <linux/prio_tree.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> -#include <linux/security.h> struct mempolicy; struct anon_vma; @@ -34,6 +33,8 @@ extern int sysctl_legacy_va_layout; #define sysctl_legacy_va_layout 0 #endif +extern unsigned long mmap_min_addr; + #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -1117,9 +1118,21 @@ static inline void vm_stat_account(struct mm_struct *mm, } #endif /* CONFIG_PROC_FS */ -#ifndef CONFIG_DEBUG_PAGEALLOC +#ifdef CONFIG_DEBUG_PAGEALLOC +extern int debug_pagealloc_enabled; + +extern void kernel_map_pages(struct page *page, int numpages, int enable); + +static inline void enable_debug_pagealloc(void) +{ + debug_pagealloc_enabled = 1; +} +#else static inline void kernel_map_pages(struct page *page, int numpages, int enable) {} +static inline void enable_debug_pagealloc(void) +{ +} #endif extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk); @@ -1145,6 +1158,7 @@ extern int randomize_va_space; #endif const char * arch_vma_name(struct vm_area_struct *vma); +void print_vma_addr(char *prefix, unsigned long rip); struct page *sparse_mem_map_populate(unsigned long pnum, int nid); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); diff --git a/include/linux/module.h b/include/linux/module.h index 2cbc0b87e329..ac481e2094fd 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -178,7 +178,7 @@ void *__symbol_get_gpl(const char *symbol); #define __CRC_SYMBOL(sym, sec) \ extern void *__crc_##sym __attribute__((weak)); \ static const unsigned long __kcrctab_##sym \ - __attribute_used__ \ + __used \ __attribute__((section("__kcrctab" sec), unused)) \ = (unsigned long) &__crc_##sym; #else @@ -193,7 +193,7 @@ void *__symbol_get_gpl(const char *symbol); __attribute__((section("__ksymtab_strings"))) \ = MODULE_SYMBOL_PREFIX #sym; \ static const struct kernel_symbol __ksymtab_##sym \ - __attribute_used__ \ + __used \ __attribute__((section("__ksymtab" sec), unused)) \ = { (unsigned long)&sym, __kstrtab_##sym } @@ -446,11 +446,14 @@ static inline void __module_get(struct module *module) __mod ? 
__mod->name : "kernel"; \ }) -/* For kallsyms to ask for address resolution. NULL means not found. */ -const char *module_address_lookup(unsigned long addr, - unsigned long *symbolsize, - unsigned long *offset, - char **modname); +/* For kallsyms to ask for address resolution. namebuf should be at + * least KSYM_NAME_LEN long: a pointer to namebuf is returned if + * found, otherwise NULL. */ +char *module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, + char *namebuf); int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -516,10 +519,11 @@ static inline void module_put(struct module *module) #define module_name(mod) "kernel" /* For kallsyms to ask for address resolution. NULL means not found. */ -static inline const char *module_address_lookup(unsigned long addr, - unsigned long *symbolsize, - unsigned long *offset, - char **modname) +static inline char *module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, + char *namebuf) { return NULL; } @@ -574,7 +578,9 @@ struct device_driver; #ifdef CONFIG_SYSFS struct module; -extern struct kset module_subsys; +extern struct kset *module_kset; +extern struct kobj_type module_ktype; +extern int module_sysfs_initialized; int mod_sysfs_init(struct module *mod); int mod_sysfs_setup(struct module *mod, @@ -607,21 +613,6 @@ static inline void module_remove_modinfo_attrs(struct module *mod) #endif /* CONFIG_SYSFS */ -#if defined(CONFIG_SYSFS) && defined(CONFIG_MODULES) - -void module_add_driver(struct module *mod, struct device_driver *drv); -void module_remove_driver(struct device_driver *drv); - -#else /* not both CONFIG_SYSFS && CONFIG_MODULES */ - -static inline void module_add_driver(struct module *mod, struct device_driver *drv) -{ } - -static inline void module_remove_driver(struct device_driver *drv) -{ } - -#endif - #define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x) /* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 13410b20600f..8126e55c5bdc 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -18,7 +18,7 @@ #define __module_cat(a,b) ___module_cat(a,b) #define __MODULE_INFO(tag, name, info) \ static const char __module_cat(name,__LINE__)[] \ - __attribute_used__ \ + __used \ __attribute__((section(".modinfo"),unused)) = __stringify(tag) "=" info #else /* !MODULE */ #define __MODULE_INFO(tag, name, info) @@ -72,7 +72,7 @@ struct kparam_array BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \ static const char __param_str_##name[] = prefix #name; \ static struct kernel_param const __param_##name \ - __attribute_used__ \ + __used \ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ = { __param_str_##name, perm, set, get, { arg } } diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index d2ae6185f03b..69327b7b4ce4 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -15,6 +15,7 @@ #include <asm/types.h> #include <linux/mv643xx_eth.h> +#include <linux/mv643xx_i2c.h> /****************************************/ /* Processor Address Space */ @@ -863,7 +864,6 @@ /* I2C Registers */ /****************************************/ -#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c" #define MV64XXX_I2C_OFFSET 0xc000 
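module_address_lookup() now fills a caller-provided buffer instead of returning a pointer into module internals, so kallsyms users must pass KSYM_NAME_LEN bytes of scratch space. A sketch of the new calling convention; the printing around it is illustrative:

/* sketch: resolve an address that may live in a module */
static void demo_print_symbol(unsigned long addr)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long size, offset;
	char *modname;
	char *name;

	name = module_address_lookup(addr, &size, &offset, &modname, namebuf);
	if (name)
		printk(KERN_DEBUG "%s+%#lx/%#lx [%s]\n",
		       name, offset, size, modname ? modname : "core");
}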
#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020 @@ -968,14 +968,6 @@ struct mpsc_pdata { u32 brg_clk_freq; }; -/* i2c Platform Device, Driver Data */ -struct mv64xxx_i2c_pdata { - u32 freq_m; - u32 freq_n; - u32 timeout; /* In milliseconds */ - u32 retries; -}; - /* Watchdog Platform Device, Driver Data */ #define MV64x60_WDT_NAME "mv64x60_wdt" diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h new file mode 100644 index 000000000000..5db5152e9de5 --- /dev/null +++ b/include/linux/mv643xx_i2c.h @@ -0,0 +1,22 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _MV64XXX_I2C_H_ +#define _MV64XXX_I2C_H_ + +#include <linux/types.h> + +#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c" + +/* i2c Platform Device, Driver Data */ +struct mv64xxx_i2c_pdata { + u32 freq_m; + u32 freq_n; + u32 timeout; /* In milliseconds */ +}; + +#endif /*_MV64XXX_I2C_H_*/ diff --git a/include/linux/net.h b/include/linux/net.h index 596131ea46f4..c414d90e647b 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -22,6 +22,7 @@ #include <asm/socket.h> struct poll_table_struct; +struct pipe_inode_info; struct inode; struct net; @@ -172,6 +173,8 @@ struct proto_ops { struct vm_area_struct * vma); ssize_t (*sendpage) (struct socket *sock, struct page *page, int offset, size_t size, int flags); + ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags); }; struct net_proto_family { @@ -183,6 +186,13 @@ struct net_proto_family { struct iovec; struct kvec; +enum { + SOCK_WAKE_IO, + SOCK_WAKE_WAITD, + SOCK_WAKE_SPACE, + SOCK_WAKE_URG, +}; + extern int sock_wake_async(struct socket *sk, int how, int band); extern int sock_register(const struct net_proto_family *fam); extern void sock_unregister(int family); @@ -327,7 +337,6 @@ static const struct proto_ops name##_ops = { \ #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> -extern ctl_table net_table[]; extern int net_msg_cost; extern int net_msg_burst; #endif diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 16adac688af5..d74e79bacd2d 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -7,6 +7,8 @@ #include <linux/skbuff.h> #include <linux/net.h> #include <linux/if.h> +#include <linux/in.h> +#include <linux/in6.h> #include <linux/wait.h> #include <linux/list.h> #endif @@ -39,6 +41,23 @@ #define NFC_ALTERED 0x8000 #endif +enum nf_inet_hooks { + NF_INET_PRE_ROUTING, + NF_INET_LOCAL_IN, + NF_INET_FORWARD, + NF_INET_LOCAL_OUT, + NF_INET_POST_ROUTING, + NF_INET_NUMHOOKS +}; + +union nf_inet_addr { + u_int32_t all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + #ifdef __KERNEL__ #ifdef CONFIG_NETFILTER @@ -92,19 +111,6 @@ struct nf_sockopt_ops struct module *owner; }; -/* Each queued (to userspace) skbuff has one of these. */ -struct nf_info -{ - /* The ops struct which sent us to userspace. */ - struct nf_hook_ops *elem; - - /* If we're sent to userspace, this keeps housekeeping info */ - int pf; - unsigned int hook; - struct net_device *indev, *outdev; - int (*okfn)(struct sk_buff *); -}; - /* Function to register/unregister hook points. 
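The new union nf_inet_addr above overlays an IPv4 address, an IPv6 address, and four raw 32-bit words, so address-family-independent code can handle endpoints uniformly. A minimal sketch of such a helper (an assumption, not taken from this patch; it presumes the unused words are zero-filled for IPv4):

	static inline bool example_nf_inet_addr_eq(const union nf_inet_addr *a,
						   const union nf_inet_addr *b)
	{
		/* compare the four words that cover both address families */
		return a->all[0] == b->all[0] && a->all[1] == b->all[1] &&
		       a->all[2] == b->all[2] && a->all[3] == b->all[3];
	}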
*/ int nf_register_hook(struct nf_hook_ops *reg); void nf_unregister_hook(struct nf_hook_ops *reg); @@ -118,71 +124,12 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); #ifdef CONFIG_SYSCTL /* Sysctl registration */ -struct ctl_table_header *nf_register_sysctl_table(struct ctl_table *path, - struct ctl_table *table); -void nf_unregister_sysctl_table(struct ctl_table_header *header, - struct ctl_table *table); -extern struct ctl_table nf_net_netfilter_sysctl_path[]; -extern struct ctl_table nf_net_ipv4_netfilter_sysctl_path[]; +extern struct ctl_path nf_net_netfilter_sysctl_path[]; +extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; #endif /* CONFIG_SYSCTL */ extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS]; -/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will - * disappear once iptables is replaced with pkttables. Please DO NOT use them - * for any new code! */ -#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ -#define NF_LOG_TCPOPT 0x02 /* Log TCP options */ -#define NF_LOG_IPOPT 0x04 /* Log IP options */ -#define NF_LOG_UID 0x08 /* Log UID owning local socket */ -#define NF_LOG_MASK 0x0f - -#define NF_LOG_TYPE_LOG 0x01 -#define NF_LOG_TYPE_ULOG 0x02 - -struct nf_loginfo { - u_int8_t type; - union { - struct { - u_int32_t copy_len; - u_int16_t group; - u_int16_t qthreshold; - } ulog; - struct { - u_int8_t level; - u_int8_t logflags; - } log; - } u; -}; - -typedef void nf_logfn(unsigned int pf, - unsigned int hooknum, - const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct nf_loginfo *li, - const char *prefix); - -struct nf_logger { - struct module *me; - nf_logfn *logfn; - char *name; -}; - -/* Function to register/unregister log function. */ -int nf_log_register(int pf, struct nf_logger *logger); -void nf_log_unregister(struct nf_logger *logger); -void nf_log_unregister_pf(int pf); - -/* Calls the registered backend logging function */ -void nf_log_packet(int pf, - unsigned int hooknum, - const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - struct nf_loginfo *li, - const char *fmt, ...); - int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh); @@ -265,65 +212,28 @@ int compat_nf_setsockopt(struct sock *sk, int pf, int optval, int compat_nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt, int *len); -/* Packet queuing */ -struct nf_queue_handler { - int (*outfn)(struct sk_buff *skb, struct nf_info *info, - unsigned int queuenum, void *data); - void *data; - char *name; -}; -extern int nf_register_queue_handler(int pf, - struct nf_queue_handler *qh); -extern int nf_unregister_queue_handler(int pf, - struct nf_queue_handler *qh); -extern void nf_unregister_queue_handlers(struct nf_queue_handler *qh); -extern void nf_reinject(struct sk_buff *skb, - struct nf_info *info, - unsigned int verdict); - -/* FIXME: Before cache is ever used, this must be implemented for real. */ -extern void nf_invalidate_cache(int pf); - /* Call this before modifying an existing packet: ensures it is modifiable and linear to the point you care about (writable_len). Returns true or false. 
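A minimal usage sketch for skb_make_writable(): the caller makes the packet writable up to the header it is about to change before touching it. The surrounding function, the transport-header offset, and the checksum handling are assumptions for illustration only, not part of this patch.

	static unsigned int example_mangle_tcp(struct sk_buff *skb,
					       unsigned int thoff)
	{
		struct tcphdr *tcph;

		/* ensure data up to and including the TCP header is
		 * linear and private to us before modifying it */
		if (!skb_make_writable(skb, thoff + sizeof(struct tcphdr)))
			return NF_DROP;

		tcph = (struct tcphdr *)(skb_network_header(skb) + thoff);
		/* ... adjust tcph fields and fix up checksums here ... */
		return NF_ACCEPT;
	}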
*/ extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); -static inline void nf_csum_replace4(__sum16 *sum, __be32 from, __be32 to) -{ - __be32 diff[] = { ~from, to }; - - *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum))); -} - -static inline void nf_csum_replace2(__sum16 *sum, __be16 from, __be16 to) -{ - nf_csum_replace4(sum, (__force __be32)from, (__force __be32)to); -} - -extern void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, - __be32 from, __be32 to, int pseudohdr); - -static inline void nf_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, - __be16 from, __be16 to, int pseudohdr) -{ - nf_proto_csum_replace4(sum, skb, (__force __be32)from, - (__force __be32)to, pseudohdr); -} +struct flowi; +struct nf_queue_entry; struct nf_afinfo { unsigned short family; __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); + int (*route)(struct dst_entry **dst, struct flowi *fl); void (*saveroute)(const struct sk_buff *skb, - struct nf_info *info); + struct nf_queue_entry *entry); int (*reroute)(struct sk_buff *skb, - const struct nf_info *info); + const struct nf_queue_entry *entry); int route_key_size; }; -extern struct nf_afinfo *nf_afinfo[]; -static inline struct nf_afinfo *nf_get_afinfo(unsigned short family) +extern const struct nf_afinfo *nf_afinfo[NPROTO]; +static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) { return rcu_dereference(nf_afinfo[family]); } @@ -332,7 +242,7 @@ static inline __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family) { - struct nf_afinfo *afinfo; + const struct nf_afinfo *afinfo; __sum16 csum = 0; rcu_read_lock(); @@ -343,10 +253,8 @@ nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, return csum; } -extern int nf_register_afinfo(struct nf_afinfo *afinfo); -extern void nf_unregister_afinfo(struct nf_afinfo *afinfo); - -#define nf_info_reroute(x) ((void *)x + sizeof(struct nf_info)) +extern int nf_register_afinfo(const struct nf_afinfo *afinfo); +extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); #include <net/flow.h> extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); @@ -354,11 +262,16 @@ extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) { -#if defined(CONFIG_IP_NF_NAT_NEEDED) || defined(CONFIG_NF_NAT_NEEDED) +#ifdef CONFIG_NF_NAT_NEEDED void (*decodefn)(struct sk_buff *, struct flowi *); - if (family == AF_INET && (decodefn = ip_nat_decode_session) != NULL) - decodefn(skb, fl); + if (family == AF_INET) { + rcu_read_lock(); + decodefn = rcu_dereference(ip_nat_decode_session); + if (decodefn) + decodefn(skb, fl); + rcu_read_unlock(); + } #endif } diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index b87e83a5e070..91fef0cae42f 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -10,6 +10,7 @@ header-y += xt_DSCP.h header-y += xt_MARK.h header-y += xt_NFLOG.h header-y += xt_NFQUEUE.h +header-y += xt_RATEEST.h header-y += xt_SECMARK.h header-y += xt_TCPMSS.h header-y += xt_comment.h @@ -20,14 +21,17 @@ header-y += xt_dccp.h header-y += xt_dscp.h header-y += xt_esp.h header-y += xt_hashlimit.h +header-y += xt_iprange.h header-y += xt_helper.h header-y += xt_length.h header-y += xt_limit.h header-y += xt_mac.h header-y += xt_mark.h 
header-y += xt_multiport.h +header-y += xt_owner.h header-y += xt_pkttype.h header-y += xt_policy.h +header-y += xt_rateest.h header-y += xt_realm.h header-y += xt_sctp.h header-y += xt_state.h diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 9e0dae07861e..bad1eb760f61 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -129,6 +129,14 @@ enum ip_conntrack_events /* Mark is set */ IPCT_MARK_BIT = 12, IPCT_MARK = (1 << IPCT_MARK_BIT), + + /* NAT sequence adjustment */ + IPCT_NATSEQADJ_BIT = 13, + IPCT_NATSEQADJ = (1 << IPCT_NATSEQADJ_BIT), + + /* Secmark is set */ + IPCT_SECMARK_BIT = 14, + IPCT_SECMARK = (1 << IPCT_SECMARK_BIT), }; enum ip_conntrack_expect_events { diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index aabd24ac7631..26f9226ea72b 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -31,7 +31,7 @@ struct nf_conn; extern int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, - union nf_conntrack_address *addr, __be16 *port); + union nf_inet_addr *addr, __be16 *port); extern void nf_conntrack_h245_expect(struct nf_conn *new, struct nf_conntrack_expect *this); extern void nf_conntrack_q931_expect(struct nf_conn *new, @@ -39,12 +39,12 @@ extern void nf_conntrack_q931_expect(struct nf_conn *new, extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned char **data, int dataoff, H245_TransportAddress *taddr, - union nf_conntrack_address *addr, + union nf_inet_addr *addr, __be16 port); extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned char **data, int dataoff, TransportAddress *taddr, - union nf_conntrack_address *addr, + union nf_inet_addr *addr, __be16 port); extern int (*set_sig_addr_hook) (struct sk_buff *skb, struct nf_conn *ct, diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 5cf2c115cce4..768f78c4ac53 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -21,7 +21,6 @@ struct ip_ct_sctp enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; - u_int32_t ttag[IP_CT_DIR_MAX]; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 4affa3fe78e0..e3e1533aba2d 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -37,6 +37,9 @@ enum ctattr_type { CTA_ID, CTA_NAT_DST, CTA_TUPLE_MASTER, + CTA_NAT_SEQ_ADJ_ORIG, + CTA_NAT_SEQ_ADJ_REPLY, + CTA_SECMARK, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) @@ -119,6 +122,14 @@ enum ctattr_protonat { }; #define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1) +enum ctattr_natseq { + CTA_NAT_SEQ_CORRECTION_POS, + CTA_NAT_SEQ_OFFSET_BEFORE, + CTA_NAT_SEQ_OFFSET_AFTER, + __CTA_NAT_SEQ_MAX +}; +#define CTA_NAT_SEQ_MAX (__CTA_NAT_SEQ_MAX - 1) + enum ctattr_expect { CTA_EXPECT_UNSPEC, CTA_EXPECT_MASTER, diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h index 5966afa026e9..a85721332924 100644 --- a/include/linux/netfilter/nfnetlink_log.h +++ b/include/linux/netfilter/nfnetlink_log.h @@ -47,6 +47,7 @@ enum nfulnl_attr_type { NFULA_UID, /* user id of socket */ NFULA_SEQ, /* instance-local sequence number */ NFULA_SEQ_GLOBAL, /* global sequence number */ + NFULA_GID, /* 
group id of socket */ __NFULA_MAX }; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 03e6ce979eaa..b99ede51318a 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -126,6 +126,49 @@ struct xt_counters_info #define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */ +/* fn returns 0 to continue iteration */ +#define XT_MATCH_ITERATE(type, e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct xt_entry_match *__m; \ + \ + for (__i = sizeof(type); \ + __i < (e)->target_offset; \ + __i += __m->u.match_size) { \ + __m = (void *)e + __i; \ + \ + __ret = fn(__m , ## args); \ + if (__ret != 0) \ + break; \ + } \ + __ret; \ +}) + +/* fn returns 0 to continue iteration */ +#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \ +({ \ + unsigned int __i, __n; \ + int __ret = 0; \ + type *__entry; \ + \ + for (__i = 0, __n = 0; __i < (size); \ + __i += __entry->next_offset, __n++) { \ + __entry = (void *)(entries) + __i; \ + if (__n < n) \ + continue; \ + \ + __ret = fn(__entry , ## args); \ + if (__ret != 0) \ + break; \ + } \ + __ret; \ +}) + +/* fn returns 0 to continue iteration */ +#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \ + XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) + #ifdef __KERNEL__ #include <linux/netdevice.h> @@ -265,13 +308,16 @@ struct xt_table_info unsigned int initial_entries; /* Entry points and underflows */ - unsigned int hook_entry[NF_IP_NUMHOOKS]; - unsigned int underflow[NF_IP_NUMHOOKS]; + unsigned int hook_entry[NF_INET_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; /* ipt_entry tables: one per CPU */ - char *entries[NR_CPUS]; + /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ + char *entries[1]; }; +#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ + + nr_cpu_ids * sizeof(char *)) extern int xt_register_target(struct xt_target *target); extern void xt_unregister_target(struct xt_target *target); extern int xt_register_targets(struct xt_target *target, unsigned int n); @@ -378,9 +424,13 @@ struct compat_xt_counters_info extern void xt_compat_lock(int af); extern void xt_compat_unlock(int af); +extern int xt_compat_add_offset(int af, unsigned int offset, short delta); +extern void xt_compat_flush_offsets(int af); +extern short xt_compat_calc_jump(int af, unsigned int offset); + extern int xt_compat_match_offset(struct xt_match *match); -extern void xt_compat_match_from_user(struct xt_entry_match *m, - void **dstptr, int *size); +extern int xt_compat_match_from_user(struct xt_entry_match *m, + void **dstptr, int *size); extern int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, int *size); diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h index 9f744689fffc..4e58ba43c289 100644 --- a/include/linux/netfilter/xt_CONNMARK.h +++ b/include/linux/netfilter/xt_CONNMARK.h @@ -22,4 +22,9 @@ struct xt_connmark_target_info { u_int8_t mode; }; +struct xt_connmark_tginfo1 { + u_int32_t ctmark, ctmask, nfmask; + u_int8_t mode; +}; + #endif /*_XT_CONNMARK_H_target*/ diff --git a/include/linux/netfilter/xt_DSCP.h b/include/linux/netfilter/xt_DSCP.h index 3c7c963997bd..14da1968e2c6 100644 --- a/include/linux/netfilter/xt_DSCP.h +++ b/include/linux/netfilter/xt_DSCP.h @@ -17,4 +17,9 @@ struct xt_DSCP_info { u_int8_t dscp; }; +struct xt_tos_target_info { + u_int8_t tos_value; + u_int8_t tos_mask; +}; + #endif /* _XT_DSCP_TARGET_H */ diff 
--git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h index b021e93ee5d6..778b278fd9f2 100644 --- a/include/linux/netfilter/xt_MARK.h +++ b/include/linux/netfilter/xt_MARK.h @@ -18,4 +18,8 @@ struct xt_mark_target_info_v1 { u_int8_t mode; }; +struct xt_mark_tginfo2 { + u_int32_t mark, mask; +}; + #endif /*_XT_MARK_H_target */ diff --git a/include/linux/netfilter/xt_RATEEST.h b/include/linux/netfilter/xt_RATEEST.h new file mode 100644 index 000000000000..f79e3133cbea --- /dev/null +++ b/include/linux/netfilter/xt_RATEEST.h @@ -0,0 +1,13 @@ +#ifndef _XT_RATEEST_TARGET_H +#define _XT_RATEEST_TARGET_H + +struct xt_rateest_target_info { + char name[IFNAMSIZ]; + int8_t interval; + u_int8_t ewma_log; + + /* Used internally by the kernel */ + struct xt_rateest *est __attribute__((aligned(8))); +}; + +#endif /* _XT_RATEEST_TARGET_H */ diff --git a/include/linux/netfilter/xt_TCPOPTSTRIP.h b/include/linux/netfilter/xt_TCPOPTSTRIP.h new file mode 100644 index 000000000000..2db543214ff5 --- /dev/null +++ b/include/linux/netfilter/xt_TCPOPTSTRIP.h @@ -0,0 +1,13 @@ +#ifndef _XT_TCPOPTSTRIP_H +#define _XT_TCPOPTSTRIP_H + +#define tcpoptstrip_set_bit(bmap, idx) \ + (bmap[(idx) >> 5] |= 1U << (idx & 31)) +#define tcpoptstrip_test_bit(bmap, idx) \ + (((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0) + +struct xt_tcpoptstrip_target_info { + u_int32_t strip_bmap[8]; +}; + +#endif /* _XT_TCPOPTSTRIP_H */ diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h index 37e933c9987d..7e3284bcbd2b 100644 --- a/include/linux/netfilter/xt_connlimit.h +++ b/include/linux/netfilter/xt_connlimit.h @@ -5,12 +5,17 @@ struct xt_connlimit_data; struct xt_connlimit_info { union { - __be32 v4_mask; - __be32 v6_mask[4]; + union nf_inet_addr mask; +#ifndef __KERNEL__ + union { + __be32 v4_mask; + __be32 v6_mask[4]; + }; +#endif }; unsigned int limit, inverse; - /* this needs to be at the end */ + /* Used internally by the kernel */ struct xt_connlimit_data *data __attribute__((aligned(8))); }; diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h index c592f6ae0883..359ef86918dc 100644 --- a/include/linux/netfilter/xt_connmark.h +++ b/include/linux/netfilter/xt_connmark.h @@ -15,4 +15,9 @@ struct xt_connmark_info { u_int8_t invert; }; +struct xt_connmark_mtinfo1 { + u_int32_t mark, mask; + u_int8_t invert; +}; + #endif /*_XT_CONNMARK_H*/ diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h index 70b6f718cf4c..d2492a3329be 100644 --- a/include/linux/netfilter/xt_conntrack.h +++ b/include/linux/netfilter/xt_conntrack.h @@ -6,7 +6,9 @@ #define _XT_CONNTRACK_H #include <linux/netfilter/nf_conntrack_tuple_common.h> -#include <linux/in.h> +#ifdef __KERNEL__ +# include <linux/in.h> +#endif #define XT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1)) #define XT_CONNTRACK_STATE_INVALID (1 << 0) @@ -60,4 +62,16 @@ struct xt_conntrack_info /* Inverse flags */ u_int8_t invflags; }; + +struct xt_conntrack_mtinfo1 { + union nf_inet_addr origsrc_addr, origsrc_mask; + union nf_inet_addr origdst_addr, origdst_mask; + union nf_inet_addr replsrc_addr, replsrc_mask; + union nf_inet_addr repldst_addr, repldst_mask; + u_int32_t expires_min, expires_max; + u_int16_t l4proto; + u_int8_t state_mask, status_mask; + u_int8_t match_flags, invert_flags; +}; + #endif /*_XT_CONNTRACK_H*/ diff --git a/include/linux/netfilter/xt_dscp.h b/include/linux/netfilter/xt_dscp.h index 1da61e6acaf7..f49bc1a648dc 100644 --- 
a/include/linux/netfilter/xt_dscp.h +++ b/include/linux/netfilter/xt_dscp.h @@ -20,4 +20,10 @@ struct xt_dscp_info { u_int8_t invert; }; +struct xt_tos_match_info { + u_int8_t tos_mask; + u_int8_t tos_value; + u_int8_t invert; +}; + #endif /* _XT_DSCP_H */ diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h index b4556b8edbfd..c19972e4564d 100644 --- a/include/linux/netfilter/xt_hashlimit.h +++ b/include/linux/netfilter/xt_hashlimit.h @@ -29,9 +29,9 @@ struct hashlimit_cfg { struct xt_hashlimit_info { char name [IFNAMSIZ]; /* name */ struct hashlimit_cfg cfg; - struct xt_hashlimit_htable *hinfo; /* Used internally by the kernel */ + struct xt_hashlimit_htable *hinfo; union { void *ptr; struct xt_hashlimit_info *master; diff --git a/include/linux/netfilter/xt_iprange.h b/include/linux/netfilter/xt_iprange.h new file mode 100644 index 000000000000..a4299c7d3680 --- /dev/null +++ b/include/linux/netfilter/xt_iprange.h @@ -0,0 +1,17 @@ +#ifndef _LINUX_NETFILTER_XT_IPRANGE_H +#define _LINUX_NETFILTER_XT_IPRANGE_H 1 + +enum { + IPRANGE_SRC = 1 << 0, /* match source IP address */ + IPRANGE_DST = 1 << 1, /* match destination IP address */ + IPRANGE_SRC_INV = 1 << 4, /* negate the condition */ + IPRANGE_DST_INV = 1 << 5, /* -"- */ +}; + +struct xt_iprange_mtinfo { + union nf_inet_addr src_min, src_max; + union nf_inet_addr dst_min, dst_max; + u_int8_t flags; +}; + +#endif /* _LINUX_NETFILTER_XT_IPRANGE_H */ diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h index 802dd4842caf..fae74bc3f34e 100644 --- a/include/linux/netfilter/xt_mark.h +++ b/include/linux/netfilter/xt_mark.h @@ -6,4 +6,9 @@ struct xt_mark_info { u_int8_t invert; }; +struct xt_mark_mtinfo1 { + u_int32_t mark, mask; + u_int8_t invert; +}; + #endif /*_XT_MARK_H*/ diff --git a/include/linux/netfilter/xt_owner.h b/include/linux/netfilter/xt_owner.h new file mode 100644 index 000000000000..eacd34efebd5 --- /dev/null +++ b/include/linux/netfilter/xt_owner.h @@ -0,0 +1,16 @@ +#ifndef _XT_OWNER_MATCH_H +#define _XT_OWNER_MATCH_H + +enum { + XT_OWNER_UID = 1 << 0, + XT_OWNER_GID = 1 << 1, + XT_OWNER_SOCKET = 1 << 2, +}; + +struct xt_owner_match_info { + u_int32_t uid; + u_int32_t gid; + u_int8_t match, invert; +}; + +#endif /* _XT_OWNER_MATCH_H */ diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h index 45654d359a68..053d8cc65464 100644 --- a/include/linux/netfilter/xt_policy.h +++ b/include/linux/netfilter/xt_policy.h @@ -27,18 +27,33 @@ struct xt_policy_spec reqid:1; }; +#ifndef __KERNEL__ union xt_policy_addr { struct in_addr a4; struct in6_addr a6; }; +#endif struct xt_policy_elem { - union xt_policy_addr saddr; - union xt_policy_addr smask; - union xt_policy_addr daddr; - union xt_policy_addr dmask; + union { +#ifdef __KERNEL__ + struct { + union nf_inet_addr saddr; + union nf_inet_addr smask; + union nf_inet_addr daddr; + union nf_inet_addr dmask; + }; +#else + struct { + union xt_policy_addr saddr; + union xt_policy_addr smask; + union xt_policy_addr daddr; + union xt_policy_addr dmask; + }; +#endif + }; __be32 spi; u_int32_t reqid; u_int8_t proto; diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h index acd7fd77bbee..4c8368d781e5 100644 --- a/include/linux/netfilter/xt_quota.h +++ b/include/linux/netfilter/xt_quota.h @@ -9,6 +9,8 @@ enum xt_quota_flags { struct xt_quota_info { u_int32_t flags; u_int32_t pad; + + /* Used internally by the kernel */ aligned_u64 quota; struct xt_quota_info 
*master; }; diff --git a/include/linux/netfilter/xt_rateest.h b/include/linux/netfilter/xt_rateest.h new file mode 100644 index 000000000000..2010cb74250f --- /dev/null +++ b/include/linux/netfilter/xt_rateest.h @@ -0,0 +1,35 @@ +#ifndef _XT_RATEEST_MATCH_H +#define _XT_RATEEST_MATCH_H + +enum xt_rateest_match_flags { + XT_RATEEST_MATCH_INVERT = 1<<0, + XT_RATEEST_MATCH_ABS = 1<<1, + XT_RATEEST_MATCH_REL = 1<<2, + XT_RATEEST_MATCH_DELTA = 1<<3, + XT_RATEEST_MATCH_BPS = 1<<4, + XT_RATEEST_MATCH_PPS = 1<<5, +}; + +enum xt_rateest_match_mode { + XT_RATEEST_MATCH_NONE, + XT_RATEEST_MATCH_EQ, + XT_RATEEST_MATCH_LT, + XT_RATEEST_MATCH_GT, +}; + +struct xt_rateest_match_info { + char name1[IFNAMSIZ]; + char name2[IFNAMSIZ]; + u_int16_t flags; + u_int16_t mode; + u_int32_t bps1; + u_int32_t pps1; + u_int32_t bps2; + u_int32_t pps2; + + /* Used internally by the kernel */ + struct xt_rateest *est1 __attribute__((aligned(8))); + struct xt_rateest *est2 __attribute__((aligned(8))); +}; + +#endif /* _XT_RATEEST_MATCH_H */ diff --git a/include/linux/netfilter/xt_statistic.h b/include/linux/netfilter/xt_statistic.h index c344e9916e23..3d38bc975048 100644 --- a/include/linux/netfilter/xt_statistic.h +++ b/include/linux/netfilter/xt_statistic.h @@ -23,6 +23,7 @@ struct xt_statistic_info { struct { u_int32_t every; u_int32_t packet; + /* Used internally by the kernel */ u_int32_t count; } nth; } u; diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h index 3b3419f2637d..bb21dd1aee2d 100644 --- a/include/linux/netfilter/xt_string.h +++ b/include/linux/netfilter/xt_string.h @@ -12,6 +12,8 @@ struct xt_string_info char pattern[XT_STRING_MAX_PATTERN_SIZE]; u_int8_t patlen; u_int8_t invert; + + /* Used internally by the kernel */ struct ts_config __attribute__((aligned(8))) *config; }; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 2fc73fa8e37f..53dd4df27aa1 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -217,21 +217,8 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e } /* fn returns 0 to continue iteration */ -#define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct arpt_entry *__entry; \ - \ - for (__i = 0; __i < (size); __i += __entry->next_offset) { \ - __entry = (void *)(entries) + __i; \ - \ - __ret = fn(__entry , ## args); \ - if (__ret != 0) \ - break; \ - } \ - __ret; \ -}) +#define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) /* * Main firewall chains definitions and global var's definitions. @@ -293,6 +280,37 @@ extern unsigned int arpt_do_table(struct sk_buff *skb, const struct net_device *out, struct arpt_table *table); -#define ARPT_ALIGN(s) (((s) + (__alignof__(struct arpt_entry)-1)) & ~(__alignof__(struct arpt_entry)-1)) +#define ARPT_ALIGN(s) XT_ALIGN(s) + +#ifdef CONFIG_COMPAT +#include <net/compat.h> + +struct compat_arpt_entry +{ + struct arpt_arp arp; + u_int16_t target_offset; + u_int16_t next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +static inline struct arpt_entry_target * +compat_arpt_get_target(struct compat_arpt_entry *e) +{ + return (void *)e + e->target_offset; +} + +#define COMPAT_ARPT_ALIGN(s) COMPAT_XT_ALIGN(s) + +/* fn returns 0 to continue iteration */ +#define COMPAT_ARPT_ENTRY_ITERATE(entries, size, fn, args...) 
\ + XT_ENTRY_ITERATE(struct compat_arpt_entry, entries, size, fn, ## args) + +#define COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \ + XT_ENTRY_ITERATE_CONTINUE(struct compat_arpt_entry, entries, size, n, \ + fn, ## args) + +#endif /* CONFIG_COMPAT */ #endif /*__KERNEL__*/ #endif /* _ARPTABLES_H */ diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 1a63adf5c4c1..9a10092e358c 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -36,7 +36,6 @@ #define NFC_IP_DST_PT 0x0400 /* Something else about the proto */ #define NFC_IP_PROTO_UNKNOWN 0x2000 -#endif /* ! __KERNEL__ */ /* IP Hooks */ /* After promisc drops, checksum checks. */ @@ -50,6 +49,7 @@ /* Packets about to hit the wire. */ #define NF_IP_POST_ROUTING 4 #define NF_IP_NUMHOOKS 5 +#endif /* ! __KERNEL__ */ enum nf_ip_hook_priorities { NF_IP_PRI_FIRST = INT_MIN, diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index d79ed69cbc1f..45fcad91e67b 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -156,10 +156,10 @@ struct ipt_getinfo unsigned int valid_hooks; /* Hook entry points: one per netfilter hook. */ - unsigned int hook_entry[NF_IP_NUMHOOKS]; + unsigned int hook_entry[NF_INET_NUMHOOKS]; /* Underflow points. */ - unsigned int underflow[NF_IP_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; /* Number of entries */ unsigned int num_entries; @@ -185,10 +185,10 @@ struct ipt_replace unsigned int size; /* Hook entry points. */ - unsigned int hook_entry[NF_IP_NUMHOOKS]; + unsigned int hook_entry[NF_INET_NUMHOOKS]; /* Underflow points. */ - unsigned int underflow[NF_IP_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; /* Information about old entries: */ /* Number of counters (must be equal to current number of entries). */ @@ -229,60 +229,12 @@ ipt_get_target(struct ipt_entry *e) } /* fn returns 0 to continue iteration */ -#define IPT_MATCH_ITERATE(e, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct ipt_entry_match *__match; \ - \ - for (__i = sizeof(struct ipt_entry); \ - __i < (e)->target_offset; \ - __i += __match->u.match_size) { \ - __match = (void *)(e) + __i; \ - \ - __ret = fn(__match , ## args); \ - if (__ret != 0) \ - break; \ - } \ - __ret; \ -}) +#define IPT_MATCH_ITERATE(e, fn, args...) \ + XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) /* fn returns 0 to continue iteration */ -#define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct ipt_entry *__entry; \ - \ - for (__i = 0; __i < (size); __i += __entry->next_offset) { \ - __entry = (void *)(entries) + __i; \ - \ - __ret = fn(__entry , ## args); \ - if (__ret != 0) \ - break; \ - } \ - __ret; \ -}) - -/* fn returns 0 to continue iteration */ -#define IPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \ -({ \ - unsigned int __i, __n; \ - int __ret = 0; \ - struct ipt_entry *__entry; \ - \ - for (__i = 0, __n = 0; __i < (size); \ - __i += __entry->next_offset, __n++) { \ - __entry = (void *)(entries) + __i; \ - if (__n < n) \ - continue; \ - \ - __ret = fn(__entry , ## args); \ - if (__ret != 0) \ - break; \ - } \ - __ret; \ -}) +#define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) /* * Main firewall chains definitions and global var's definitions. 
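The consolidated XT_ENTRY_ITERATE/XT_MATCH_ITERATE wrappers above all share one contract: the callback receives each entry plus any extra arguments, and returns 0 to continue or non-zero to stop and have that value propagated. A short illustrative callback (an assumption, not taken from this patch):

	static int example_count_entry(struct ipt_entry *e, unsigned int *count)
	{
		(*count)++;
		return 0;	/* 0 means: keep iterating */
	}

	/* Typical call site, given a table blob and its size in bytes:
	 *	unsigned int count = 0;
	 *	IPT_ENTRY_ITERATE(entries, size, example_count_entry, &count);
	 */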
@@ -359,8 +311,28 @@ struct compat_ipt_entry unsigned char elems[0]; }; +/* Helper functions */ +static inline struct ipt_entry_target * +compat_ipt_get_target(struct compat_ipt_entry *e) +{ + return (void *)e + e->target_offset; +} + #define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s) +/* fn returns 0 to continue iteration */ +#define COMPAT_IPT_MATCH_ITERATE(e, fn, args...) \ + XT_MATCH_ITERATE(struct compat_ipt_entry, e, fn, ## args) + +/* fn returns 0 to continue iteration */ +#define COMPAT_IPT_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct compat_ipt_entry, entries, size, fn, ## args) + +/* fn returns 0 to continue iteration */ +#define COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \ + XT_ENTRY_ITERATE_CONTINUE(struct compat_ipt_entry, entries, size, n, \ + fn, ## args) + #endif /* CONFIG_COMPAT */ #endif /*__KERNEL__*/ #endif /* _IPTABLES_H */ diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h index daf50be22c9d..e5a3687c8a72 100644 --- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h +++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h @@ -27,6 +27,7 @@ struct ipt_clusterip_tgt_info { u_int32_t hash_mode; u_int32_t hash_initval; + /* Used internally by the kernel */ struct clusterip_config *config; }; diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h index 166ed01a8122..446de6aef983 100644 --- a/include/linux/netfilter_ipv4/ipt_addrtype.h +++ b/include/linux/netfilter_ipv4/ipt_addrtype.h @@ -1,6 +1,20 @@ #ifndef _IPT_ADDRTYPE_H #define _IPT_ADDRTYPE_H +enum { + IPT_ADDRTYPE_INVERT_SOURCE = 0x0001, + IPT_ADDRTYPE_INVERT_DEST = 0x0002, + IPT_ADDRTYPE_LIMIT_IFACE_IN = 0x0004, + IPT_ADDRTYPE_LIMIT_IFACE_OUT = 0x0008, +}; + +struct ipt_addrtype_info_v1 { + u_int16_t source; /* source-type mask */ + u_int16_t dest; /* dest-type mask */ + u_int32_t flags; +}; + +/* revision 0 */ struct ipt_addrtype_info { u_int16_t source; /* source-type mask */ u_int16_t dest; /* dest-type mask */ diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h index a92fefc3c7ec..5f1aebde4d2f 100644 --- a/include/linux/netfilter_ipv4/ipt_iprange.h +++ b/include/linux/netfilter_ipv4/ipt_iprange.h @@ -2,11 +2,7 @@ #define _IPT_IPRANGE_H #include <linux/types.h> - -#define IPRANGE_SRC 0x01 /* Match source IP address */ -#define IPRANGE_DST 0x02 /* Match destination IP address */ -#define IPRANGE_SRC_INV 0x10 /* Negate the condition */ -#define IPRANGE_DST_INV 0x20 /* Negate the condition */ +#include <linux/netfilter/xt_iprange.h> struct ipt_iprange { /* Inclusive: network order. */ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 66ca8e3100dc..3475a65dae9b 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -40,8 +40,6 @@ #define NFC_IP6_DST_PT 0x0400 /* Something else about the proto */ #define NFC_IP6_PROTO_UNKNOWN 0x2000 -#endif /* ! __KERNEL__ */ - /* IP6 Hooks */ /* After promisc drops, checksum checks. */ @@ -55,6 +53,7 @@ /* Packets about to hit the wire. */ #define NF_IP6_POST_ROUTING 4 #define NF_IP6_NUMHOOKS 5 +#endif /* ! 
__KERNEL__ */ enum nf_ip6_hook_priorities { diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 7dc481ce7cba..110801d699ee 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -216,10 +216,10 @@ struct ip6t_getinfo unsigned int valid_hooks; /* Hook entry points: one per netfilter hook. */ - unsigned int hook_entry[NF_IP6_NUMHOOKS]; + unsigned int hook_entry[NF_INET_NUMHOOKS]; /* Underflow points. */ - unsigned int underflow[NF_IP6_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; /* Number of entries */ unsigned int num_entries; @@ -245,10 +245,10 @@ struct ip6t_replace unsigned int size; /* Hook entry points. */ - unsigned int hook_entry[NF_IP6_NUMHOOKS]; + unsigned int hook_entry[NF_INET_NUMHOOKS]; /* Underflow points. */ - unsigned int underflow[NF_IP6_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; /* Information about old entries: */ /* Number of counters (must be equal to current number of entries). */ @@ -289,40 +289,12 @@ ip6t_get_target(struct ip6t_entry *e) } /* fn returns 0 to continue iteration */ -#define IP6T_MATCH_ITERATE(e, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct ip6t_entry_match *__m; \ - \ - for (__i = sizeof(struct ip6t_entry); \ - __i < (e)->target_offset; \ - __i += __m->u.match_size) { \ - __m = (void *)(e) + __i; \ - \ - __ret = fn(__m , ## args); \ - if (__ret != 0) \ - break; \ - } \ - __ret; \ -}) +#define IP6T_MATCH_ITERATE(e, fn, args...) \ + XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) /* fn returns 0 to continue iteration */ -#define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct ip6t_entry *__e; \ - \ - for (__i = 0; __i < (size); __i += __e->next_offset) { \ - __e = (void *)(entries) + __i; \ - \ - __ret = fn(__e , ## args); \ - if (__ret != 0) \ - break; \ - } \ - __ret; \ -}) +#define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) /* * Main firewall chains definitions and global var's definitions. @@ -352,7 +324,42 @@ extern int ip6_masked_addrcmp(const struct in6_addr *addr1, const struct in6_addr *mask, const struct in6_addr *addr2); -#define IP6T_ALIGN(s) (((s) + (__alignof__(struct ip6t_entry)-1)) & ~(__alignof__(struct ip6t_entry)-1)) +#define IP6T_ALIGN(s) XT_ALIGN(s) +#ifdef CONFIG_COMPAT +#include <net/compat.h> + +struct compat_ip6t_entry +{ + struct ip6t_ip6 ipv6; + compat_uint_t nfcache; + u_int16_t target_offset; + u_int16_t next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +static inline struct ip6t_entry_target * +compat_ip6t_get_target(struct compat_ip6t_entry *e) +{ + return (void *)e + e->target_offset; +} + +#define COMPAT_IP6T_ALIGN(s) COMPAT_XT_ALIGN(s) + +/* fn returns 0 to continue iteration */ +#define COMPAT_IP6T_MATCH_ITERATE(e, fn, args...) \ + XT_MATCH_ITERATE(struct compat_ip6t_entry, e, fn, ## args) + +/* fn returns 0 to continue iteration */ +#define COMPAT_IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct compat_ip6t_entry, entries, size, fn, ## args) + +#define COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) 
\ + XT_ENTRY_ITERATE_CONTINUE(struct compat_ip6t_entry, entries, size, n, \ + fn, ## args) + +#endif /* CONFIG_COMPAT */ #endif /*__KERNEL__*/ #endif /* _IP6_TABLES_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index d5bfaba595c7..bd13b6f4a98e 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -178,6 +178,7 @@ extern struct sock *netlink_kernel_create(struct net *net, void (*input)(struct sk_buff *skb), struct mutex *cb_mutex, struct module *module); +extern void netlink_kernel_release(struct sock *sk); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); @@ -245,7 +246,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) } #define NLMSG_NEW(skb, pid, seq, type, len, flags) \ -({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \ +({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \ goto nlmsg_failure; \ __nlmsg_put(skb, pid, seq, type, len, flags); }) diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 20250d963d72..a0525a1f4715 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -20,12 +20,11 @@ struct netpoll { u32 local_ip, remote_ip; u16 local_port, remote_port; - u8 local_mac[ETH_ALEN], remote_mac[ETH_ALEN]; + u8 remote_mac[ETH_ALEN]; }; struct netpoll_info { atomic_t refcnt; - int rx_flags; spinlock_t rx_lock; struct netpoll *rx_np; /* netpoll that registered an rx_hook */ struct sk_buff_head arp_tx; /* list of arp requests to reply to */ @@ -51,12 +50,12 @@ static inline int netpoll_rx(struct sk_buff *skb) unsigned long flags; int ret = 0; - if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags)) + if (!npinfo || !npinfo->rx_np) return 0; spin_lock_irqsave(&npinfo->rx_lock, flags); - /* check rx_flags again with the lock held */ - if (npinfo->rx_flags && __netpoll_rx(skb)) + /* check rx_np again with the lock held */ + if (npinfo->rx_np && __netpoll_rx(skb)) ret = 1; spin_unlock_irqrestore(&npinfo->rx_lock, flags); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 2d15d4aac094..099ddb4481c0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -196,28 +196,67 @@ struct nfs_inode { #define NFS_INO_STALE (2) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */ -static inline struct nfs_inode *NFS_I(struct inode *inode) +static inline struct nfs_inode *NFS_I(const struct inode *inode) { return container_of(inode, struct nfs_inode, vfs_inode); } -#define NFS_SB(s) ((struct nfs_server *)(s->s_fs_info)) -#define NFS_FH(inode) (&NFS_I(inode)->fh) -#define NFS_SERVER(inode) (NFS_SB(inode->i_sb)) -#define NFS_CLIENT(inode) (NFS_SERVER(inode)->client) -#define NFS_PROTO(inode) (NFS_SERVER(inode)->nfs_client->rpc_ops) -#define NFS_COOKIEVERF(inode) (NFS_I(inode)->cookieverf) -#define NFS_MINATTRTIMEO(inode) \ - (S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmin \ - : NFS_SERVER(inode)->acregmin) -#define NFS_MAXATTRTIMEO(inode) \ - (S_ISDIR(inode->i_mode)? 
NFS_SERVER(inode)->acdirmax \ - : NFS_SERVER(inode)->acregmax) +static inline struct nfs_server *NFS_SB(const struct super_block *s) +{ + return (struct nfs_server *)(s->s_fs_info); +} + +static inline struct nfs_fh *NFS_FH(const struct inode *inode) +{ + return &NFS_I(inode)->fh; +} + +static inline struct nfs_server *NFS_SERVER(const struct inode *inode) +{ + return NFS_SB(inode->i_sb); +} + +static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode) +{ + return NFS_SERVER(inode)->client; +} + +static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) +{ + return NFS_SERVER(inode)->nfs_client->rpc_ops; +} + +static inline __be32 *NFS_COOKIEVERF(const struct inode *inode) +{ + return NFS_I(inode)->cookieverf; +} + +static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) +{ + struct nfs_server *nfss = NFS_SERVER(inode); + return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin; +} -#define NFS_FLAGS(inode) (NFS_I(inode)->flags) -#define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode))) +static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode) +{ + struct nfs_server *nfss = NFS_SERVER(inode); + return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax; +} -#define NFS_FILEID(inode) (NFS_I(inode)->fileid) +static inline int NFS_STALE(const struct inode *inode) +{ + return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); +} + +static inline __u64 NFS_FILEID(const struct inode *inode) +{ + return NFS_I(inode)->fileid; +} + +static inline void set_nfs_fileid(struct inode *inode, __u64 fileid) +{ + NFS_I(inode)->fileid = fileid; +} static inline void nfs_mark_for_revalidate(struct inode *inode) { @@ -237,7 +276,7 @@ static inline int nfs_server_capable(struct inode *inode, int cap) static inline int NFS_USE_READDIRPLUS(struct inode *inode) { - return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode)); + return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); } static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) @@ -366,6 +405,7 @@ extern const struct inode_operations nfs3_dir_inode_operations; extern const struct file_operations nfs_dir_operations; extern struct dentry_operations nfs_dentry_operations; +extern void nfs_force_lookup_revalidate(struct inode *dir); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 0cac49bc0955..3423c6761bf7 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -3,8 +3,12 @@ #include <linux/list.h> #include <linux/backing-dev.h> +#include <linux/wait.h> + +#include <asm/atomic.h> struct nfs_iostats; +struct nlm_host; /* * The nfs_client identifies our client state to the server. 
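The macro-to-inline conversion above leaves call sites reading exactly as before while adding type checking of the inode argument. A small hypothetical caller (not part of this patch) showing the accessors in use:

	static unsigned example_nfs_attr_timeout(const struct inode *inode)
	{
		if (NFS_STALE(inode))
			return 0;
		return NFS_MINATTRTIMEO(inode);
	}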
@@ -14,20 +18,19 @@ struct nfs_client { int cl_cons_state; /* current construction state (-ve: init error) */ #define NFS_CS_READY 0 /* ready to be used */ #define NFS_CS_INITING 1 /* busy initialising */ - int cl_nfsversion; /* NFS protocol version */ unsigned long cl_res_state; /* NFS resources state */ #define NFS_CS_CALLBACK 1 /* - callback started */ #define NFS_CS_IDMAP 2 /* - idmap started */ #define NFS_CS_RENEWD 3 /* - renewd started */ - struct sockaddr_in cl_addr; /* server identifier */ + struct sockaddr_storage cl_addr; /* server identifier */ + size_t cl_addrlen; char * cl_hostname; /* hostname of server */ struct list_head cl_share_link; /* link in global client list */ struct list_head cl_superblocks; /* List of nfs_server structs */ struct rpc_clnt * cl_rpcclient; const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ - unsigned long retrans_timeo; /* retransmit timeout */ - unsigned int retrans_count; /* number of retransmit tries */ + int cl_proto; /* Network transport protocol */ #ifdef CONFIG_NFS_V4 u64 cl_clientid; /* constant */ @@ -62,7 +65,7 @@ struct nfs_client { /* Our own IP address, as a null-terminated string. * This is used to generate the clientid, and the callback address. */ - char cl_ipaddr[16]; + char cl_ipaddr[48]; unsigned char cl_id_uniquifier; #endif }; @@ -78,6 +81,7 @@ struct nfs_server { struct list_head master_link; /* link in master servers list */ struct rpc_clnt * client; /* RPC client handle */ struct rpc_clnt * client_acl; /* ACL RPC client handle */ + struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats * io_stats; /* I/O statistics */ struct backing_dev_info backing_dev_info; atomic_long_t writeback; /* number of writeback pages */ @@ -110,6 +114,9 @@ struct nfs_server { filesystem */ #endif void (*destroy)(struct nfs_server *); + + atomic_t active; /* Keep trace of any activity to this server */ + wait_queue_head_t active_wq; /* Wait for any activity to stop */ }; /* Server capabilities */ diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 30dbcc185e69..a1676e19e491 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -83,6 +83,7 @@ extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); +extern int nfs_set_page_tag_locked(struct nfs_page *req); extern void nfs_clear_page_tag_locked(struct nfs_page *req); @@ -95,18 +96,6 @@ nfs_lock_request_dontget(struct nfs_page *req) return !test_and_set_bit(PG_BUSY, &req->wb_flags); } -/* - * Lock the page of an asynchronous request and take a reference - */ -static inline int -nfs_lock_request(struct nfs_page *req) -{ - if (test_and_set_bit(PG_BUSY, &req->wb_flags)) - return 0; - kref_get(&req->wb_kref); - return 1; -} - /** * nfs_list_add_request - Insert a request into a list * @req: request diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index daab252f2e5c..f301d0b8babc 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -666,16 +666,17 @@ struct nfs4_rename_res { struct nfs_fattr * new_fattr; }; +#define NFS4_SETCLIENTID_NAMELEN (56) struct nfs4_setclientid { - const nfs4_verifier * sc_verifier; /* request */ + const nfs4_verifier * sc_verifier; unsigned int sc_name_len; - char sc_name[48]; /* request */ - u32 sc_prog; /* request */ + char sc_name[NFS4_SETCLIENTID_NAMELEN]; + u32 sc_prog; unsigned int 
sc_netid_len; - char sc_netid[4]; /* request */ + char sc_netid[RPCBIND_MAXNETIDLEN]; unsigned int sc_uaddr_len; - char sc_uaddr[24]; /* request */ - u32 sc_cb_ident; /* request */ + char sc_uaddr[RPCBIND_MAXUADDRLEN]; + u32 sc_cb_ident; }; struct nfs4_statfs_arg { @@ -773,7 +774,7 @@ struct nfs_access_entry; * RPC procedure vector for NFSv2/NFSv3 demuxing */ struct nfs_rpc_ops { - int version; /* Protocol version */ + u32 version; /* Protocol version */ struct dentry_operations *dentry_ops; const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; @@ -816,11 +817,11 @@ struct nfs_rpc_ops { struct nfs_pathconf *); int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); __be32 *(*decode_dirent)(__be32 *, struct nfs_entry *, int plus); - void (*read_setup) (struct nfs_read_data *); + void (*read_setup) (struct nfs_read_data *, struct rpc_message *); int (*read_done) (struct rpc_task *, struct nfs_read_data *); - void (*write_setup) (struct nfs_write_data *, int how); + void (*write_setup) (struct nfs_write_data *, struct rpc_message *); int (*write_done) (struct rpc_task *, struct nfs_write_data *); - void (*commit_setup) (struct nfs_write_data *, int how); + void (*commit_setup) (struct nfs_write_data *, struct rpc_message *); int (*commit_done) (struct rpc_task *, struct nfs_write_data *); int (*file_open) (struct inode *, struct file *); int (*file_release) (struct inode *, struct file *); diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 538ee1dd3d0a..9fecf902419c 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -7,6 +7,18 @@ */ /** + * DOC: Station handling + * + * Stations are added per interface, but a special case exists with VLAN + * interfaces. When a station is bound to an AP interface, it may be moved + * into a VLAN identified by a VLAN interface index (%NL80211_ATTR_STA_VLAN). + * The station is still assumed to belong to the AP interface it was added + * to. + * + * TODO: need more info? + */ + +/** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@ -37,6 +49,35 @@ * userspace to request deletion of a virtual interface, then requires * attribute %NL80211_ATTR_IFINDEX. * + * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified + * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. + * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT or + * %NL80211_ATTR_KEY_THRESHOLD. + * @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA, + * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC and %NL80211_ATTR_KEY_CIPHER + * attributes. + * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX + * or %NL80211_ATTR_MAC. + * + * @NL80211_CMD_GET_BEACON: retrieve beacon information (returned in a + * %NL80222_CMD_NEW_BEACON message) + * @NL80211_CMD_SET_BEACON: set the beacon on an access point interface + * using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD, + * %NL80211_BEACON_HEAD and %NL80211_BEACON_TAIL attributes. + * @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface, + * parameters are like for %NL80211_CMD_SET_BEACON. + * @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it + * + * @NL80211_CMD_GET_STATION: Get station attributes for station identified by + * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. 
+ * @NL80211_CMD_SET_STATION: Set station attributes for station identified by + * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_NEW_STATION: Add a station with given attributes to the + * the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC + * or, if no MAC address given, all stations, on the interface identified + * by %NL80211_ATTR_IFINDEX. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -54,6 +95,21 @@ enum nl80211_commands { NL80211_CMD_NEW_INTERFACE, NL80211_CMD_DEL_INTERFACE, + NL80211_CMD_GET_KEY, + NL80211_CMD_SET_KEY, + NL80211_CMD_NEW_KEY, + NL80211_CMD_DEL_KEY, + + NL80211_CMD_GET_BEACON, + NL80211_CMD_SET_BEACON, + NL80211_CMD_NEW_BEACON, + NL80211_CMD_DEL_BEACON, + + NL80211_CMD_GET_STATION, + NL80211_CMD_SET_STATION, + NL80211_CMD_NEW_STATION, + NL80211_CMD_DEL_STATION, + /* add commands here */ /* used to define NL80211_CMD_MAX below */ @@ -75,6 +131,36 @@ enum nl80211_commands { * @NL80211_ATTR_IFNAME: network interface name * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype * + * @NL80211_ATTR_MAC: MAC address (various uses) + * + * @NL80211_ATTR_KEY_DATA: (temporal) key data; for TKIP this consists of + * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC + * keys + * @NL80211_ATTR_KEY_IDX: key ID (u8, 0-3) + * @NL80211_ATTR_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 + * section 7.3.2.25.1, e.g. 0x000FAC04) + * @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and + * CCMP keys, each six bytes in little endian + * + * @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU + * @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing + * @NL80211_ATTR_BEACON_HEAD: portion of the beacon before the TIM IE + * @NL80211_ATTR_BEACON_TAIL: portion of the beacon after the TIM IE + * + * @NL80211_ATTR_STA_AID: Association ID for the station (u16) + * @NL80211_ATTR_STA_FLAGS: flags, nested element with NLA_FLAG attributes of + * &enum nl80211_sta_flags. + * @NL80211_ATTR_STA_LISTEN_INTERVAL: listen interval as defined by + * IEEE 802.11 7.3.1.6 (u16). + * @NL80211_ATTR_STA_SUPPORTED_RATES: supported rates, array of supported + * rates as defined by IEEE 802.11 7.3.2.2 but without the length + * restriction (at most %NL80211_MAX_SUPP_RATES). + * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station + * to, or the AP interface the station was originally added to to. + * @NL80211_ATTR_STA_STATS: statistics for a station, part of station info + * given for %NL80211_CMD_GET_STATION, nested attribute containing + * info as possible, see &enum nl80211_sta_stats. 
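For orientation, a generic-netlink handler for one of the station commands above might fetch its attributes along these lines. This is an assumed handler-side fragment (standard netlink attribute helpers, a parsed info->attrs array), not code from this patch:

	u8 *mac = NULL;
	u32 vlan_ifindex = 0;

	if (info->attrs[NL80211_ATTR_MAC])
		mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
	if (info->attrs[NL80211_ATTR_STA_VLAN])
		vlan_ifindex = nla_get_u32(info->attrs[NL80211_ATTR_STA_VLAN]);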
+ * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -89,12 +175,34 @@ enum nl80211_attrs { NL80211_ATTR_IFNAME, NL80211_ATTR_IFTYPE, + NL80211_ATTR_MAC, + + NL80211_ATTR_KEY_DATA, + NL80211_ATTR_KEY_IDX, + NL80211_ATTR_KEY_CIPHER, + NL80211_ATTR_KEY_SEQ, + NL80211_ATTR_KEY_DEFAULT, + + NL80211_ATTR_BEACON_INTERVAL, + NL80211_ATTR_DTIM_PERIOD, + NL80211_ATTR_BEACON_HEAD, + NL80211_ATTR_BEACON_TAIL, + + NL80211_ATTR_STA_AID, + NL80211_ATTR_STA_FLAGS, + NL80211_ATTR_STA_LISTEN_INTERVAL, + NL80211_ATTR_STA_SUPPORTED_RATES, + NL80211_ATTR_STA_VLAN, + NL80211_ATTR_STA_STATS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; +#define NL80211_MAX_SUPP_RATES 32 + /** * enum nl80211_iftype - (virtual) interface types * @@ -126,4 +234,50 @@ enum nl80211_iftype { NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 }; +/** + * enum nl80211_sta_flags - station flags + * + * Station flags. When a station is added to an AP interface, it is + * assumed to be already associated (and hence authenticated.) + * + * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) + * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames + * with short barker preamble + * @NL80211_STA_FLAG_WME: station is WME/QoS capable + */ +enum nl80211_sta_flags { + __NL80211_STA_FLAG_INVALID, + NL80211_STA_FLAG_AUTHORIZED, + NL80211_STA_FLAG_SHORT_PREAMBLE, + NL80211_STA_FLAG_WME, + + /* keep last */ + __NL80211_STA_FLAG_AFTER_LAST, + NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1 +}; + +/** + * enum nl80211_sta_stats - station statistics + * + * These attribute types are used with %NL80211_ATTR_STA_STATS + * when getting information about a station. 
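As a sketch of how the nested statistics might be filled when building a station reply, using the standard netlink attribute helpers of this era (the message-construction context and variable names are assumptions, not part of this patch):

	struct nlattr *stats;

	stats = nla_nest_start(msg, NL80211_ATTR_STA_STATS);
	if (!stats)
		goto nla_put_failure;
	NLA_PUT_U32(msg, NL80211_STA_STAT_INACTIVE_TIME, inactive_ms);
	NLA_PUT_U32(msg, NL80211_STA_STAT_RX_BYTES, rx_bytes);
	NLA_PUT_U32(msg, NL80211_STA_STAT_TX_BYTES, tx_bytes);
	nla_nest_end(msg, stats);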
+ * + * @__NL80211_STA_STAT_INVALID: attribute number 0 is reserved + * @NL80211_STA_STAT_INACTIVE_TIME: time since last activity (u32, msecs) + * @NL80211_STA_STAT_RX_BYTES: total received bytes (u32, from this station) + * @NL80211_STA_STAT_TX_BYTES: total transmitted bytes (u32, to this station) + * @__NL80211_STA_STAT_AFTER_LAST: internal + * @NL80211_STA_STAT_MAX: highest possible station stats attribute + */ +enum nl80211_sta_stats { + __NL80211_STA_STAT_INVALID, + NL80211_STA_STAT_INACTIVE_TIME, + NL80211_STA_STAT_RX_BYTES, + NL80211_STA_STAT_TX_BYTES, + + /* keep last */ + __NL80211_STA_STAT_AFTER_LAST, + NL80211_STA_STAT_MAX = __NL80211_STA_STAT_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 0c40cc0b4a36..5dfbc684ce7d 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -207,9 +207,7 @@ static inline int notifier_to_errno(int ret) #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ -#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ -#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ -#define CPU_DYING 0x000A /* CPU (unsigned)v not running any task, +#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, * not handling interrupts, soon dead */ /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend diff --git a/include/linux/pci.h b/include/linux/pci.h index 0dd93bb62fbe..ae1006322f80 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -867,7 +867,7 @@ enum pci_fixup_pass { /* Anonymous variables would be nice... */ #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ - static const struct pci_fixup __pci_fixup_##name __attribute_used__ \ + static const struct pci_fixup __pci_fixup_##name __used \ __attribute__((__section__(#section))) = { vendor, device, hook }; #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index ab4cb6ecd47c..8f67e8f2a3cc 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -174,7 +174,7 @@ extern int pci_hp_register (struct hotplug_slot *slot); extern int pci_hp_deregister (struct hotplug_slot *slot); extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, struct hotplug_slot_info *info); -extern struct kset pci_hotplug_slots_subsys; +extern struct kset *pci_hotplug_slots_kset; /* PCI Setting Record (Type 0) */ struct hpp_type0 { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7f2215139e9a..41f6f28690f6 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1943,6 +1943,7 @@ #define PCI_DEVICE_ID_NX2_5706 0x164a #define PCI_DEVICE_ID_NX2_5708 0x164c #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d +#define PCI_DEVICE_ID_NX2_57710 0x164e #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 #define PCI_DEVICE_ID_TIGON3_5720 0x1658 @@ -2066,6 +2067,9 @@ #define PCI_VENDOR_ID_NETCELL 0x169c #define PCI_DEVICE_ID_REVOLUTION 0x0044 +#define PCI_VENDOR_ID_CENATEK 0x16CA +#define PCI_DEVICE_ID_CENATEK_IDE 0x0001 + #define PCI_VENDOR_ID_VITESSE 0x1725 #define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 @@ -2078,6 +2082,16 @@ #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb +#define 
PCI_VENDOR_ID_BELKIN 0x1799 +#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f + +#define PCI_VENDOR_ID_RDC 0x17f3 +#define PCI_DEVICE_ID_RDC_R6020 0x6020 +#define PCI_DEVICE_ID_RDC_R6030 0x6030 +#define PCI_DEVICE_ID_RDC_R6040 0x6040 +#define PCI_DEVICE_ID_RDC_R6060 0x6060 +#define PCI_DEVICE_ID_RDC_R6061 0x6061 + #define PCI_VENDOR_ID_LENOVO 0x17aa #define PCI_VENDOR_ID_ARECA 0x17d3 @@ -2106,6 +2120,8 @@ #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 +#define PCI_VENDOR_ID_RDC 0x17f3 + #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 diff --git a/include/linux/pcounter.h b/include/linux/pcounter.h new file mode 100644 index 000000000000..a82d9f2628ca --- /dev/null +++ b/include/linux/pcounter.h @@ -0,0 +1,74 @@ +#ifndef __LINUX_PCOUNTER_H +#define __LINUX_PCOUNTER_H +/* + * Using a dynamic percpu 'int' variable has a cost : + * 1) Extra dereference + * Current per_cpu_ptr() implementation uses an array per 'percpu variable'. + * 2) memory cost of NR_CPUS*(32+sizeof(void *)) instead of num_possible_cpus()*4 + * + * This pcounter implementation is an abstraction to be able to use + * either a static or a dynamic per cpu variable. + * One dynamic per cpu variable gets a fast & cheap implementation, we can + * change pcounter implementation too. + */ +struct pcounter { +#ifdef CONFIG_SMP + void (*add)(struct pcounter *self, int inc); + int (*getval)(const struct pcounter *self, int cpu); + int *per_cpu_values; +#else + int val; +#endif +}; + +#ifdef CONFIG_SMP +#include <linux/percpu.h> + +#define DEFINE_PCOUNTER(NAME) \ +static DEFINE_PER_CPU(int, NAME##_pcounter_values); \ +static void NAME##_pcounter_add(struct pcounter *self, int val) \ +{ \ + __get_cpu_var(NAME##_pcounter_values) += val; \ +} \ +static int NAME##_pcounter_getval(const struct pcounter *self, int cpu) \ +{ \ + return per_cpu(NAME##_pcounter_values, cpu); \ +} \ + +#define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER) \ + MEMBER = { \ + .add = NAME##_pcounter_add, \ + .getval = NAME##_pcounter_getval, \ + } + + +static inline void pcounter_add(struct pcounter *self, int inc) +{ + self->add(self, inc); +} + +extern int pcounter_getval(const struct pcounter *self); +extern int pcounter_alloc(struct pcounter *self); +extern void pcounter_free(struct pcounter *self); + + +#else /* CONFIG_SMP */ + +static inline void pcounter_add(struct pcounter *self, int inc) +{ + self->val += inc; +} + +static inline int pcounter_getval(const struct pcounter *self) +{ + return self->val; +} + +#define DEFINE_PCOUNTER(NAME) +#define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER) +#define pcounter_alloc(self) 0 +#define pcounter_free(self) + +#endif /* CONFIG_SMP */ + +#endif /* __LINUX_PCOUNTER_H */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 926adaae0f96..00412bb494c4 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -9,6 +9,30 @@ #include <asm/percpu.h> +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif + +#ifdef CONFIG_SMP +#define DEFINE_PER_CPU(type, name) \ + __attribute__((__section__(".data.percpu"))) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.shared_aligned"))) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ + ____cacheline_aligned_in_smp +#else +#define DEFINE_PER_CPU(type, name) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, 
name) +#endif + +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) + /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ #ifndef PERCPU_ENOUGH_ROOM #ifdef CONFIG_MODULES diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 919af93b7059..32761352e858 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -83,6 +83,8 @@ struct tc_ratespec __u32 rate; }; +#define TC_RTAB_SIZE 1024 + /* FIFO section */ struct tc_fifo_qopt diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 5ea4f05683f6..04b4d7330e6d 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -290,7 +290,7 @@ struct pktcdvd_device int write_congestion_off; int write_congestion_on; - struct class_device *clsdev; /* sysfs pktcdvd[0-7] class dev */ + struct device *dev; /* sysfs pktcdvd[0-7] dev */ struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */ struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index e80804316cdb..3261681c82a4 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -35,7 +35,7 @@ extern struct resource *platform_get_resource_byname(struct platform_device *, u extern int platform_get_irq_byname(struct platform_device *, char *); extern int platform_add_devices(struct platform_device **, int); -extern struct platform_device *platform_device_register_simple(char *, int id, +extern struct platform_device *platform_device_register_simple(const char *, int id, struct resource *, unsigned int); extern struct platform_device *platform_device_alloc(const char *name, int id); diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index a5316829215b..8f92546b403d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -201,6 +201,8 @@ static inline struct proc_dir_entry *create_proc_info_entry(const char *name, extern struct proc_dir_entry *proc_net_fops_create(struct net *net, const char *name, mode_t mode, const struct file_operations *fops); extern void proc_net_remove(struct net *net, const char *name); +extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, + struct proc_dir_entry *parent); #else diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 3ea5750a0f7e..515bff053de8 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -129,6 +129,81 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); #define force_successful_syscall_return() do { } while (0) #endif +/* + * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__. + * + * These do-nothing inlines are used when the arch does not + * implement single-step. The kerneldoc comments are here + * to document the interface for all arch definitions. + */ + +#ifndef arch_has_single_step +/** + * arch_has_single_step - does this CPU support user-mode single-step? + * + * If this is defined, then there must be function declarations or + * inlines for user_enable_single_step() and user_disable_single_step(). + * arch_has_single_step() should evaluate to nonzero iff the machine + * supports instruction single-step for user mode. + * It can be a constant or it can test a CPU feature bit. 
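The stub definitions below cover architectures without single-step support; an architecture that does support it would instead provide something along these lines (TIF_SINGLESTEP and the way the hardware trap is armed are per-arch details, shown here only as an assumed sketch):

#define arch_has_single_step()	(1)

static inline void user_enable_single_step(struct task_struct *task)
{
	/* remember that this task is being single-stepped ... */
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
	/* ... and arm the hardware trap, e.g. the x86 TF bit, in the
	 * task's saved user register state (arch-specific, omitted) */
}

static inline void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	/* disarm the hardware trap here as well */
}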
+ */ +#define arch_has_single_step() (0) + +/** + * user_enable_single_step - single-step in user-mode task + * @task: either current or a task stopped in %TASK_TRACED + * + * This can only be called when arch_has_single_step() has returned nonzero. + * Set @task so that when it returns to user mode, it will trap after the + * next single instruction executes. If arch_has_block_step() is defined, + * this must clear the effects of user_enable_block_step() too. + */ +static inline void user_enable_single_step(struct task_struct *task) +{ + BUG(); /* This can never be called. */ +} + +/** + * user_disable_single_step - cancel user-mode single-step + * @task: either current or a task stopped in %TASK_TRACED + * + * Clear @task of the effects of user_enable_single_step() and + * user_enable_block_step(). This can be called whether or not either + * of those was ever called on @task, and even if arch_has_single_step() + * returned zero. + */ +static inline void user_disable_single_step(struct task_struct *task) +{ +} +#endif /* arch_has_single_step */ + +#ifndef arch_has_block_step +/** + * arch_has_block_step - does this CPU support user-mode block-step? + * + * If this is defined, then there must be a function declaration or inline + * for user_enable_block_step(), and arch_has_single_step() must be defined + * too. arch_has_block_step() should evaluate to nonzero iff the machine + * supports step-until-branch for user mode. It can be a constant or it + * can test a CPU feature bit. + */ +#define arch_has_block_step() (0) + +/** + * user_enable_block_step - step until branch in user-mode task + * @task: either current or a task stopped in %TASK_TRACED + * + * This can only be called when arch_has_block_step() has returned nonzero, + * and will never be called when single-instruction stepping is being used. + * Set @task so that when it returns to user mode, it will trap after the + * next branch or trap taken. + */ +static inline void user_enable_block_step(struct task_struct *task) +{ + BUG(); /* This can never be called. */ +} +#endif /* arch_has_block_step */ + #endif #endif diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h new file mode 100644 index 000000000000..4d6624260b4c --- /dev/null +++ b/include/linux/rcuclassic.h @@ -0,0 +1,164 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (classic version) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2001 + * + * Author: Dipankar Sarma <dipankar@in.ibm.com> + * + * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
+ * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + * + */ + +#ifndef __LINUX_RCUCLASSIC_H +#define __LINUX_RCUCLASSIC_H + +#ifdef __KERNEL__ + +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> +#include <linux/seqlock.h> + + +/* Global control variables for rcupdate callback mechanism. */ +struct rcu_ctrlblk { + long cur; /* Current batch number. */ + long completed; /* Number of the last completed batch */ + int next_pending; /* Is the next batch already waiting? */ + + int signaled; + + spinlock_t lock ____cacheline_internodealigned_in_smp; + cpumask_t cpumask; /* CPUs that need to switch in order */ + /* for current batch to proceed. */ +} ____cacheline_internodealigned_in_smp; + +/* Is batch a before batch b ? */ +static inline int rcu_batch_before(long a, long b) +{ + return (a - b) < 0; +} + +/* Is batch a after batch b ? */ +static inline int rcu_batch_after(long a, long b) +{ + return (a - b) > 0; +} + +/* + * Per-CPU data for Read-Copy UPdate. + * nxtlist - new callbacks are added here + * curlist - current batch for which quiescent cycle started if any + */ +struct rcu_data { + /* 1) quiescent state handling : */ + long quiescbatch; /* Batch # for grace period */ + int passed_quiesc; /* User-mode/idle loop etc. */ + int qs_pending; /* core waits for quiesc state */ + + /* 2) batch handling */ + long batch; /* Batch # for current RCU batch */ + struct rcu_head *nxtlist; + struct rcu_head **nxttail; + long qlen; /* # of queued callbacks */ + struct rcu_head *curlist; + struct rcu_head **curtail; + struct rcu_head *donelist; + struct rcu_head **donetail; + long blimit; /* Upper limit on a processed batch */ + int cpu; + struct rcu_head barrier; +}; + +DECLARE_PER_CPU(struct rcu_data, rcu_data); +DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); + +/* + * Increment the quiescent state counter. + * The counter is a bit degenerated: We do not need to know + * how many quiescent states passed, just if there was at least + * one since the start of the grace period. Thus just a flag. 
+ */ +static inline void rcu_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + rdp->passed_quiesc = 1; +} +static inline void rcu_bh_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); + rdp->passed_quiesc = 1; +} + +extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern struct lockdep_map rcu_lock_map; +# define rcu_read_acquire() \ + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) +# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) +#else +# define rcu_read_acquire() do { } while (0) +# define rcu_read_release() do { } while (0) +#endif + +#define __rcu_read_lock() \ + do { \ + preempt_disable(); \ + __acquire(RCU); \ + rcu_read_acquire(); \ + } while (0) +#define __rcu_read_unlock() \ + do { \ + rcu_read_release(); \ + __release(RCU); \ + preempt_enable(); \ + } while (0) +#define __rcu_read_lock_bh() \ + do { \ + local_bh_disable(); \ + __acquire(RCU_BH); \ + rcu_read_acquire(); \ + } while (0) +#define __rcu_read_unlock_bh() \ + do { \ + rcu_read_release(); \ + __release(RCU_BH); \ + local_bh_enable(); \ + } while (0) + +#define __synchronize_sched() synchronize_rcu() + +extern void __rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); + +extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_RCUCLASSIC_H */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index cc24a01df940..d32c14de270e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -15,7 +15,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * Copyright (C) IBM Corporation, 2001 + * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * @@ -53,96 +53,18 @@ struct rcu_head { void (*func)(struct rcu_head *head); }; +#ifdef CONFIG_CLASSIC_RCU +#include <linux/rcuclassic.h> +#else /* #ifdef CONFIG_CLASSIC_RCU */ +#include <linux/rcupreempt.h> +#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ + #define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT #define INIT_RCU_HEAD(ptr) do { \ (ptr)->next = NULL; (ptr)->func = NULL; \ } while (0) - - -/* Global control variables for rcupdate callback mechanism. */ -struct rcu_ctrlblk { - long cur; /* Current batch number. */ - long completed; /* Number of the last completed batch */ - int next_pending; /* Is the next batch already waiting? */ - - int signaled; - - spinlock_t lock ____cacheline_internodealigned_in_smp; - cpumask_t cpumask; /* CPUs that need to switch in order */ - /* for current batch to proceed. */ -} ____cacheline_internodealigned_in_smp; - -/* Is batch a before batch b ? */ -static inline int rcu_batch_before(long a, long b) -{ - return (a - b) < 0; -} - -/* Is batch a after batch b ? */ -static inline int rcu_batch_after(long a, long b) -{ - return (a - b) > 0; -} - -/* - * Per-CPU data for Read-Copy UPdate. - * nxtlist - new callbacks are added here - * curlist - current batch for which quiescent cycle started if any - */ -struct rcu_data { - /* 1) quiescent state handling : */ - long quiescbatch; /* Batch # for grace period */ - int passed_quiesc; /* User-mode/idle loop etc. 
*/ - int qs_pending; /* core waits for quiesc state */ - - /* 2) batch handling */ - long batch; /* Batch # for current RCU batch */ - struct rcu_head *nxtlist; - struct rcu_head **nxttail; - long qlen; /* # of queued callbacks */ - struct rcu_head *curlist; - struct rcu_head **curtail; - struct rcu_head *donelist; - struct rcu_head **donetail; - long blimit; /* Upper limit on a processed batch */ - int cpu; - struct rcu_head barrier; -}; - -DECLARE_PER_CPU(struct rcu_data, rcu_data); -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); - -/* - * Increment the quiescent state counter. - * The counter is a bit degenerated: We do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period. Thus just a flag. - */ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; -} - -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -extern struct lockdep_map rcu_lock_map; -# define rcu_read_acquire() lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) -# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) -#else -# define rcu_read_acquire() do { } while (0) -# define rcu_read_release() do { } while (0) -#endif - /** * rcu_read_lock - mark the beginning of an RCU read-side critical section. * @@ -172,24 +94,13 @@ extern struct lockdep_map rcu_lock_map; * * It is illegal to block while in an RCU read-side critical section. */ -#define rcu_read_lock() \ - do { \ - preempt_disable(); \ - __acquire(RCU); \ - rcu_read_acquire(); \ - } while(0) +#define rcu_read_lock() __rcu_read_lock() /** * rcu_read_unlock - marks the end of an RCU read-side critical section. * * See rcu_read_lock() for more information. */ -#define rcu_read_unlock() \ - do { \ - rcu_read_release(); \ - __release(RCU); \ - preempt_enable(); \ - } while(0) /* * So where is rcu_write_lock()? It does not exist, as there is no @@ -200,6 +111,7 @@ extern struct lockdep_map rcu_lock_map; * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ +#define rcu_read_unlock() __rcu_read_unlock() /** * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section @@ -212,24 +124,14 @@ extern struct lockdep_map rcu_lock_map; * can use just rcu_read_lock(). * */ -#define rcu_read_lock_bh() \ - do { \ - local_bh_disable(); \ - __acquire(RCU_BH); \ - rcu_read_acquire(); \ - } while(0) +#define rcu_read_lock_bh() __rcu_read_lock_bh() /* * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ -#define rcu_read_unlock_bh() \ - do { \ - rcu_read_release(); \ - __release(RCU_BH); \ - local_bh_enable(); \ - } while(0) +#define rcu_read_unlock_bh() __rcu_read_unlock_bh() /* * Prevent the compiler from merging or refetching accesses. The compiler @@ -293,21 +195,52 @@ extern struct lockdep_map rcu_lock_map; * In "classic RCU", these two guarantees happen to be one and * the same, but can differ in realtime RCU implementations. 
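Splitting the implementation behind __rcu_read_lock()/__rcu_read_unlock() does not change how the read side is used. For reference, the canonical pattern with an updater that defers reclamation through call_rcu(); the structure, the global pointer and the assumption that updaters serialize among themselves are illustrative (assumes <linux/rcupdate.h> and <linux/slab.h>):

struct foo {
	int data;
	struct rcu_head rcu;
};

static struct foo *gp;

/* reader: no locks, no blocking */
static int read_foo(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		val = p->data;
	rcu_read_unlock();
	return val;
}

static void free_foo(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* updater: caller is assumed to hold its own update-side lock */
static void update_foo(struct foo *newp)
{
	struct foo *old = gp;

	rcu_assign_pointer(gp, newp);
	if (old)
		call_rcu(&old->rcu, free_foo);	/* reclaim after a grace period */
}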
*/ -#define synchronize_sched() synchronize_rcu() +#define synchronize_sched() __synchronize_sched() -extern void rcu_init(void); -extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); -extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); +/** + * call_rcu - Queue an RCU callback for invocation after a grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. + */ +extern void call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head)); -/* Exported interfaces */ -extern void FASTCALL(call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *head))); -extern void FASTCALL(call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head))); +/** + * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_bh() assumes + * that the read-side critical sections end on completion of a softirq + * handler. This means that read-side critical sections in process + * context must not be interrupted by softirqs. This interface is to be + * used when most of the read-side critical sections are in softirq context. + * RCU read-side critical sections are delimited by : + * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. + * OR + * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. + * These may be nested. + */ +extern void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); + +/* Exported common interfaces */ extern void synchronize_rcu(void); extern void rcu_barrier(void); +extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); + +/* Internal to kernel */ +extern void rcu_init(void); +extern int rcu_needs_cpu(int cpu); #endif /* __KERNEL__ */ #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h new file mode 100644 index 000000000000..ece8eb3e4151 --- /dev/null +++ b/include/linux/rcupreempt.h @@ -0,0 +1,86 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (RT implementation) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Paul McKenney <paulmck@us.ibm.com> + * + * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + * + */ + +#ifndef __LINUX_RCUPREEMPT_H +#define __LINUX_RCUPREEMPT_H + +#ifdef __KERNEL__ + +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> +#include <linux/seqlock.h> + +#define rcu_qsctr_inc(cpu) +#define rcu_bh_qsctr_inc(cpu) +#define call_rcu_bh(head, rcu) call_rcu(head, rcu) + +extern void __rcu_read_lock(void); +extern void __rcu_read_unlock(void); +extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); + +#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } +#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } + +extern void __synchronize_sched(void); + +extern void __rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); +extern long rcu_batches_completed(void); + +/* + * Return the number of RCU batches processed thus far. Useful for debug + * and statistic. The _bh variant is identifcal to straight RCU + */ +static inline long rcu_batches_completed_bh(void) +{ + return rcu_batches_completed(); +} + +#ifdef CONFIG_RCU_TRACE +struct rcupreempt_trace; +extern long *rcupreempt_flipctr(int cpu); +extern long rcupreempt_data_completed(void); +extern int rcupreempt_flip_flag(int cpu); +extern int rcupreempt_mb_flag(int cpu); +extern char *rcupreempt_try_flip_state_name(void); +extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); +#endif + +struct softirq_action; + +#endif /* __KERNEL__ */ +#endif /* __LINUX_RCUPREEMPT_H */ diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h new file mode 100644 index 000000000000..21cd6b2a5c42 --- /dev/null +++ b/include/linux/rcupreempt_trace.h @@ -0,0 +1,99 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (RT implementation) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Paul McKenney <paulmck@us.ibm.com> + * + * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
+ * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of the Preemptible Read-Copy Update mechanism see - + * http://lwn.net/Articles/253651/ + */ + +#ifndef __LINUX_RCUPREEMPT_TRACE_H +#define __LINUX_RCUPREEMPT_TRACE_H + +#ifdef __KERNEL__ +#include <linux/types.h> +#include <linux/kernel.h> + +#include <asm/atomic.h> + +/* + * PREEMPT_RCU data structures. + */ + +struct rcupreempt_trace { + long next_length; + long next_add; + long wait_length; + long wait_add; + long done_length; + long done_add; + long done_remove; + atomic_t done_invoked; + long rcu_check_callbacks; + atomic_t rcu_try_flip_1; + atomic_t rcu_try_flip_e1; + long rcu_try_flip_i1; + long rcu_try_flip_ie1; + long rcu_try_flip_g1; + long rcu_try_flip_a1; + long rcu_try_flip_ae1; + long rcu_try_flip_a2; + long rcu_try_flip_z1; + long rcu_try_flip_ze1; + long rcu_try_flip_z2; + long rcu_try_flip_m1; + long rcu_try_flip_me1; + long rcu_try_flip_m2; +}; + +#ifdef CONFIG_RCU_TRACE +#define RCU_TRACE(fn, arg) fn(arg); +#else +#define RCU_TRACE(fn, arg) +#endif + +extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace); +extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_RCUPREEMPT_TRACE_H */ diff --git a/include/linux/regset.h b/include/linux/regset.h new file mode 100644 index 000000000000..8abee6556223 --- /dev/null +++ b/include/linux/regset.h @@ -0,0 +1,368 @@ +/* + * User-mode machine state access + * + * Copyright (C) 2007 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * Red Hat Author: Roland McGrath. 
+ */ + +#ifndef _LINUX_REGSET_H +#define _LINUX_REGSET_H 1 + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/uaccess.h> +struct task_struct; +struct user_regset; + + +/** + * user_regset_active_fn - type of @active function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * + * Return -%ENODEV if not available on the hardware found. + * Return %0 if no interesting state in this thread. + * Return >%0 number of @size units of interesting state. + * Any get call fetching state beyond that number will + * see the default initialization state for this data, + * so a caller that knows what the default state is need + * not copy it all out. + * This call is optional; the pointer is %NULL if there + * is no inexpensive check to yield a value < @n. + */ +typedef int user_regset_active_fn(struct task_struct *target, + const struct user_regset *regset); + +/** + * user_regset_get_fn - type of @get function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy into + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into + * + * Fetch register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_get_fn(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); + +/** + * user_regset_set_fn - type of @set function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy from + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy from + * + * Store register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_set_fn(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); + +/** + * user_regset_writeback_fn - type of @writeback function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @immediate: zero if writeback at completion of next context switch is OK + * + * This call is optional; usually the pointer is %NULL. When + * provided, there is some user memory associated with this regset's + * hardware, such as memory backing cached register data on register + * window machines; the regset's data controls what user memory is + * used (e.g. via the stack pointer value). + * + * Write register data back to user memory. 
If the @immediate flag + * is nonzero, it must be written to the user memory so uaccess or + * access_process_vm() can see it when this call returns; if zero, + * then it must be written back by the time the task completes a + * context switch (as synchronized with wait_task_inactive()). + * Return %0 on success or if there was nothing to do, -%EFAULT for + * a memory problem (bad stack pointer or whatever), or -%EIO for a + * hardware problem. + */ +typedef int user_regset_writeback_fn(struct task_struct *target, + const struct user_regset *regset, + int immediate); + +/** + * struct user_regset - accessible thread CPU state + * @n: Number of slots (registers). + * @size: Size in bytes of a slot (register). + * @align: Required alignment, in bytes. + * @bias: Bias from natural indexing. + * @core_note_type: ELF note @n_type value used in core dumps. + * @get: Function to fetch values. + * @set: Function to store values. + * @active: Function to report if regset is active, or %NULL. + * @writeback: Function to write data back to user memory, or %NULL. + * + * This data structure describes a machine resource we call a register set. + * This is part of the state of an individual thread, not necessarily + * actual CPU registers per se. A register set consists of a number of + * similar slots, given by @n. Each slot is @size bytes, and aligned to + * @align bytes (which is at least @size). + * + * These functions must be called only on the current thread or on a + * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are + * guaranteed will not be woken up and return to user mode, and that we + * have called wait_task_inactive() on. (The target thread always might + * wake up for SIGKILL while these functions are working, in which case + * that thread's user_regset state might be scrambled.) + * + * The @pos argument must be aligned according to @align; the @count + * argument must be a multiple of @size. These functions are not + * responsible for checking for invalid arguments. + * + * When there is a natural value to use as an index, @bias gives the + * difference between the natural index and the slot index for the + * register set. For example, x86 GDT segment descriptors form a regset; + * the segment selector produces a natural index, but only a subset of + * that index space is available as a regset (the TLS slots); subtracting + * @bias from a segment selector index value computes the regset slot. + * + * If nonzero, @core_note_type gives the n_type field (NT_* value) + * of the core file note in which this regset's data appears. + * NT_PRSTATUS is a special case in that the regset data starts at + * offsetof(struct elf_prstatus, pr_reg) into the note data; that is + * part of the per-machine ELF formats userland knows about. In + * other cases, the core file note contains exactly the whole regset + * (@n * @size) and nothing else. The core file note is normally + * omitted when there is an @active function and it returns zero. + */ +struct user_regset { + user_regset_get_fn *get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +/** + * struct user_regset_view - available regsets + * @name: Identifier, e.g. UTS_MACHINE string. + * @regsets: Array of @n regsets available in this view. + * @n: Number of elements in @regsets. + * @e_machine: ELF header @e_machine %EM_* value written in core dumps. 
+ * @e_flags: ELF header @e_flags value written in core dumps. + * @ei_osabi: ELF header @e_ident[%EI_OSABI] value written in core dumps. + * + * A regset view is a collection of regsets (&struct user_regset, + * above). This describes all the state of a thread that can be seen + * from a given architecture/ABI environment. More than one view might + * refer to the same &struct user_regset, or more than one regset + * might refer to the same machine-specific state in the thread. For + * example, a 32-bit thread's state could be examined from the 32-bit + * view or from the 64-bit view. Either method reaches the same thread + * register state, doing appropriate widening or truncation. + */ +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +/* + * This is documented here rather than at the definition sites because its + * implementation is machine-dependent but its interface is universal. + */ +/** + * task_user_regset_view - Return the process's native regset view. + * @tsk: a thread of the process in question + * + * Return the &struct user_regset_view that is native for the given process. + * For example, what it would access when it called ptrace(). + * Throughout the life of the process, this only changes at exec. + */ +const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); + + +/* + * These are helpers for writing regset get/set functions in arch code. + * Because @start_pos and @end_pos are always compile-time constants, + * these are inlined into very little code though they look large. + * + * Use one or more calls sequentially for each chunk of regset data stored + * contiguously in memory. Call with constants for @start_pos and @end_pos, + * giving the range of byte positions in the regset that data corresponds + * to; @end_pos can be -1 if this chunk is at the end of the regset layout. + * Each call updates the arguments to point past its chunk. + */ + +static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, + void **kbuf, + void __user **ubuf, const void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(*kbuf, data, copy); + *kbuf += copy; + } else if (__copy_to_user(*ubuf, data, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, + const void **kbuf, + const void __user **ubuf, void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(data, *kbuf, copy); + *kbuf += copy; + } else if (__copy_from_user(data, *ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/* + * These two parallel the two above, but for portions of a regset layout + * that always read as all-zero or for which writes are ignored. 
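How an architecture's .get() routine is expected to use these helpers, as an assumed sketch; "struct my_regs" and target->thread.regs are hypothetical placeholders for the real per-arch register block:

static int my_regs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct my_regs *regs = &target->thread.regs;	/* hypothetical */
	int ret;

	/* first chunk: the register block itself, at offset 0 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs, 0, sizeof(*regs));
	if (ret)
		return ret;

	/* whatever remains of the layout reads back as zero */
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					sizeof(*regs), -1);
}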
+ */ +static inline int user_regset_copyout_zero(unsigned int *pos, + unsigned int *count, + void **kbuf, void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) { + memset(*kbuf, 0, copy); + *kbuf += copy; + } else if (__clear_user(*ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int user_regset_copyin_ignore(unsigned int *pos, + unsigned int *count, + const void **kbuf, + const void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) + *kbuf += copy; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/** + * copy_regset_to_user - fetch a thread's user_regset data into user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy into + */ +static inline int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + + if (!access_ok(VERIFY_WRITE, data, size)) + return -EIO; + + return regset->get(target, regset, offset, size, NULL, data); +} + +/** + * copy_regset_from_user - store into thread's user_regset data from user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy from + */ +static inline int copy_regset_from_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + const void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + + if (!access_ok(VERIFY_READ, data, size)) + return -EIO; + + return regset->set(target, regset, offset, size, NULL, data); +} + + +#endif /* <linux/regset.h> */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 4e81836191df..b014f6b7fe29 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -100,6 +100,13 @@ enum { RTM_NEWNDUSEROPT = 68, #define RTM_NEWNDUSEROPT RTM_NEWNDUSEROPT + RTM_NEWADDRLABEL = 72, +#define RTM_NEWADDRLABEL RTM_NEWADDRLABEL + RTM_DELADDRLABEL, +#define RTM_NEWADDRLABEL RTM_NEWADDRLABEL + RTM_GETADDRLABEL, +#define RTM_GETADDRLABEL RTM_GETADDRLABEL + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; @@ -613,11 +620,11 @@ extern int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr, ({ data = RTA_PAYLOAD(rta) >= len ? 
RTA_DATA(rta) : NULL; \ __rtattr_parse_nested_compat(tb, max, rta, len); }) -extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo); -extern int rtnl_unicast(struct sk_buff *skb, u32 pid); -extern int rtnl_notify(struct sk_buff *skb, u32 pid, u32 group, +extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); +extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); +extern int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, struct nlmsghdr *nlh, gfp_t flags); -extern void rtnl_set_sk_err(u32 group, int error); +extern void rtnl_set_sk_err(struct net *net, u32 group, int error); extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, u32 ts, u32 tsage, long expires, diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index e3ff21dbac53..a3d567a974e8 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -7,6 +7,12 @@ #include <linux/string.h> #include <asm/io.h> +struct sg_table { + struct scatterlist *sgl; /* the list */ + unsigned int nents; /* number of mapped entries */ + unsigned int orig_nents; /* original size of list */ +}; + /* * Notes on SG table design. * @@ -106,31 +112,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); } -/** - * sg_next - return the next scatterlist entry in a list - * @sg: The current sg entry - * - * Description: - * Usually the next entry will be @sg@ + 1, but if this sg element is part - * of a chained scatterlist, it could jump to the start of a new - * scatterlist array. - * - **/ -static inline struct scatterlist *sg_next(struct scatterlist *sg) -{ -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif - if (sg_is_last(sg)) - return NULL; - - sg++; - if (unlikely(sg_is_chain(sg))) - sg = sg_chain_ptr(sg); - - return sg; -} - /* * Loop over each sg element, following the pointer to a new list if necessary */ @@ -138,40 +119,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg) for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) /** - * sg_last - return the last scatterlist entry in a list - * @sgl: First entry in the scatterlist - * @nents: Number of entries in the scatterlist - * - * Description: - * Should only be used casually, it (currently) scan the entire list - * to get the last entry. - * - * Note that the @sgl@ pointer passed in need not be the first one, - * the important bit is that @nents@ denotes the number of entries that - * exist from @sgl@. - * - **/ -static inline struct scatterlist *sg_last(struct scatterlist *sgl, - unsigned int nents) -{ -#ifndef ARCH_HAS_SG_CHAIN - struct scatterlist *ret = &sgl[nents - 1]; -#else - struct scatterlist *sg, *ret = NULL; - unsigned int i; - - for_each_sg(sgl, sg, nents, i) - ret = sg; - -#endif -#ifdef CONFIG_DEBUG_SG - BUG_ON(sgl[0].sg_magic != SG_MAGIC); - BUG_ON(!sg_is_last(ret)); -#endif - return ret; -} - -/** * sg_chain - Chain two sglists together * @prv: First scatterlist * @prv_nents: Number of entries in prv @@ -223,47 +170,6 @@ static inline void sg_mark_end(struct scatterlist *sg) } /** - * sg_init_table - Initialize SG table - * @sgl: The SG table - * @nents: Number of entries in table - * - * Notes: - * If this is part of a chained sg table, sg_mark_end() should be - * used only on the last table part. 
- * - **/ -static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) -{ - memset(sgl, 0, sizeof(*sgl) * nents); -#ifdef CONFIG_DEBUG_SG - { - unsigned int i; - for (i = 0; i < nents; i++) - sgl[i].sg_magic = SG_MAGIC; - } -#endif - sg_mark_end(&sgl[nents - 1]); -} - -/** - * sg_init_one - Initialize a single entry sg list - * @sg: SG entry - * @buf: Virtual address for IO - * @buflen: IO length - * - * Notes: - * This should not be used on a single entry that is part of a larger - * table. Use sg_init_table() for that. - * - **/ -static inline void sg_init_one(struct scatterlist *sg, const void *buf, - unsigned int buflen) -{ - sg_init_table(sg, 1); - sg_set_buf(sg, buf, buflen); -} - -/** * sg_phys - Return physical address of an sg entry * @sg: SG entry * @@ -293,4 +199,24 @@ static inline void *sg_virt(struct scatterlist *sg) return page_address(sg_page(sg)) + sg->offset; } +struct scatterlist *sg_next(struct scatterlist *); +struct scatterlist *sg_last(struct scatterlist *s, unsigned int); +void sg_init_table(struct scatterlist *, unsigned int); +void sg_init_one(struct scatterlist *, const void *, unsigned int); + +typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); +typedef void (sg_free_fn)(struct scatterlist *, unsigned int); + +void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *); +void sg_free_table(struct sg_table *); +int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, + sg_alloc_fn *); +int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); + +/* + * Maximum number of entries that will be allocated in one piece, if + * a list larger than this is required then chaining will be utilized. + */ +#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) + #endif /* _LINUX_SCATTERLIST_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index cc14656f8682..9d4797609aa5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -27,6 +27,7 @@ #define CLONE_NEWUSER 0x10000000 /* New user namespace */ #define CLONE_NEWPID 0x20000000 /* New pid namespace */ #define CLONE_NEWNET 0x40000000 /* New network namespace */ +#define CLONE_IO 0x80000000 /* Clone io context */ /* * Scheduling policies @@ -78,7 +79,6 @@ struct sched_param { #include <linux/proportions.h> #include <linux/seccomp.h> #include <linux/rcupdate.h> -#include <linux/futex.h> #include <linux/rtmutex.h> #include <linux/time.h> @@ -88,11 +88,13 @@ struct sched_param { #include <linux/hrtimer.h> #include <linux/task_io_accounting.h> #include <linux/kobject.h> +#include <linux/latencytop.h> #include <asm/processor.h> struct exec_domain; struct futex_pi_state; +struct robust_list_head; struct bio; /* @@ -230,6 +232,8 @@ static inline int select_nohz_load_balancer(int cpu) } #endif +extern unsigned long rt_needs_cpu(int cpu); + /* * Only dump TASK_* tasks. 
(0 for all tasks) */ @@ -257,13 +261,19 @@ extern void trap_init(void); extern void account_process_tick(struct task_struct *task, int user); extern void update_process_times(int user); extern void scheduler_tick(void); +extern void hrtick_resched(void); + +extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern int softlockup_thresh; +extern unsigned long softlockup_thresh; +extern unsigned long sysctl_hung_task_check_count; +extern unsigned long sysctl_hung_task_timeout_secs; +extern unsigned long sysctl_hung_task_warnings; #else static inline void softlockup_tick(void) { @@ -552,18 +562,13 @@ struct user_struct { #ifdef CONFIG_FAIR_USER_SCHED struct task_group *tg; #ifdef CONFIG_SYSFS - struct kset kset; - struct subsys_attribute user_attr; + struct kobject kobj; struct work_struct work; #endif #endif }; -#ifdef CONFIG_FAIR_USER_SCHED -extern int uids_kobject_init(void); -#else -static inline int uids_kobject_init(void) { return 0; } -#endif +extern int uids_sysfs_init(void); extern struct user_struct *find_user(uid_t); @@ -827,6 +832,7 @@ struct sched_class { void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); void (*yield_task) (struct rq *rq); + int (*select_task_rq)(struct task_struct *p, int sync); void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); @@ -842,11 +848,25 @@ struct sched_class { int (*move_one_task) (struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle); + void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); + void (*post_schedule) (struct rq *this_rq); + void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); #endif void (*set_curr_task) (struct rq *rq); - void (*task_tick) (struct rq *rq, struct task_struct *p); + void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); void (*task_new) (struct rq *rq, struct task_struct *p); + void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask); + + void (*join_domain)(struct rq *rq); + void (*leave_domain)(struct rq *rq); + + void (*switched_from) (struct rq *this_rq, struct task_struct *task, + int running); + void (*switched_to) (struct rq *this_rq, struct task_struct *task, + int running); + void (*prio_changed) (struct rq *this_rq, struct task_struct *task, + int oldprio, int running); }; struct load_weight { @@ -876,6 +896,8 @@ struct sched_entity { #ifdef CONFIG_SCHEDSTATS u64 wait_start; u64 wait_max; + u64 wait_count; + u64 wait_sum; u64 sleep_start; u64 sleep_max; @@ -914,6 +936,21 @@ struct sched_entity { #endif }; +struct sched_rt_entity { + struct list_head run_list; + unsigned int time_slice; + unsigned long timeout; + int nr_cpus_allowed; + +#ifdef CONFIG_FAIR_GROUP_SCHED + struct sched_rt_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct rt_rq *rt_rq; + /* rq "owned" by this entity/group: */ + struct rt_rq *my_q; +#endif +}; + struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; @@ -930,16 +967,15 @@ struct task_struct { #endif int prio, static_prio, normal_prio; - struct list_head run_list; const struct sched_class *sched_class; struct sched_entity se; + struct sched_rt_entity rt; #ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct 
preempt_notifier: */ struct hlist_head preempt_notifiers; #endif - unsigned short ioprio; /* * fpu_counter contains the number of consecutive context switches * that the FPU is used. If this is over a threshold, the lazy fpu @@ -956,7 +992,11 @@ struct task_struct { unsigned int policy; cpumask_t cpus_allowed; - unsigned int time_slice; + +#ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; + int rcu_flipctr_idx; +#endif /* #ifdef CONFIG_PREEMPT_RCU */ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; @@ -1046,6 +1086,11 @@ struct task_struct { /* ipc stuff */ struct sysv_sem sysvsem; #endif +#ifdef CONFIG_DETECT_SOFTLOCKUP +/* hung task detection */ + unsigned long last_switch_timestamp; + unsigned long last_switch_count; +#endif /* CPU-specific state of this task */ struct thread_struct thread; /* filesystem information */ @@ -1178,6 +1223,10 @@ struct task_struct { int make_it_fail; #endif struct prop_local_single dirties; +#ifdef CONFIG_LATENCYTOP + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; +#endif }; /* @@ -1458,6 +1507,12 @@ extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int sysctl_sched_rt_period; +extern unsigned int sysctl_sched_rt_ratio; +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) +extern unsigned int sysctl_sched_min_bal_int_shares; +extern unsigned int sysctl_sched_max_bal_int_shares; +#endif int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, @@ -1850,29 +1905,33 @@ static inline int need_resched(void) * cond_resched_lock() will drop the spinlock before scheduling, * cond_resched_softirq() will enable bhs before scheduling. */ -extern int cond_resched(void); -extern int cond_resched_lock(spinlock_t * lock); -extern int cond_resched_softirq(void); - -/* - * Does a critical section need to be broken due to another - * task waiting?: - */ -#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) -# define need_lockbreak(lock) ((lock)->break_lock) +#ifdef CONFIG_PREEMPT +static inline int cond_resched(void) +{ + return 0; +} #else -# define need_lockbreak(lock) 0 +extern int _cond_resched(void); +static inline int cond_resched(void) +{ + return _cond_resched(); +} #endif +extern int cond_resched_lock(spinlock_t * lock); +extern int cond_resched_softirq(void); /* * Does a critical section need to be broken due to another - * task waiting or preemption being signalled: + * task waiting?: (technically does not depend on CONFIG_PREEMPT, + * but a general need for low latency) */ -static inline int lock_need_resched(spinlock_t *lock) +static inline int spin_needbreak(spinlock_t *lock) { - if (need_lockbreak(lock) || need_resched()) - return 1; +#ifdef CONFIG_PREEMPT + return spin_is_contended(lock); +#else return 0; +#endif } /* diff --git a/include/linux/security.h b/include/linux/security.h index ac050830a873..d24974262dc6 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -34,6 +34,12 @@ #include <linux/xfrm.h> #include <net/flow.h> +/* only a char in selinux superblock security struct flags */ +#define FSCONTEXT_MNT 0x01 +#define CONTEXT_MNT 0x02 +#define ROOTCONTEXT_MNT 0x04 +#define DEFCONTEXT_MNT 0x08 + /* * Bounding set */ @@ -243,9 +249,6 @@ struct request_sock; * @mnt contains the mounted file system. 
* @flags contains the new filesystem flags. * @data contains the filesystem-specific data. - * @sb_post_mountroot: - * Update the security module's state when the root filesystem is mounted. - * This hook is only called if the mount was successful. * @sb_post_addmount: * Update the security module's state when a filesystem is mounted. * This hook is called any time a mount is successfully grafetd to @@ -261,6 +264,22 @@ struct request_sock; * Update module state after a successful pivot. * @old_nd contains the nameidata structure for the old root. * @new_nd contains the nameidata structure for the new root. + * @sb_get_mnt_opts: + * Get the security relevant mount options used for a superblock + * @sb the superblock to get security mount options from + * @mount_options array for pointers to mount options + * @mount_flags array of ints specifying what each mount options is + * @num_opts number of options in the arrays + * @sb_set_mnt_opts: + * Set the security relevant mount options used for a superblock + * @sb the superblock to set security mount options for + * @mount_options array for pointers to mount options + * @mount_flags array of ints specifying what each mount options is + * @num_opts number of options in the arrays + * @sb_clone_mnt_opts: + * Copy all security options from a given superblock to another + * @oldsb old superblock which contain information to clone + * @newsb new superblock which needs filled in * * Security hooks for inode operations. * @@ -1183,6 +1202,10 @@ struct request_sock; * Convert secid to security context. * @secid contains the security ID. * @secdata contains the pointer that stores the converted security context. + * @secctx_to_secid: + * Convert security context to secid. + * @secid contains the pointer to the generated security ID. + * @secdata contains the security context. * * @release_secctx: * Release the security context. 
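One intended use of the new secctx_to_secid hook is converting a security context string handed in from userspace into a secid before attaching it to a kernel object. A minimal sketch of such a caller; the function name and the fallback policy for the no-LSM case are assumptions, not dictated by this patch:

static int label_from_user(char *ctx, u32 ctxlen, u32 *secid)
{
	int err;

	err = security_secctx_to_secid(ctx, ctxlen, secid);
	if (err == -EOPNOTSUPP) {
		/* no security module registered: treat as unlabeled */
		*secid = 0;
		err = 0;
	}
	return err;
}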
@@ -1235,13 +1258,19 @@ struct security_operations { void (*sb_umount_busy) (struct vfsmount * mnt); void (*sb_post_remount) (struct vfsmount * mnt, unsigned long flags, void *data); - void (*sb_post_mountroot) (void); void (*sb_post_addmount) (struct vfsmount * mnt, struct nameidata * mountpoint_nd); int (*sb_pivotroot) (struct nameidata * old_nd, struct nameidata * new_nd); void (*sb_post_pivotroot) (struct nameidata * old_nd, struct nameidata * new_nd); + int (*sb_get_mnt_opts) (const struct super_block *sb, + char ***mount_options, int **flags, + int *num_opts); + int (*sb_set_mnt_opts) (struct super_block *sb, char **mount_options, + int *flags, int num_opts); + void (*sb_clone_mnt_opts) (const struct super_block *oldsb, + struct super_block *newsb); int (*inode_alloc_security) (struct inode *inode); void (*inode_free_security) (struct inode *inode); @@ -1371,6 +1400,7 @@ struct security_operations { int (*getprocattr)(struct task_struct *p, char *name, char **value); int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size); int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); + int (*secctx_to_secid)(char *secdata, u32 seclen, u32 *secid); void (*release_secctx)(char *secdata, u32 seclen); #ifdef CONFIG_SECURITY_NETWORK @@ -1495,10 +1525,16 @@ int security_sb_umount(struct vfsmount *mnt, int flags); void security_sb_umount_close(struct vfsmount *mnt); void security_sb_umount_busy(struct vfsmount *mnt); void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data); -void security_sb_post_mountroot(void); void security_sb_post_addmount(struct vfsmount *mnt, struct nameidata *mountpoint_nd); int security_sb_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); void security_sb_post_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); +int security_sb_get_mnt_opts(const struct super_block *sb, char ***mount_options, + int **flags, int *num_opts); +int security_sb_set_mnt_opts(struct super_block *sb, char **mount_options, + int *flags, int num_opts); +void security_sb_clone_mnt_opts(const struct super_block *oldsb, + struct super_block *newsb); + int security_inode_alloc(struct inode *inode); void security_inode_free(struct inode *inode); int security_inode_init_security(struct inode *inode, struct inode *dir, @@ -1603,6 +1639,7 @@ int security_setprocattr(struct task_struct *p, char *name, void *value, size_t int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_netlink_recv(struct sk_buff *skb, int cap); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); +int security_secctx_to_secid(char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); #else /* CONFIG_SECURITY */ @@ -1777,9 +1814,6 @@ static inline void security_sb_post_remount (struct vfsmount *mnt, unsigned long flags, void *data) { } -static inline void security_sb_post_mountroot (void) -{ } - static inline void security_sb_post_addmount (struct vfsmount *mnt, struct nameidata *mountpoint_nd) { } @@ -2266,7 +2300,7 @@ static inline struct dentry *securityfs_create_file(const char *name, mode_t mode, struct dentry *parent, void *data, - struct file_operations *fops) + const struct file_operations *fops) { return ERR_PTR(-ENODEV); } @@ -2280,6 +2314,13 @@ static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *secle return -EOPNOTSUPP; } +static inline int security_secctx_to_secid(char *secdata, + u32 seclen, + u32 *secid) +{ + return -EOPNOTSUPP; +} + 
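The new sb_get_mnt_opts/sb_set_mnt_opts pair passes the security-relevant mount options around as parallel arrays: one array of option strings and one array of per-option flag codes (presumably the FSCONTEXT_MNT/CONTEXT_MNT/ROOTCONTEXT_MNT/DEFCONTEXT_MNT values defined near the top of this header), plus a count. A hedged sketch of how a filesystem that stacks one superblock on top of another might use the wrappers declared above; the helper name is made up, and the ownership rules for the returned arrays are an assumption, not something this patch spells out:

#include <linux/fs.h>
#include <linux/security.h>

/* Hypothetical helper: propagate the security mount options from one
 * superblock to another via the new get/set pair. */
static int example_copy_sb_security(struct super_block *src,
                                    struct super_block *dst)
{
        char **opts;
        int *flags;
        int num_opts;
        int err;

        err = security_sb_get_mnt_opts(src, &opts, &flags, &num_opts);
        if (err)
                return err;

        err = security_sb_set_mnt_opts(dst, opts, flags, num_opts);

        /* Assumed: freeing opts/flags is the caller's responsibility. */
        return err;
}

When nothing needs to be rewritten per option, security_sb_clone_mnt_opts(src, dst) does the same job in a single call without exposing the arrays at all.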
static inline void security_release_secctx(char *secdata, u32 seclen) { } diff --git a/include/linux/selinux.h b/include/linux/selinux.h index 6080f73fc85f..8c2cc4c02526 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h @@ -120,16 +120,35 @@ void selinux_get_task_sid(struct task_struct *tsk, u32 *sid); int selinux_string_to_sid(char *str, u32 *sid); /** - * selinux_relabel_packet_permission - check permission to relabel a packet - * @sid: ID value to be applied to network packet (via SECMARK, most likely) + * selinux_secmark_relabel_packet_permission - secmark permission check + * @sid: SECMARK ID value to be applied to network packet * - * Returns 0 if the current task is allowed to label packets with the - * supplied security ID. Note that it is implicit that the packet is always - * being relabeled from the default unlabled value, and that the access - * control decision is made in the AVC. + * Returns 0 if the current task is allowed to set the SECMARK label of + * packets with the supplied security ID. Note that it is implicit that + * the packet is always being relabeled from the default unlabeled value, + * and that the access control decision is made in the AVC. */ -int selinux_relabel_packet_permission(u32 sid); +int selinux_secmark_relabel_packet_permission(u32 sid); +/** + * selinux_secmark_refcount_inc - increments the secmark use counter + * + * SELinux keeps track of the current SECMARK targets in use so it knows + * when to apply SECMARK label access checks to network packets. This + * function increments this reference count to indicate that a new SECMARK + * target has been configured. + */ +void selinux_secmark_refcount_inc(void); + +/** + * selinux_secmark_refcount_dec - decrements the secmark use counter + * + * SELinux keeps track of the current SECMARK targets in use so it knows + * when to apply SECMARK label access checks to network packets. This + * function decrements this reference count to indicate that one of the + * existing SECMARK targets has been removed/flushed.
+ */ +void selinux_secmark_refcount_dec(void); #else static inline int selinux_audit_rule_init(u32 field, u32 op, @@ -184,11 +203,21 @@ static inline int selinux_string_to_sid(const char *str, u32 *sid) return 0; } -static inline int selinux_relabel_packet_permission(u32 sid) +static inline int selinux_secmark_relabel_packet_permission(u32 sid) { return 0; } +static inline void selinux_secmark_refcount_inc(void) +{ + return; +} + +static inline void selinux_secmark_refcount_dec(void) +{ + return; +} + #endif /* CONFIG_SECURITY_SELINUX */ #endif /* _LINUX_SELINUX_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index ebbc02b325fc..648dfeb444db 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -63,5 +63,18 @@ extern struct list_head *seq_list_start_head(struct list_head *head, extern struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos); +struct net; +struct seq_net_private { + struct net *net; +}; + +int seq_open_net(struct inode *, struct file *, + const struct seq_operations *, int); +int seq_release_net(struct inode *, struct file *); +static inline struct net *seq_file_net(struct seq_file *seq) +{ + return ((struct seq_net_private *)seq->private)->net; +} + #endif #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bddd50bd6878..c618fbf7d173 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -95,6 +95,7 @@ struct net_device; struct scatterlist; +struct pipe_inode_info; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -287,6 +288,7 @@ struct sk_buff { __u8 pkt_type:3, fclone:2, ipvs_property:1, + peeked:1, nf_trace:1; __be16 protocol; @@ -1537,6 +1539,8 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) skb = skb->prev) +extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *err); extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); extern unsigned int datagram_poll(struct file *file, struct socket *sock, @@ -1548,7 +1552,7 @@ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, struct iovec *iov); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, +extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); extern __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); @@ -1559,6 +1563,11 @@ extern int skb_store_bits(struct sk_buff *skb, int offset, extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum); +extern int skb_splice_bits(struct sk_buff *skb, + unsigned int offset, + struct pipe_inode_info *pipe, + unsigned int len, + unsigned int flags); extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); diff --git a/include/linux/smp.h b/include/linux/smp.h index c25e66bcecf3..55232ccf9cfd 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -78,6 +78,8 @@ int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); */ void smp_prepare_boot_cpu(void); +extern unsigned int setup_max_cpus; + #else /* !SMP */ /* diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 58962c51dee1..aab3a4cff4e1 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -17,22 +17,10 @@ 
extern void __lockfunc __release_kernel_lock(void); __release_kernel_lock(); \ } while (0) -/* - * Non-SMP kernels will never block on the kernel lock, - * so we are better off returning a constant zero from - * reacquire_kernel_lock() so that the compiler can see - * it at compile-time. - */ -#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL) -# define return_value_on_smp return -#else -# define return_value_on_smp -#endif - static inline int reacquire_kernel_lock(struct task_struct *task) { if (unlikely(task->lock_depth >= 0)) - return_value_on_smp __reacquire_kernel_lock(); + return __reacquire_kernel_lock(); return 0; } diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 89f0c2b5f405..86d3effb2836 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -217,4 +217,35 @@ enum __LINUX_MIB_MAX }; +/* linux Xfrm mib definitions */ +enum +{ + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR, /* XfrmInError */ + LINUX_MIB_XFRMINBUFFERERROR, /* XfrmInBufferError */ + LINUX_MIB_XFRMINHDRERROR, /* XfrmInHdrError */ + LINUX_MIB_XFRMINNOSTATES, /* XfrmInNoStates */ + LINUX_MIB_XFRMINSTATEPROTOERROR, /* XfrmInStateProtoError */ + LINUX_MIB_XFRMINSTATEMODEERROR, /* XfrmInStateModeError */ + LINUX_MIB_XFRMINSEQOUTOFWINDOW, /* XfrmInSeqOutOfWindow */ + LINUX_MIB_XFRMINSTATEEXPIRED, /* XfrmInStateExpired */ + LINUX_MIB_XFRMINSTATEMISMATCH, /* XfrmInStateMismatch */ + LINUX_MIB_XFRMINSTATEINVALID, /* XfrmInStateInvalid */ + LINUX_MIB_XFRMINTMPLMISMATCH, /* XfrmInTmplMismatch */ + LINUX_MIB_XFRMINNOPOLS, /* XfrmInNoPols */ + LINUX_MIB_XFRMINPOLBLOCK, /* XfrmInPolBlock */ + LINUX_MIB_XFRMINPOLERROR, /* XfrmInPolError */ + LINUX_MIB_XFRMOUTERROR, /* XfrmOutError */ + LINUX_MIB_XFRMOUTBUNDLEGENERROR, /* XfrmOutBundleGenError */ + LINUX_MIB_XFRMOUTBUNDLECHECKERROR, /* XfrmOutBundleCheckError */ + LINUX_MIB_XFRMOUTNOSTATES, /* XfrmOutNoStates */ + LINUX_MIB_XFRMOUTSTATEPROTOERROR, /* XfrmOutStateProtoError */ + LINUX_MIB_XFRMOUTSTATEMODEERROR, /* XfrmOutStateModeError */ + LINUX_MIB_XFRMOUTSTATEEXPIRED, /* XfrmOutStateExpired */ + LINUX_MIB_XFRMOUTPOLBLOCK, /* XfrmOutPolBlock */ + LINUX_MIB_XFRMOUTPOLDEAD, /* XfrmOutPolDead */ + LINUX_MIB_XFRMOUTPOLERROR, /* XfrmOutPolError */ + __LINUX_MIB_XFRMMAX +}; + #endif /* _LINUX_SNMP_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index c22ef1c1afb8..bd2b30a74e76 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -24,7 +24,6 @@ struct __kernel_sockaddr_storage { #include <linux/types.h> /* pid_t */ #include <linux/compiler.h> /* __user */ -extern int sysctl_somaxconn; #ifdef CONFIG_PROC_FS struct seq_file; extern void socket_seq_show(struct seq_file *seq); @@ -185,6 +184,7 @@ struct ucred { #define AF_PPPOX 24 /* PPPoX sockets */ #define AF_WANPIPE 25 /* Wanpipe API Sockets */ #define AF_LLC 26 /* Linux LLC */ +#define AF_CAN 29 /* Controller Area Network */ #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ #define AF_IUCV 32 /* IUCV sockets */ @@ -220,6 +220,7 @@ struct ucred { #define PF_PPPOX AF_PPPOX #define PF_WANPIPE AF_WANPIPE #define PF_LLC AF_LLC +#define PF_CAN AF_CAN #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH #define PF_IUCV AF_IUCV diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index c376f3b36c89..124449733c55 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -120,6 +120,12 @@ do { \ #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) +#ifdef CONFIG_GENERIC_LOCKBREAK +#define 
spin_is_contended(lock) ((lock)->break_lock) +#else +#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) +#endif + /** * spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index f6a3a951b79e..68d88f71f1a2 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -19,7 +19,7 @@ typedef struct { raw_spinlock_t raw_lock; -#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) +#ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif #ifdef CONFIG_DEBUG_SPINLOCK @@ -35,7 +35,7 @@ typedef struct { typedef struct { raw_rwlock_t raw_lock; -#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) +#ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif #ifdef CONFIG_DEBUG_SPINLOCK diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index ea54c4c9a4ec..938234c4a996 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -64,6 +64,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ +#define __raw_spin_is_contended(lock) (((void)(lock), 0)) + #define __raw_read_can_lock(lock) (((void)(lock), 1)) #define __raw_write_can_lock(lock) (((void)(lock), 1)) diff --git a/include/linux/splice.h b/include/linux/splice.h index 33e447f98a54..528dcb93c2f2 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -53,6 +53,7 @@ struct splice_pipe_desc { int nr_pages; /* number of pages in map */ unsigned int flags; /* splice flags */ const struct pipe_buf_operations *ops;/* ops associated with output pipe */ + void (*spd_release)(struct splice_pipe_desc *, unsigned int); }; typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 2b5c312c4960..e18f5c23b930 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -15,22 +15,19 @@ struct pcmcia_device; struct ssb_bus; struct ssb_driver; - -struct ssb_sprom_r1 { - u16 pci_spid; /* Subsystem Product ID for PCI */ - u16 pci_svid; /* Subsystem Vendor ID for PCI */ - u16 pci_pid; /* Product ID for PCI */ +struct ssb_sprom { + u8 revision; u8 il0mac[6]; /* MAC address for 802.11b/g */ u8 et0mac[6]; /* MAC address for Ethernet */ u8 et1mac[6]; /* MAC address for 802.11a */ - u8 et0phyaddr:5; /* MII address for enet0 */ - u8 et1phyaddr:5; /* MII address for enet1 */ - u8 et0mdcport:1; /* MDIO for enet0 */ - u8 et1mdcport:1; /* MDIO for enet1 */ - u8 board_rev; /* Board revision */ - u8 country_code:4; /* Country Code */ - u8 antenna_a:2; /* Antenna 0/1 available for A-PHY */ - u8 antenna_bg:2; /* Antenna 0/1 available for B-PHY and G-PHY */ + u8 et0phyaddr; /* MII address for enet0 */ + u8 et1phyaddr; /* MII address for enet1 */ + u8 et0mdcport; /* MDIO for enet0 */ + u8 et1mdcport; /* MDIO for enet1 */ + u8 board_rev; /* Board revision number from SPROM. 
*/ + u8 country_code; /* Country Code */ + u8 ant_available_a; /* A-PHY antenna available bits (up to 4) */ + u8 ant_available_bg; /* B/G-PHY antenna available bits (up to 4) */ u16 pa0b0; u16 pa0b1; u16 pa0b2; @@ -41,61 +38,26 @@ struct ssb_sprom_r1 { u8 gpio1; /* GPIO pin 1 */ u8 gpio2; /* GPIO pin 2 */ u8 gpio3; /* GPIO pin 3 */ - u16 maxpwr_a; /* A-PHY Power Amplifier Max Power (in dBm Q5.2) */ - u16 maxpwr_bg; /* B/G-PHY Power Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_a; /* A-PHY Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_bg; /* B/G-PHY Amplifier Max Power (in dBm Q5.2) */ u8 itssi_a; /* Idle TSSI Target for A-PHY */ u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ u16 boardflags_lo; /* Boardflags (low 16 bits) */ - u8 antenna_gain_a; /* A-PHY Antenna gain (in dBm Q5.2) */ - u8 antenna_gain_bg; /* B/G-PHY Antenna gain (in dBm Q5.2) */ - u8 oem[8]; /* OEM string (rev 1 only) */ -}; - -struct ssb_sprom_r2 { u16 boardflags_hi; /* Boardflags (high 16 bits) */ - u8 maxpwr_a_lo; /* A-PHY Max Power Low */ - u8 maxpwr_a_hi; /* A-PHY Max Power High */ - u16 pa1lob0; /* A-PHY PA Low Settings */ - u16 pa1lob1; /* A-PHY PA Low Settings */ - u16 pa1lob2; /* A-PHY PA Low Settings */ - u16 pa1hib0; /* A-PHY PA High Settings */ - u16 pa1hib1; /* A-PHY PA High Settings */ - u16 pa1hib2; /* A-PHY PA High Settings */ - u8 ofdm_pwr_off; /* OFDM Power Offset from CCK Level */ - u8 country_str[2]; /* Two char Country Code */ -}; - -struct ssb_sprom_r3 { - u32 ofdmapo; /* A-PHY OFDM Mid Power Offset */ - u32 ofdmalpo; /* A-PHY OFDM Low Power Offset */ - u32 ofdmahpo; /* A-PHY OFDM High Power Offset */ - u8 gpioldc_on_cnt; /* GPIO LED Powersave Duty Cycle ON count */ - u8 gpioldc_off_cnt; /* GPIO LED Powersave Duty Cycle OFF count */ - u8 cckpo_1M:4; /* CCK Power Offset for Rate 1M */ - u8 cckpo_2M:4; /* CCK Power Offset for Rate 2M */ - u8 cckpo_55M:4; /* CCK Power Offset for Rate 5.5M */ - u8 cckpo_11M:4; /* CCK Power Offset for Rate 11M */ - u32 ofdmgpo; /* G-PHY OFDM Power Offset */ -}; - -struct ssb_sprom_r4 { - /* TODO */ -}; -struct ssb_sprom { - u8 revision; - u8 crc; - /* The valid r# fields are selected by the "revision". - * Revision 3 and lower inherit from lower revisions. - */ - union { + /* Antenna gain values for up to 4 antennas + * on each band. Values in dBm/4 (Q5.2). Negative gain means the + * loss in the connectors is bigger than the gain. */ + struct { struct { - struct ssb_sprom_r1 r1; - struct ssb_sprom_r2 r2; - struct ssb_sprom_r3 r3; - }; - struct ssb_sprom_r4 r4; - }; + s8 a0, a1, a2, a3; + } ghz24; /* 2.4GHz band */ + struct { + s8 a0, a1, a2, a3; + } ghz5; /* 5GHz band */ + } antenna_gain; + + /* TODO - add any parameters needed from rev 2, 3, or 4 SPROMs */ }; /* Information about the PCB the circuitry is soldered on. */ @@ -270,7 +232,8 @@ struct ssb_bus { struct ssb_device *mapped_device; /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ u8 mapped_pcmcia_seg; - /* Lock for core and segment switching. */ + /* Lock for core and segment switching. + * On PCMCIA-host busses this is used to protect the whole MMIO access. */ spinlock_t bar_lock; /* The bus this backplane is running on. */ @@ -288,6 +251,7 @@ struct ssb_bus { /* ID information about the Chip. */ u16 chip_id; u16 chip_rev; + u16 sprom_size; /* number of words in sprom */ u8 chip_package; /* List of devices (cores) on the backplane. 
*/ @@ -402,6 +366,13 @@ static inline void ssb_pcihost_unregister(struct pci_driver *driver) { pci_unregister_driver(driver); } + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ + if (sdev->bus->bustype == SSB_BUSTYPE_PCI) + pci_set_power_state(sdev->bus->host_pci, state); +} #endif /* CONFIG_SSB_PCIHOST */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 47c7c71a5acf..ebad0bac9801 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -147,6 +147,10 @@ #define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */ #define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */ #define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */ +#define SSB_IDLOW_SSBREV_24 0x40000000 /* ?? Found in BCM4328 */ +#define SSB_IDLOW_SSBREV_25 0x50000000 /* ?? Not Found yet */ +#define SSB_IDLOW_SSBREV_26 0x60000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDLOW_SSBREV_27 0x70000000 /* ?? Found in some BCM4311/2 */ #define SSB_IDHIGH 0x0FFC /* SB Identification High */ #define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */ #define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */ @@ -162,11 +166,16 @@ */ #define SSB_SPROMSIZE_WORDS 64 #define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R123 64 +#define SSB_SPROMSIZE_WORDS_R4 220 +#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) +#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) #define SSB_SPROM_BASE 0x1000 #define SSB_SPROM_REVISION 0x107E #define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ #define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ #define SSB_SPROM_REVISION_CRC_SHIFT 8 + /* SPROM Revision 1 */ #define SSB_SPROM1_SPID 0x1004 /* Subsystem Product ID for PCI */ #define SSB_SPROM1_SVID 0x1006 /* Subsystem Vendor ID for PCI */ @@ -184,10 +193,10 @@ #define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ #define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ #define SSB_SPROM1_BINF_CCODE_SHIFT 8 -#define SSB_SPROM1_BINF_ANTA 0x3000 /* Available A-PHY antennas */ -#define SSB_SPROM1_BINF_ANTA_SHIFT 12 -#define SSB_SPROM1_BINF_ANTBG 0xC000 /* Available B-PHY antennas */ -#define SSB_SPROM1_BINF_ANTBG_SHIFT 14 +#define SSB_SPROM1_BINF_ANTBG 0x3000 /* Available B-PHY and G-PHY antennas */ +#define SSB_SPROM1_BINF_ANTBG_SHIFT 12 +#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */ +#define SSB_SPROM1_BINF_ANTA_SHIFT 14 #define SSB_SPROM1_PA0B0 0x105E #define SSB_SPROM1_PA0B1 0x1060 #define SSB_SPROM1_PA0B2 0x1062 @@ -212,10 +221,11 @@ #define SSB_SPROM1_ITSSI_A_SHIFT 8 #define SSB_SPROM1_BFLLO 0x1072 /* Boardflags (low 16 bits) */ #define SSB_SPROM1_AGAIN 0x1074 /* Antenna Gain (in dBm Q5.2) */ -#define SSB_SPROM1_AGAIN_A 0x00FF /* A-PHY */ -#define SSB_SPROM1_AGAIN_BG 0xFF00 /* B-PHY and G-PHY */ -#define SSB_SPROM1_AGAIN_BG_SHIFT 8 -#define SSB_SPROM1_OEM 0x1076 /* 8 bytes OEM string (rev 1 only) */ +#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */ +#define SSB_SPROM1_AGAIN_BG_SHIFT 0 +#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_AGAIN_A_SHIFT 8 + /* SPROM Revision 2 (inherits from rev 1) */ #define SSB_SPROM2_BFLHI 0x1038 /* Boardflags (high 16 bits) */ #define SSB_SPROM2_MAXP_A 0x103A /* A-PHY Max Power */ @@ -232,7 +242,11 @@ #define SSB_SPROM2_OPO_VALUE 0x00FF #define SSB_SPROM2_OPO_UNUSED 0xFF00 #define SSB_SPROM2_CCODE 0x107C /* Two char Country Code */ -/* SPROM Revision 3 (inherits from rev 2) 
*/ + +/* SPROM Revision 3 (inherits most data from rev 2) */ +#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM3_ET0MAC 0x1050 /* 6 bytes MAC address for Ethernet ?? */ +#define SSB_SPROM3_ET1MAC 0x1050 /* 6 bytes MAC address for 802.11a ?? */ #define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ @@ -251,6 +265,57 @@ #define SSB_SPROM3_CCKPO_11M_SHIFT 12 #define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ +/* SPROM Revision 4 */ +#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM4_ET0MAC 0x1018 /* 6 bytes MAC address for Ethernet ?? */ +#define SSB_SPROM4_ET1MAC 0x1018 /* 6 bytes MAC address for 802.11a ?? */ +#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */ +#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM4_CCODE 0x1052 /* Country Code (2 bytes) */ +#define SSB_SPROM4_ANTAVAIL 0x105D /* Antenna available bitfields */ +#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0 +#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8 +#define SSB_SPROM4_BFLLO 0x1044 /* Boardflags (low 16 bits) */ +#define SSB_SPROM4_AGAIN01 0x105E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM4_AGAIN0_SHIFT 0 +#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM4_AGAIN1_SHIFT 8 +#define SSB_SPROM4_AGAIN23 0x1060 +#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM4_AGAIN2_SHIFT 0 +#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM4_AGAIN3_SHIFT 8 +#define SSB_SPROM4_BFLHI 0x1046 /* Board Flags Hi */ +#define SSB_SPROM4_MAXP_BG 0x1080 /* Max Power BG in path 1 */ +#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ +#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM4_ITSSI_BG_SHIFT 8 +#define SSB_SPROM4_MAXP_A 0x108A /* Max Power A in path 1 */ +#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */ +#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM4_ITSSI_A_SHIFT 8 +#define SSB_SPROM4_GPIOA 0x1056 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM4_GPIOA_P1_SHIFT 8 +#define SSB_SPROM4_GPIOB 0x1058 /* Gen. 
Purpose IO # 2 and 3 */ +#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM4_GPIOB_P3_SHIFT 8 +#define SSB_SPROM4_PA0B0 0x1082 /* The paXbY locations are */ +#define SSB_SPROM4_PA0B1 0x1084 /* only guesses */ +#define SSB_SPROM4_PA0B2 0x1086 +#define SSB_SPROM4_PA1B0 0x108E +#define SSB_SPROM4_PA1B1 0x1090 +#define SSB_SPROM4_PA1B2 0x1092 + /* Values for SSB_SPROM1_BINF_CCODE */ enum { SSB_SPROM1CCODE_WORLD = 0, diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index e7fa657d0c49..5da9794b2d78 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -9,10 +9,13 @@ struct stack_trace { }; extern void save_stack_trace(struct stack_trace *trace); +extern void save_stack_trace_tsk(struct task_struct *tsk, + struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); #else # define save_stack_trace(trace) do { } while (0) +# define save_stack_trace_tsk(tsk, trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) #endif diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index d9d5c5ad826c..3e9addc741c1 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -46,6 +46,7 @@ struct rpc_clnt { cl_autobind : 1;/* use getport() */ struct rpc_rtt * cl_rtt; /* RTO estimator data */ + const struct rpc_timeout *cl_timeout; /* Timeout strategy */ int cl_nodelen; /* nodename length */ char cl_nodename[UNX_MAXNODENAME]; @@ -54,6 +55,7 @@ struct rpc_clnt { struct dentry * cl_dentry; /* inode */ struct rpc_clnt * cl_parent; /* Points to parent of clones */ struct rpc_rtt cl_rtt_default; + struct rpc_timeout cl_timeout_default; struct rpc_program * cl_program; char cl_inline_name[32]; }; @@ -99,7 +101,7 @@ struct rpc_create_args { struct sockaddr *address; size_t addrsize; struct sockaddr *saddress; - struct rpc_timeout *timeout; + const struct rpc_timeout *timeout; char *servername; struct rpc_program *program; u32 version; @@ -123,11 +125,10 @@ void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); int rpcb_register(u32, u32, int, unsigned short, int *); -int rpcb_getport_sync(struct sockaddr_in *, __u32, __u32, int); +int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int); void rpcb_getport_async(struct rpc_task *); -void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); - +void rpc_call_start(struct rpc_task *); int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, const struct rpc_call_ops *tk_ops, void *calldata); @@ -142,7 +143,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); size_t rpc_max_payload(struct rpc_clnt *); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); -char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); +const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index c4beb5775111..70df4f1d8847 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -152,5 +152,44 @@ typedef __be32 rpc_fraghdr; */ #define RPCBIND_MAXNETIDLEN (4u) +/* + * Universal addresses are introduced in RFC 1833 and further spelled + * out in RFC 3530. 
RPCBIND_MAXUADDRLEN defines a maximum byte length + * of a universal address for use in allocating buffers and character + * arrays. + * + * Quoting RFC 3530, section 2.2: + * + * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the + * US-ASCII string: + * + * h1.h2.h3.h4.p1.p2 + * + * The prefix, "h1.h2.h3.h4", is the standard textual form for + * representing an IPv4 address, which is always four octets long. + * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively, + * the first through fourth octets each converted to ASCII-decimal. + * Assuming big-endian ordering, p1 and p2 are, respectively, the first + * and second octets each converted to ASCII-decimal. For example, if a + * host, in big-endian order, has an address of 0x0A010307 and there is + * a service listening on, in big endian order, port 0x020F (decimal + * 527), then the complete universal address is "10.1.3.7.2.15". + * + * ... + * + * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the + * US-ASCII string: + * + * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2 + * + * The suffix "p1.p2" is the service port, and is computed the same way + * as with universal addresses for TCP and UDP over IPv4. The prefix, + * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for + * representing an IPv6 address as defined in Section 2.2 of [RFC2373]. + * Additionally, the two alternative forms specified in Section 2.2 of + * [RFC2373] are also acceptable. + */ +#define RPCBIND_MAXUADDRLEN (56u) + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 8ea077db0099..ce3d1b132729 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -56,8 +56,6 @@ struct rpc_task { __u8 tk_garb_retry; __u8 tk_cred_retry; - unsigned long tk_cookie; /* Cookie for batching tasks */ - /* * timeout_fn to be executed by timer bottom half * callback to be executed after waking up @@ -78,7 +76,6 @@ struct rpc_task { struct timer_list tk_timer; /* kernel timer */ unsigned long tk_timeout; /* timeout for rpc_sleep() */ unsigned short tk_flags; /* misc flags */ - unsigned char tk_priority : 2;/* Task priority */ unsigned long tk_runstate; /* Task run status */ struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could * be any workqueue @@ -94,6 +91,9 @@ struct rpc_task { unsigned long tk_start; /* RPC task init timestamp */ long tk_rtt; /* round-trip time (jiffies) */ + pid_t tk_owner; /* Process id for batching tasks */ + unsigned char tk_priority : 2;/* Task priority */ + #ifdef RPC_DEBUG unsigned short tk_pid; /* debugging aid */ #endif @@ -117,6 +117,15 @@ struct rpc_call_ops { void (*rpc_release)(void *); }; +struct rpc_task_setup { + struct rpc_task *task; + struct rpc_clnt *rpc_client; + const struct rpc_message *rpc_message; + const struct rpc_call_ops *callback_ops; + void *callback_data; + unsigned short flags; + signed char priority; +}; /* * RPC task flags @@ -180,10 +189,10 @@ struct rpc_call_ops { * Note: if you change these, you must also change * the task initialization definitions below. 
*/ -#define RPC_PRIORITY_LOW 0 -#define RPC_PRIORITY_NORMAL 1 -#define RPC_PRIORITY_HIGH 2 -#define RPC_NR_PRIORITY (RPC_PRIORITY_HIGH+1) +#define RPC_PRIORITY_LOW (-1) +#define RPC_PRIORITY_NORMAL (0) +#define RPC_PRIORITY_HIGH (1) +#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW) /* * RPC synchronization objects @@ -191,7 +200,7 @@ struct rpc_call_ops { struct rpc_wait_queue { spinlock_t lock; struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ - unsigned long cookie; /* cookie of last task serviced */ + pid_t owner; /* process id of last task serviced */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ unsigned char count; /* # task groups remaining serviced so far */ @@ -208,41 +217,13 @@ struct rpc_wait_queue { * performance of NFS operations such as read/write. */ #define RPC_BATCH_COUNT 16 - -#ifndef RPC_DEBUG -# define RPC_WAITQ_INIT(var,qname) { \ - .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ - .tasks = { \ - [0] = LIST_HEAD_INIT(var.tasks[0]), \ - [1] = LIST_HEAD_INIT(var.tasks[1]), \ - [2] = LIST_HEAD_INIT(var.tasks[2]), \ - }, \ - } -#else -# define RPC_WAITQ_INIT(var,qname) { \ - .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ - .tasks = { \ - [0] = LIST_HEAD_INIT(var.tasks[0]), \ - [1] = LIST_HEAD_INIT(var.tasks[1]), \ - [2] = LIST_HEAD_INIT(var.tasks[2]), \ - }, \ - .name = qname, \ - } -#endif -# define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname) - #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) /* * Function prototypes */ -struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags, - const struct rpc_call_ops *ops, void *data); -struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, - const struct rpc_call_ops *ops, void *data); -void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, - int flags, const struct rpc_call_ops *ops, - void *data); +struct rpc_task *rpc_new_task(const struct rpc_task_setup *); +struct rpc_task *rpc_run_task(const struct rpc_task_setup *); void rpc_put_task(struct rpc_task *); void rpc_exit_task(struct rpc_task *); void rpc_release_calldata(const struct rpc_call_ops *, void *); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 30b17b3bc1a9..b3ff9a815e6f 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -120,7 +120,7 @@ struct rpc_xprt { struct kref kref; /* Reference count */ struct rpc_xprt_ops * ops; /* transport methods */ - struct rpc_timeout timeout; /* timeout parms */ + const struct rpc_timeout *timeout; /* timeout parms */ struct sockaddr_storage addr; /* server address */ size_t addrlen; /* size of server address */ int prot; /* IP protocol */ @@ -183,7 +183,7 @@ struct rpc_xprt { bklog_u; /* backlog queue utilization */ } stat; - char * address_strings[RPC_DISPLAY_MAX]; + const char *address_strings[RPC_DISPLAY_MAX]; }; struct xprt_create { @@ -191,7 +191,6 @@ struct xprt_create { struct sockaddr * srcaddr; /* optional local address */ struct sockaddr * dstaddr; /* remote peer address */ size_t addrlen; - struct rpc_timeout * timeout; /* optional timeout parameters */ }; struct xprt_class { @@ -203,11 +202,6 @@ struct xprt_class { }; /* - * Transport operations used by ULPs - */ -void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr); - -/* * Generic internal transport functions */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args); @@ -245,7 +239,8 @@ void 
xprt_adjust_cwnd(struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); void xprt_complete_rqst(struct rpc_task *task, int copied); void xprt_release_rqst_cong(struct rpc_task *task); -void xprt_disconnect(struct rpc_xprt *xprt); +void xprt_disconnect_done(struct rpc_xprt *xprt); +void xprt_force_disconnect(struct rpc_xprt *xprt); /* * Reserved bit positions in xprt->state @@ -256,6 +251,7 @@ void xprt_disconnect(struct rpc_xprt *xprt); #define XPRT_CLOSE_WAIT (3) #define XPRT_BOUND (4) #define XPRT_BINDING (5) +#define XPRT_CLOSING (6) static inline void xprt_set_connected(struct rpc_xprt *xprt) { diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4360e0816956..40280df2a3db 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -211,9 +211,6 @@ static inline int hibernate(void) { return -ENOSYS; } #ifdef CONFIG_PM_SLEEP void save_processor_state(void); void restore_processor_state(void); -struct saved_context; -void __save_processor_state(struct saved_context *ctxt); -void __restore_processor_state(struct saved_context *ctxt); /* kernel/power/main.c */ extern struct blocking_notifier_head pm_chain_head; diff --git a/include/linux/swap.h b/include/linux/swap.h index 4f3838adbb30..2c3ce4c69b25 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -6,6 +6,7 @@ #include <linux/mmzone.h> #include <linux/list.h> #include <linux/sched.h> +#include <linux/pagemap.h> #include <asm/atomic.h> #include <asm/page.h> diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 4f5047df8a9e..89faebfe48b8 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -945,7 +945,10 @@ enum /* For the /proc/sys support */ struct ctl_table; +struct nsproxy; extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev); +extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, + struct ctl_table_header *prev); extern void sysctl_head_finish(struct ctl_table_header *prev); extern int sysctl_perm(struct ctl_table *table, int op); @@ -1049,6 +1052,13 @@ struct ctl_table void *extra2; }; +struct ctl_table_root { + struct list_head root_list; + struct list_head header_list; + struct list_head *(*lookup)(struct ctl_table_root *root, + struct nsproxy *namespaces); +}; + /* struct ctl_table_header is used to maintain dynamic lists of struct ctl_table trees. 
*/ struct ctl_table_header @@ -1057,12 +1067,26 @@ struct ctl_table_header struct list_head ctl_entry; int used; struct completion *unregistering; + struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; +}; + +/* struct ctl_path describes where in the hierarchy a table is added */ +struct ctl_path { + const char *procname; + int ctl_name; }; +void register_sysctl_root(struct ctl_table_root *root); +struct ctl_table_header *__register_sysctl_paths( + struct ctl_table_root *root, struct nsproxy *namespaces, + const struct ctl_path *path, struct ctl_table *table); struct ctl_table_header *register_sysctl_table(struct ctl_table * table); +struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, + struct ctl_table *table); void unregister_sysctl_table(struct ctl_table_header * table); -int sysctl_check_table(struct ctl_table *table); +int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table); #else /* __KERNEL__ */ diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index e285746588d6..f752e73bf977 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -29,6 +29,7 @@ struct sys_device; struct sysdev_class { + const char *name; struct list_head drivers; /* Default operations for these types of devices */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 149ab62329e2..802710438a9e 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -32,6 +32,8 @@ struct attribute { struct attribute_group { const char *name; + int (*is_visible)(struct kobject *, + struct attribute *, int); struct attribute **attrs; }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index bac17c59b24e..08027f1d7f31 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -330,10 +330,12 @@ struct tcp_sock { struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ - struct tcp_sack_block_wire recv_sack_cache[4]; + struct tcp_sack_block recv_sack_cache[4]; - u32 highest_sack; /* Start seq of globally highest revd SACK - * (validity guaranteed only if sacked_out > 0) */ + struct sk_buff *highest_sack; /* highest skb with SACK received + * (validity guaranteed only if + * sacked_out > 0) + */ /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; @@ -341,10 +343,7 @@ struct tcp_sock { struct sk_buff *scoreboard_skb_hint; struct sk_buff *retransmit_skb_hint; struct sk_buff *forward_skb_hint; - struct sk_buff *fastpath_skb_hint; - int fastpath_cnt_hint; /* Lags behind by current skb's pcount - * compared to respective fackets_out */ int lost_cnt_hint; int retransmit_cnt_hint; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 9c4ad755d7e5..dfbdfb9836f4 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -42,27 +42,27 @@ extern long do_no_restart_syscall(struct restart_block *parm); static inline void set_ti_thread_flag(struct thread_info *ti, int flag) { - set_bit(flag,&ti->flags); + set_bit(flag, (unsigned long *)&ti->flags); } static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) { - clear_bit(flag,&ti->flags); + clear_bit(flag, (unsigned long *)&ti->flags); } static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) { - return test_and_set_bit(flag,&ti->flags); + return test_and_set_bit(flag, (unsigned long *)&ti->flags); } static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) { - return test_and_clear_bit(flag,&ti->flags); 
+ return test_and_clear_bit(flag, (unsigned long *)&ti->flags); } static inline int test_ti_thread_flag(struct thread_info *ti, int flag) { - return test_bit(flag,&ti->flags); + return test_bit(flag, (unsigned long *)&ti->flags); } #define set_thread_flag(flag) \ diff --git a/include/linux/tick.h b/include/linux/tick.h index f4a1395e05ff..0fadf95debe1 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -51,8 +51,10 @@ struct tick_sched { unsigned long idle_jiffies; unsigned long idle_calls; unsigned long idle_sleeps; + int idle_active; ktime_t idle_entrytime; ktime_t idle_sleeptime; + ktime_t idle_lastupdate; ktime_t sleep_length; unsigned long last_jiffies; unsigned long next_jiffies; @@ -103,6 +105,8 @@ extern void tick_nohz_stop_sched_tick(void); extern void tick_nohz_restart_sched_tick(void); extern void tick_nohz_update_jiffies(void); extern ktime_t tick_nohz_get_sleep_length(void); +extern void tick_nohz_stop_idle(int cpu); +extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); # else static inline void tick_nohz_stop_sched_tick(void) { } static inline void tick_nohz_restart_sched_tick(void) { } @@ -113,6 +117,8 @@ static inline ktime_t tick_nohz_get_sleep_length(void) return len; } +static inline void tick_nohz_stop_idle(int cpu) { } +static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; } # endif /* !NO_HZ */ #endif diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 6b3a31805c72..2096b76d0cee 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -120,7 +120,7 @@ struct tifm_adapter { struct completion *finish_me; struct work_struct media_switcher; - struct class_device cdev; + struct device dev; void (*eject)(struct tifm_adapter *fm, struct tifm_dev *sock); diff --git a/include/linux/timer.h b/include/linux/timer.h index 78cf899b4409..de0e71359ede 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -5,7 +5,7 @@ #include <linux/ktime.h> #include <linux/stddef.h> -struct tvec_t_base_s; +struct tvec_base; struct timer_list { struct list_head entry; @@ -14,7 +14,7 @@ struct timer_list { void (*function)(unsigned long); unsigned long data; - struct tvec_t_base_s *base; + struct tvec_base *base; #ifdef CONFIG_TIMER_STATS void *start_site; char start_comm[16]; @@ -22,7 +22,7 @@ struct timer_list { #endif }; -extern struct tvec_t_base_s boot_tvec_bases; +extern struct tvec_base boot_tvec_bases; #define TIMER_INITIALIZER(_function, _expires, _data) { \ .function = (_function), \ diff --git a/include/linux/topology.h b/include/linux/topology.h index 47729f18bfdf..2352f46160d3 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -5,7 +5,7 @@ * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -103,6 +103,7 @@ .forkexec_idx = 0, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ + | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_AFFINE \ | SD_WAKE_IDLE \ @@ -134,6 +135,7 @@ .forkexec_idx = 1, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ + | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_AFFINE \ | SD_WAKE_IDLE \ @@ -165,6 +167,7 @@ .forkexec_idx = 1, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ + | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_AFFINE \ | BALANCE_FOR_PKG_POWER,\ diff --git a/include/linux/tty.h b/include/linux/tty.h index defd2ab72449..402de892b3ed 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -23,7 +23,7 @@ */ #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ -#define NR_LDISCS 17 +#define NR_LDISCS 18 /* line disciplines */ #define N_TTY 0 @@ -44,6 +44,7 @@ #define N_SYNC_PPP 14 /* synchronous PPP */ #define N_HCI 15 /* Bluetooth HCI UART */ #define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */ +#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ /* * This character is the same as _POSIX_VDISABLE: it cannot be used as diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 44c28e94df50..973386d439da 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -18,20 +18,22 @@ #include <linux/fs.h> #include <linux/interrupt.h> +struct uio_map; + /** * struct uio_mem - description of a UIO memory region - * @kobj: kobject for this mapping * @addr: address of the device's memory * @size: size of IO * @memtype: type of memory addr points to * @internal_addr: ioremap-ped version of addr, for driver internal use + * @map: for use by the UIO core only. 
*/ struct uio_mem { - struct kobject kobj; unsigned long addr; unsigned long size; int memtype; void __iomem *internal_addr; + struct uio_map *map; }; #define MAX_UIO_MAPS 5 diff --git a/include/linux/wireless.h b/include/linux/wireless.h index 0987aa7a6cf5..74e84caa1e20 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h @@ -541,6 +541,16 @@ /* Maximum size of returned data */ #define IW_SCAN_MAX_DATA 4096 /* In bytes */ +/* Scan capability flags - in (struct iw_range *)->scan_capa */ +#define IW_SCAN_CAPA_NONE 0x00 +#define IW_SCAN_CAPA_ESSID 0x01 +#define IW_SCAN_CAPA_BSSID 0x02 +#define IW_SCAN_CAPA_CHANNEL 0x04 +#define IW_SCAN_CAPA_MODE 0x08 +#define IW_SCAN_CAPA_RATE 0x10 +#define IW_SCAN_CAPA_TYPE 0x20 +#define IW_SCAN_CAPA_TIME 0x40 + /* Max number of char in custom event - use multiple of them if needed */ #define IW_CUSTOM_MAX 256 /* In bytes */ @@ -963,6 +973,9 @@ struct iw_range __u16 old_num_channels; __u8 old_num_frequency; + /* Scan capabilities */ + __u8 scan_capa; /* IW_SCAN_CAPA_* bit field */ + /* Wireless event capability bitmasks */ __u32 event_capa[6]; diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index b58adc52448d..9b5b00c4ef9d 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -91,9 +91,9 @@ struct xfrm_replay_state }; struct xfrm_algo { - char alg_name[64]; - int alg_key_len; /* in bits */ - char alg_key[0]; + char alg_name[64]; + unsigned int alg_key_len; /* in bits */ + char alg_key[0]; }; struct xfrm_stats { @@ -114,6 +114,7 @@ enum XFRM_POLICY_IN = 0, XFRM_POLICY_OUT = 1, XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, XFRM_POLICY_MAX = 3 }; @@ -328,6 +329,7 @@ struct xfrm_usersa_info { #define XFRM_STATE_DECAP_DSCP 2 #define XFRM_STATE_NOPMTUDISC 4 #define XFRM_STATE_WILDRECV 8 +#define XFRM_STATE_ICMP 16 }; struct xfrm_usersa_id { @@ -362,6 +364,8 @@ struct xfrm_userpolicy_info { #define XFRM_POLICY_BLOCK 1 __u8 flags; #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */ + /* Automatically expand selector to include matching ICMP payloads. */ +#define XFRM_POLICY_ICMP 2 __u8 share; }; diff --git a/include/media/cs5345.h b/include/media/cs5345.h new file mode 100644 index 000000000000..6ccae24e65ed --- /dev/null +++ b/include/media/cs5345.h @@ -0,0 +1,39 @@ +/* + cs5345.h - definition for cs5345 inputs and outputs + + Copyright (C) 2007 Hans Verkuil (hverkuil@xs4all.nl) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#ifndef _CS5345_H_ +#define _CS5345_H_ + +/* CS5345 HW inputs */ +#define CS5345_IN_MIC 0 +#define CS5345_IN_1 1 +#define CS5345_IN_2 2 +#define CS5345_IN_3 3 +#define CS5345_IN_4 4 +#define CS5345_IN_5 5 +#define CS5345_IN_6 6 + +#define CS5345_MCLK_1 0x00 +#define CS5345_MCLK_1_5 0x10 +#define CS5345_MCLK_2 0x20 +#define CS5345_MCLK_3 0x30 +#define CS5345_MCLK_4 0x40 + +#endif diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h index af8071d7620d..5f4608e88476 100644 --- a/include/media/cx2341x.h +++ b/include/media/cx2341x.h @@ -83,7 +83,7 @@ struct cx2341x_mpeg_params { #define CX2341X_MBOX_MAX_DATA 16 extern const u32 cx2341x_mpeg_ctrls[]; -typedef int (*cx2341x_mbox_func)(void *priv, int cmd, int in, int out, +typedef int (*cx2341x_mbox_func)(void *priv, u32 cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]); int cx2341x_update(void *priv, cx2341x_mbox_func func, const struct cx2341x_mpeg_params *old, diff --git a/include/media/cx25840.h b/include/media/cx25840.h index 8e7e52d659a0..cd599ad29fb2 100644 --- a/include/media/cx25840.h +++ b/include/media/cx25840.h @@ -49,6 +49,25 @@ enum cx25840_video_input { CX25840_SVIDEO2 = 0x620, CX25840_SVIDEO3 = 0x730, CX25840_SVIDEO4 = 0x840, + + /* Allow frames to specify specific input configurations */ + CX25840_VIN1_CH1 = 0x80000000, + CX25840_VIN2_CH1 = 0x80000001, + CX25840_VIN3_CH1 = 0x80000002, + CX25840_VIN4_CH1 = 0x80000003, + CX25840_VIN5_CH1 = 0x80000004, + CX25840_VIN6_CH1 = 0x80000005, + CX25840_VIN7_CH1 = 0x80000006, + CX25840_VIN8_CH1 = 0x80000007, + CX25840_VIN4_CH2 = 0x80000000, + CX25840_VIN5_CH2 = 0x80000010, + CX25840_VIN6_CH2 = 0x80000020, + CX25840_NONE_CH2 = 0x80000030, + CX25840_VIN7_CH3 = 0x80000000, + CX25840_VIN8_CH3 = 0x80000040, + CX25840_NONE0_CH3 = 0x80000080, + CX25840_NONE1_CH3 = 0x800000c0, + CX25840_SVIDEO_ON = 0x80000100, }; enum cx25840_audio_input { diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 7a785fa77212..831547d79683 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -97,7 +97,6 @@ int ir_dump_samples(u32 *samples, int count); int ir_decode_biphase(u32 *samples, int count, int low, int high); int ir_decode_pulsedistance(u32 *samples, int count, int low, int high); -u32 ir_rc5_decode(unsigned int code); void ir_rc5_timer_end(unsigned long data); void ir_rc5_timer_keyup(unsigned long data); @@ -141,6 +140,8 @@ extern IR_KEYTAB_TYPE ir_codes_asus_pc39[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_encore_enltv[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_tt_1500[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_fusionhdtv_mce[IR_KEYTAB_SIZE]; +extern IR_KEYTAB_TYPE ir_codes_behold[IR_KEYTAB_SIZE]; +extern IR_KEYTAB_TYPE ir_codes_pinnacle_pctv_hd[IR_KEYTAB_SIZE]; #endif diff --git a/include/media/m52790.h b/include/media/m52790.h new file mode 100644 index 000000000000..7ddffae31a67 --- /dev/null +++ b/include/media/m52790.h @@ -0,0 +1,93 @@ +/* + m52790.h - definition for m52790 inputs and outputs + + Copyright (C) 2007 Hans Verkuil (hverkuil@xs4all.nl) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef _M52790_H_ +#define _M52790_H_ + +/* Input routing switch 1 */ + +#define M52790_SW1_IN_MASK 0x0003 +#define M52790_SW1_IN_TUNER 0x0000 +#define M52790_SW1_IN_V2 0x0001 +#define M52790_SW1_IN_V3 0x0002 +#define M52790_SW1_IN_V4 0x0003 + +/* Selects component input instead of composite */ +#define M52790_SW1_YCMIX 0x0004 + + +/* Input routing switch 2 */ + +#define M52790_SW2_IN_MASK 0x0300 +#define M52790_SW2_IN_TUNER 0x0000 +#define M52790_SW2_IN_V2 0x0100 +#define M52790_SW2_IN_V3 0x0200 +#define M52790_SW2_IN_V4 0x0300 + +/* Selects component input instead of composite */ +#define M52790_SW2_YCMIX 0x0400 + + +/* Output routing switch 1 */ + +/* Enable 6dB amplifier for composite out */ +#define M52790_SW1_V_AMP 0x0008 + +/* Enable 6dB amplifier for component out */ +#define M52790_SW1_YC_AMP 0x0010 + +/* Audio output mode */ +#define M52790_SW1_AUDIO_MASK 0x00c0 +#define M52790_SW1_AUDIO_MUTE 0x0000 +#define M52790_SW1_AUDIO_R 0x0040 +#define M52790_SW1_AUDIO_L 0x0080 +#define M52790_SW1_AUDIO_STEREO 0x00c0 + + +/* Output routing switch 2 */ + +/* Enable 6dB amplifier for composite out */ +#define M52790_SW2_V_AMP 0x0800 + +/* Enable 6dB amplifier for component out */ +#define M52790_SW2_YC_AMP 0x1000 + +/* Audio output mode */ +#define M52790_SW2_AUDIO_MASK 0xc000 +#define M52790_SW2_AUDIO_MUTE 0x0000 +#define M52790_SW2_AUDIO_R 0x4000 +#define M52790_SW2_AUDIO_L 0x8000 +#define M52790_SW2_AUDIO_STEREO 0xc000 + + +/* Common values */ +#define M52790_IN_TUNER (M52790_SW1_IN_TUNER | M52790_SW2_IN_TUNER) +#define M52790_IN_V2 (M52790_SW1_IN_V2 | M52790_SW2_IN_V2) +#define M52790_IN_V3 (M52790_SW1_IN_V3 | M52790_SW2_IN_V3) +#define M52790_IN_V4 (M52790_SW1_IN_V4 | M52790_SW2_IN_V4) + +#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \ + M52790_SW2_AUDIO_STEREO) +#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \ + M52790_SW1_V_AMP | \ + M52790_SW2_AUDIO_STEREO | \ + M52790_SW2_V_AMP) + +#endif diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h index e49f7e156061..89c442eb8849 100644 --- a/include/media/saa7146_vv.h +++ b/include/media/saa7146_vv.h @@ -1,7 +1,6 @@ #ifndef __SAA7146_VV__ #define __SAA7146_VV__ -#include <linux/videodev.h> #include <media/v4l2-common.h> #include <media/saa7146.h> #include <media/videobuf-dma-sg.h> diff --git a/include/media/tuner.h b/include/media/tuner.h index c03dceb92605..1bf24a6ed8f1 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -24,8 +24,6 @@ #include <linux/videodev2.h> -extern int tuner_debug; - #define ADDR_UNSET (255) #define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ @@ -117,12 +115,13 @@ extern int tuner_debug; #define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ #define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ #define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ -#define TUNER_XCEIVE_XC3028 71 +#define TUNER_XC2028 71 #define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ #define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ #define TUNER_TDA9887 74 /* This tuner should be used only internally */ #define TUNER_TEA5761 75 /* Only FM Radio Tuner */ +#define TUNER_XC5000 76 /* Xceive Silicon Tuner */ /* tv card specific */ #define TDA9887_PRESENT (1<<0) diff --git 
a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h index 8ae42c41dd08..032bb75f69c2 100644 --- a/include/media/v4l2-chip-ident.h +++ b/include/media/v4l2-chip-ident.h @@ -68,6 +68,9 @@ enum { /* module vp27smpx: just ident 2700 */ V4L2_IDENT_VP27SMPX = 2700, + /* module cs5345: just ident 5345 */ + V4L2_IDENT_CS5345 = 5345, + /* module wm8739: just ident 8739 */ V4L2_IDENT_WM8739 = 8739, @@ -83,6 +86,9 @@ enum { /* module upd64083: just ident 64083 */ V4L2_IDENT_UPD64083 = 64083, + /* module m52790: just ident 52790 */ + V4L2_IDENT_M52790 = 52790, + /* module msp34xx: reserved range 34000-34999 */ V4L2_IDENT_MSP3400B = 34002, V4L2_IDENT_MSP3410B = 34102, diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 181a40c46a52..475d0d8275e0 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -104,6 +104,17 @@ int v4l2_chip_match_host(u32 id_type, u32 chip_id); /* ------------------------------------------------------------------------- */ +/* Helper function for I2C legacy drivers */ + +struct i2c_driver; +struct i2c_adapter; +struct i2c_client; + +int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver, + const char *name, int (*probe)(struct i2c_client *)); + +/* ------------------------------------------------------------------------- */ + /* Internal ioctls */ /* VIDIOC_INT_DECODE_VBI_LINE */ @@ -116,6 +127,11 @@ struct v4l2_decode_vbi_line { u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */ }; +struct v4l2_priv_tun_config { + int tuner; + void *priv; +}; + /* audio ioctls */ /* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */ @@ -131,7 +147,7 @@ struct v4l2_decode_vbi_line { #define TUNER_SET_STANDBY _IOW('d', 91, int) /* Sets tda9887 specific stuff, like port1, port2 and qss */ -#define TDA9887_SET_CONFIG _IOW('d', 92, int) +#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) /* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */ #define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type) diff --git a/include/media/v4l2-i2c-drv-legacy.h b/include/media/v4l2-i2c-drv-legacy.h new file mode 100644 index 000000000000..e7645578fc22 --- /dev/null +++ b/include/media/v4l2-i2c-drv-legacy.h @@ -0,0 +1,140 @@ +/* + * v4l2-i2c-drv-legacy.h - contains I2C handling code that's identical + * for all V4L2 I2C drivers. Use this header if the + * I2C driver is used by both legacy drivers and + * drivers converted to the bus-based I2C API. + * + * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +struct v4l2_i2c_driver_data { + const char * const name; + int driverid; + int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); + int (*probe)(struct i2c_client *client); + int (*remove)(struct i2c_client *client); + int (*suspend)(struct i2c_client *client, pm_message_t state); + int (*resume)(struct i2c_client *client); + int (*legacy_probe)(struct i2c_adapter *adapter); + int legacy_class; +}; + +static struct v4l2_i2c_driver_data v4l2_i2c_data; +static const struct i2c_client_address_data addr_data; +static struct i2c_driver v4l2_i2c_driver_legacy; +static char v4l2_i2c_drv_name_legacy[32]; + +static int v4l2_i2c_drv_attach_legacy(struct i2c_adapter *adapter, int address, int kind) +{ + return v4l2_i2c_attach(adapter, address, &v4l2_i2c_driver_legacy, + v4l2_i2c_drv_name_legacy, v4l2_i2c_data.probe); +} + +static int v4l2_i2c_drv_probe_legacy(struct i2c_adapter *adapter) +{ + if (v4l2_i2c_data.legacy_probe) { + if (v4l2_i2c_data.legacy_probe(adapter)) + return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy); + return 0; + } + if (adapter->class & v4l2_i2c_data.legacy_class) + return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy); + return 0; +} + +static int v4l2_i2c_drv_detach_legacy(struct i2c_client *client) +{ + int err; + + if (v4l2_i2c_data.remove) + v4l2_i2c_data.remove(client); + + err = i2c_detach_client(client); + if (err) + return err; + kfree(client); + + return 0; +} + +static int v4l2_i2c_drv_suspend_helper(struct i2c_client *client, pm_message_t state) +{ + return v4l2_i2c_data.suspend ? v4l2_i2c_data.suspend(client, state) : 0; +} + +static int v4l2_i2c_drv_resume_helper(struct i2c_client *client) +{ + return v4l2_i2c_data.resume ? v4l2_i2c_data.resume(client) : 0; +} + +/* ----------------------------------------------------------------------- */ + +/* i2c implementation */ +static struct i2c_driver v4l2_i2c_driver_legacy = { + .driver = { + .owner = THIS_MODULE, + }, + .attach_adapter = v4l2_i2c_drv_probe_legacy, + .detach_client = v4l2_i2c_drv_detach_legacy, + .suspend = v4l2_i2c_drv_suspend_helper, + .resume = v4l2_i2c_drv_resume_helper, +}; + +/* ----------------------------------------------------------------------- */ + +/* i2c implementation */ +static struct i2c_driver v4l2_i2c_driver = { + .suspend = v4l2_i2c_drv_suspend_helper, + .resume = v4l2_i2c_drv_resume_helper, +}; + +static int __init v4l2_i2c_drv_init(void) +{ + int err; + + strlcpy(v4l2_i2c_drv_name_legacy, v4l2_i2c_data.name, sizeof(v4l2_i2c_drv_name_legacy)); + strlcat(v4l2_i2c_drv_name_legacy, "'", sizeof(v4l2_i2c_drv_name_legacy)); + + if (v4l2_i2c_data.legacy_class == 0) + v4l2_i2c_data.legacy_class = I2C_CLASS_TV_ANALOG; + + v4l2_i2c_driver_legacy.driver.name = v4l2_i2c_drv_name_legacy; + v4l2_i2c_driver_legacy.id = v4l2_i2c_data.driverid; + v4l2_i2c_driver_legacy.command = v4l2_i2c_data.command; + err = i2c_add_driver(&v4l2_i2c_driver_legacy); + + if (err) + return err; + v4l2_i2c_driver.driver.name = v4l2_i2c_data.name; + v4l2_i2c_driver.id = v4l2_i2c_data.driverid; + v4l2_i2c_driver.command = v4l2_i2c_data.command; + v4l2_i2c_driver.probe = v4l2_i2c_data.probe; + v4l2_i2c_driver.remove = v4l2_i2c_data.remove; + err = i2c_add_driver(&v4l2_i2c_driver); + if (err) + i2c_del_driver(&v4l2_i2c_driver_legacy); + return err; +} + +static void __exit v4l2_i2c_drv_cleanup(void) +{ + i2c_del_driver(&v4l2_i2c_driver_legacy); + i2c_del_driver(&v4l2_i2c_driver); +} + +module_init(v4l2_i2c_drv_init); +module_exit(v4l2_i2c_drv_cleanup); diff --git 
a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h new file mode 100644 index 000000000000..9e4bab276915 --- /dev/null +++ b/include/media/v4l2-i2c-drv.h @@ -0,0 +1,68 @@ +/* + * v4l2-i2c-drv.h - contains I2C handling code that's identical for + * all V4L2 I2C drivers. Use this header if the + * I2C driver is only used by drivers converted + * to the bus-based I2C API. + * + * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __V4L2_I2C_DRV_H__ +#define __V4L2_I2C_DRV_H__ + +#include <media/v4l2-common.h> + +struct v4l2_i2c_driver_data { + const char * const name; + int driverid; + int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); + int (*probe)(struct i2c_client *client); + int (*remove)(struct i2c_client *client); + int (*suspend)(struct i2c_client *client, pm_message_t state); + int (*resume)(struct i2c_client *client); + int (*legacy_probe)(struct i2c_adapter *adapter); + int legacy_class; +}; + +static struct v4l2_i2c_driver_data v4l2_i2c_data; +static struct i2c_driver v4l2_i2c_driver; + + +/* Bus-based I2C implementation for kernels >= 2.6.22 */ + +static int __init v4l2_i2c_drv_init(void) +{ + v4l2_i2c_driver.driver.name = v4l2_i2c_data.name; + v4l2_i2c_driver.id = v4l2_i2c_data.driverid; + v4l2_i2c_driver.command = v4l2_i2c_data.command; + v4l2_i2c_driver.probe = v4l2_i2c_data.probe; + v4l2_i2c_driver.remove = v4l2_i2c_data.remove; + v4l2_i2c_driver.suspend = v4l2_i2c_data.suspend; + v4l2_i2c_driver.resume = v4l2_i2c_data.resume; + return i2c_add_driver(&v4l2_i2c_driver); +} + + +static void __exit v4l2_i2c_drv_cleanup(void) +{ + i2c_del_driver(&v4l2_i2c_driver); +} + +module_init(v4l2_i2c_drv_init); +module_exit(v4l2_i2c_drv_cleanup); + +#endif /* __V4L2_I2C_DRV_H__ */ diff --git a/include/media/v4l2-int-device.h b/include/media/v4l2-int-device.h index 066ebfc4f983..c8b80e0f0651 100644 --- a/include/media/v4l2-int-device.h +++ b/include/media/v4l2-int-device.h @@ -44,9 +44,8 @@ enum v4l2_int_type { struct v4l2_int_device; struct v4l2_int_master { - int (*attach)(struct v4l2_int_device *master, - struct v4l2_int_device *slave); - void (*detach)(struct v4l2_int_device *master); + int (*attach)(struct v4l2_int_device *slave); + void (*detach)(struct v4l2_int_device *slave); }; typedef int (v4l2_int_ioctl_func)(struct v4l2_int_device *); diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h index 4fd5d0eaa935..97f14d469595 100644 --- a/include/media/videobuf-core.h +++ b/include/media/videobuf-core.h @@ -56,13 +56,13 @@ struct videobuf_mapping { }; enum videobuf_state { - STATE_NEEDS_INIT = 0, - STATE_PREPARED = 1, - STATE_QUEUED = 2, - STATE_ACTIVE = 3, - STATE_DONE = 4, - STATE_ERROR = 5, - STATE_IDLE = 6, + VIDEOBUF_NEEDS_INIT = 0, + VIDEOBUF_PREPARED = 1, + VIDEOBUF_QUEUED = 2, + VIDEOBUF_ACTIVE = 3, + 
VIDEOBUF_DONE = 4, + VIDEOBUF_ERROR = 5, + VIDEOBUF_IDLE = 6, }; struct videobuf_buffer { @@ -162,12 +162,14 @@ struct videobuf_queue { struct videobuf_queue_ops *ops; struct videobuf_qtype_ops *int_ops; + unsigned int streaming:1; + unsigned int reading:1; + unsigned int is_mmapped:1; + /* capture via mmap() + ioctl(QBUF/DQBUF) */ - unsigned int streaming; struct list_head stream; /* capture via read() */ - unsigned int reading; unsigned int read_off; struct videobuf_buffer *read_buf; diff --git a/include/net/act_api.h b/include/net/act_api.h index 68b4eaf7719d..565eed8fe496 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -89,7 +89,7 @@ struct tc_action_ops { int (*dump)(struct sk_buff *, struct tc_action *, int, int); int (*cleanup)(struct tc_action *, int bind); int (*lookup)(struct tc_action *, u32); - int (*init)(struct rtattr *, struct rtattr *, struct tc_action *, int , int); + int (*init)(struct nlattr *, struct nlattr *, struct tc_action *, int , int); int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); }; @@ -104,7 +104,7 @@ extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo); extern int tcf_hash_search(struct tc_action *a, u32 index); extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind, struct tcf_hashinfo *hinfo); -extern struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, +extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo); @@ -114,8 +114,8 @@ extern int tcf_register_action(struct tc_action_ops *a); extern int tcf_unregister_action(struct tc_action_ops *a); extern void tcf_action_destroy(struct tc_action *a, int bind); extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res); -extern struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est, char *n, int ovr, int bind, int *err); -extern struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, char *n, int ovr, int bind, int *err); +extern struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind); +extern struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind); extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int); extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 33b593e17441..496503c03846 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -17,6 +17,7 @@ #define IPV6_MAX_ADDRESSES 16 +#include <linux/in.h> #include <linux/in6.h> struct prefix_info { @@ -58,15 +59,20 @@ extern int addrconf_add_ifaddr(void __user *arg); extern int addrconf_del_ifaddr(void __user *arg); extern int addrconf_set_dstaddr(void __user *arg); -extern int ipv6_chk_addr(struct in6_addr *addr, +extern int ipv6_chk_addr(struct net *net, + struct in6_addr *addr, struct net_device *dev, int strict); + #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) -extern int ipv6_chk_home_addr(struct in6_addr *addr); +extern int ipv6_chk_home_addr(struct net *net, + struct in6_addr *addr); #endif -extern struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr, - struct net_device *dev, - int strict); +extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net 
*net, + struct in6_addr *addr, + struct net_device *dev, + int strict); + extern int ipv6_get_saddr(struct dst_entry *dst, struct in6_addr *daddr, struct in6_addr *saddr); @@ -84,6 +90,14 @@ extern void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr); /* + * IPv6 Address Label subsystem (addrlabel.c) + */ +extern int ipv6_addr_label_init(void); +extern void ipv6_addr_label_rtnl_register(void); +extern u32 ipv6_addr_label(const struct in6_addr *addr, + int type, int ifindex); + +/* * multicast prototypes (mcast.c) */ extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, @@ -241,6 +255,26 @@ static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) addr->s6_addr32[3] == htonl(0x00000002)); } +static inline int ipv6_isatap_eui64(u8 *eui, __be32 addr) +{ + eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) || + ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) || + ipv4_is_private_172(addr) || ipv4_is_test_192(addr) || + ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) || + ipv4_is_test_198(addr) || ipv4_is_multicast(addr) || + ipv4_is_lbcast(addr)) ? 0x00 : 0x02; + eui[1] = 0; + eui[2] = 0x5E; + eui[3] = 0xFE; + memcpy (eui+4, &addr, 4); + return 0; +} + +static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) +{ + return ((addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE)); +} + #ifdef CONFIG_PROC_FS extern int if6_proc_init(void); extern void if6_proc_exit(void); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index a1c805d7f488..2dfa96b0575e 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -59,12 +59,11 @@ struct unix_sock { #define unix_sk(__sk) ((struct unix_sock *)__sk) #ifdef CONFIG_SYSCTL -extern int sysctl_unix_max_dgram_qlen; -extern void unix_sysctl_register(void); -extern void unix_sysctl_unregister(void); +extern int unix_sysctl_register(struct net *net); +extern void unix_sysctl_unregister(struct net *net); #else -static inline void unix_sysctl_register(void) {} -static inline void unix_sysctl_unregister(void) {} +static inline int unix_sysctl_register(struct net *net) { return 0; } +static inline void unix_sysctl_unregister(struct net *net) {} #endif #endif #endif diff --git a/include/net/arp.h b/include/net/arp.h index f02664568600..752eb47b2678 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -5,13 +5,12 @@ #include <linux/if_arp.h> #include <net/neighbour.h> -#define HAVE_ARP_CREATE extern struct neigh_table arp_tbl; extern void arp_init(void); extern int arp_find(unsigned char *haddr, struct sk_buff *skb); -extern int arp_ioctl(unsigned int cmd, void __user *arg); +extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); extern void arp_send(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th); diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 25aa575db807..98ec7a320689 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -252,8 +252,8 @@ static inline void rfcomm_dlc_put(struct rfcomm_dlc *d) rfcomm_dlc_free(d); } -extern void FASTCALL(__rfcomm_dlc_throttle(struct rfcomm_dlc *d)); -extern void FASTCALL(__rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)); +extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d); +extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d); static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d) { diff --git a/include/net/cfg80211.h 
b/include/net/cfg80211.h index d30960e1755c..bcc480b8892a 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -49,6 +49,120 @@ extern int ieee80211_radiotap_iterator_next( struct ieee80211_radiotap_iterator *iterator); + /** + * struct key_params - key information + * + * Information about a key + * + * @key: key material + * @key_len: length of key material + * @cipher: cipher suite selector + * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used + * with the get_key() callback, must be in little endian, + * length given by @seq_len. + */ +struct key_params { + u8 *key; + u8 *seq; + int key_len; + int seq_len; + u32 cipher; +}; + +/** + * struct beacon_parameters - beacon parameters + * + * Used to configure the beacon for an interface. + * + * @head: head portion of beacon (before TIM IE) + * or %NULL if not changed + * @tail: tail portion of beacon (after TIM IE) + * or %NULL if not changed + * @interval: beacon interval or zero if not changed + * @dtim_period: DTIM period or zero if not changed + * @head_len: length of @head + * @tail_len: length of @tail + */ +struct beacon_parameters { + u8 *head, *tail; + int interval, dtim_period; + int head_len, tail_len; +}; + +/** + * enum station_flags - station flags + * + * Station capability flags. Note that these must be the bits + * according to the nl80211 flags. + * + * @STATION_FLAG_CHANGED: station flags were changed + * @STATION_FLAG_AUTHORIZED: station is authorized to send frames (802.1X) + * @STATION_FLAG_SHORT_PREAMBLE: station is capable of receiving frames + * with short preambles + * @STATION_FLAG_WME: station is WME/QoS capable + */ +enum station_flags { + STATION_FLAG_CHANGED = 1<<0, + STATION_FLAG_AUTHORIZED = 1<<NL80211_STA_FLAG_AUTHORIZED, + STATION_FLAG_SHORT_PREAMBLE = 1<<NL80211_STA_FLAG_SHORT_PREAMBLE, + STATION_FLAG_WME = 1<<NL80211_STA_FLAG_WME, +}; + +/** + * struct station_parameters - station parameters + * + * Used to change and create a new station. + * + * @vlan: vlan interface station should belong to + * @supported_rates: supported rates in IEEE 802.11 format + * (or NULL for no change) + * @supported_rates_len: number of supported rates + * @station_flags: station flags (see &enum station_flags) + * @listen_interval: listen interval or -1 for no change + * @aid: AID or zero for no change + */ +struct station_parameters { + u8 *supported_rates; + struct net_device *vlan; + u32 station_flags; + int listen_interval; + u16 aid; + u8 supported_rates_len; +}; + +/** + * enum station_stats_flags - station statistics flags + * + * Used by the driver to indicate which info in &struct station_stats + * it has filled in during get_station(). + * + * @STATION_STAT_INACTIVE_TIME: @inactive_time filled + * @STATION_STAT_RX_BYTES: @rx_bytes filled + * @STATION_STAT_TX_BYTES: @tx_bytes filled + */ +enum station_stats_flags { + STATION_STAT_INACTIVE_TIME = 1<<0, + STATION_STAT_RX_BYTES = 1<<1, + STATION_STAT_TX_BYTES = 1<<2, +}; + +/** + * struct station_stats - station statistics + * + * Station information filled by driver for get_station(). 
+ * + * @filled: bitflag of flags from &enum station_stats_flags + * @inactive_time: time since last station activity (tx/rx) in milliseconds + * @rx_bytes: bytes received from this station + * @tx_bytes: bytes transmitted to this station + */ +struct station_stats { + u32 filled; + u32 inactive_time; + u32 rx_bytes; + u32 tx_bytes; +}; + /* from net/wireless.h */ struct wiphy; @@ -71,6 +185,31 @@ struct wiphy; * * @change_virtual_intf: change type of virtual interface * + * @add_key: add a key with the given parameters. @mac_addr will be %NULL + * when adding a group key. + * + * @get_key: get information about the key with the given parameters. + * @mac_addr will be %NULL when requesting information for a group + * key. All pointers given to the @callback function need not be valid + * after it returns. + * + * @del_key: remove a key given the @mac_addr (%NULL for a group key) + * and @key_index + * + * @set_default_key: set the default key on an interface + * + * @add_beacon: Add a beacon with given parameters, @head, @interval + * and @dtim_period will be valid, @tail is optional. + * @set_beacon: Change the beacon parameters for an access point mode + * interface. This should reject the call when no beacon has been + * configured. + * @del_beacon: Remove beacon configuration and stop sending the beacon. + * + * @add_station: Add a new station. + * + * @del_station: Remove a station; @mac may be NULL to remove all stations. + * + * @change_station: Modify a given station. */ struct cfg80211_ops { int (*add_virtual_intf)(struct wiphy *wiphy, char *name, @@ -78,6 +217,34 @@ struct cfg80211_ops { int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex); int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex, enum nl80211_iftype type); + + int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, u8 *mac_addr, + struct key_params *params); + int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, struct key_params*)); + int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, u8 *mac_addr); + int (*set_default_key)(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index); + + int (*add_beacon)(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info); + int (*set_beacon)(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info); + int (*del_beacon)(struct wiphy *wiphy, struct net_device *dev); + + + int (*add_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_parameters *params); + int (*del_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac); + int (*change_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_parameters *params); + int (*get_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_stats *stats); }; #endif /* __NET_CFG80211_H */ diff --git a/include/net/checksum.h b/include/net/checksum.h index 124246172a88..07602b7fa218 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -93,4 +93,29 @@ static inline __wsum csum_unfold(__sum16 n) } #define CSUM_MANGLED_0 ((__force __sum16)0xffff) + +static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) +{ + __be32 diff[] = { ~from, to }; + + *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum))); +} + +static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to) +{ + csum_replace4(sum, (__force 
__be32)from, (__force __be32)to); +} + +struct sk_buff; +extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, + __be32 from, __be32 to, int pseudohdr); + +static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, + __be16 from, __be16 to, + int pseudohdr) +{ + inet_proto_csum_replace4(sum, skb, (__force __be32)from, + (__force __be32)to, pseudohdr); +} + #endif diff --git a/include/net/dsfield.h b/include/net/dsfield.h index eb65bf2e2502..8a8d4e06900d 100644 --- a/include/net/dsfield.h +++ b/include/net/dsfield.h @@ -12,15 +12,15 @@ #include <asm/byteorder.h> -static inline __u8 ipv4_get_dsfield(struct iphdr *iph) +static inline __u8 ipv4_get_dsfield(const struct iphdr *iph) { return iph->tos; } -static inline __u8 ipv6_get_dsfield(struct ipv6hdr *ipv6h) +static inline __u8 ipv6_get_dsfield(const struct ipv6hdr *ipv6h) { - return ntohs(*(__be16 *) ipv6h) >> 4; + return ntohs(*(const __be16 *)ipv6h) >> 4; } diff --git a/include/net/dst.h b/include/net/dst.h index 2f65e894b829..e3ac7d0fc4e1 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -50,14 +50,17 @@ struct dst_entry unsigned long expires; unsigned short header_len; /* more space at head required */ - unsigned short nfheader_len; /* more non-fragment space at head required */ unsigned short trailer_len; /* space to reserve at tail */ u32 metrics[RTAX_MAX]; struct dst_entry *path; unsigned long rate_last; /* rate limiting for ICMP */ - unsigned long rate_tokens; + unsigned int rate_tokens; + +#ifdef CONFIG_NET_CLS_ROUTE + __u32 tclassid; +#endif struct neighbour *neighbour; struct hh_cache *hh; @@ -66,10 +69,6 @@ struct dst_entry int (*input)(struct sk_buff*); int (*output)(struct sk_buff*); -#ifdef CONFIG_NET_CLS_ROUTE - __u32 tclassid; -#endif - struct dst_ops *ops; unsigned long lastuse; @@ -81,7 +80,6 @@ struct dst_entry struct rt6_info *rt6_next; struct dn_route *dn_next; }; - char info[0]; }; @@ -91,7 +89,7 @@ struct dst_ops __be16 protocol; unsigned gc_thresh; - int (*gc)(void); + int (*gc)(struct dst_ops *ops); struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, @@ -99,10 +97,12 @@ struct dst_ops struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *dst, u32 mtu); + int (*local_out)(struct sk_buff *skb); int entry_size; atomic_t entries; struct kmem_cache *kmem_cachep; + struct net *dst_net; }; #ifdef __KERNEL__ @@ -180,6 +180,7 @@ static inline struct dst_entry *dst_pop(struct dst_entry *dst) return child; } +extern int dst_discard(struct sk_buff *skb); extern void * dst_alloc(struct dst_ops * ops); extern void __dst_free(struct dst_entry * dst); extern struct dst_entry *dst_destroy(struct dst_entry * dst); @@ -264,6 +265,12 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) extern void dst_init(void); +/* Flags for xfrm_lookup flags argument. 
*/ +enum { + XFRM_LOOKUP_WAIT = 1 << 0, + XFRM_LOOKUP_ICMP = 1 << 1, +}; + struct flowi; #ifndef CONFIG_XFRM static inline int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 41a301e38643..34349f9f4331 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -22,6 +22,7 @@ struct fib_rule u32 target; struct fib_rule * ctarget; struct rcu_head rcu; + struct net * fr_net; }; struct fib_lookup_arg @@ -56,7 +57,7 @@ struct fib_rules_ops int (*fill)(struct fib_rule *, struct sk_buff *, struct nlmsghdr *, struct fib_rule_hdr *); - u32 (*default_pref)(void); + u32 (*default_pref)(struct fib_rules_ops *ops); size_t (*nlmsg_payload)(struct fib_rule *); /* Called after modifications to the rules set, must flush @@ -67,6 +68,7 @@ struct fib_rules_ops const struct nla_policy *policy; struct list_head rules_list; struct module *owner; + struct net *fro_net; }; #define FRA_GENERIC_POLICY \ @@ -101,8 +103,9 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) return frh->table; } -extern int fib_rules_register(struct fib_rules_ops *); -extern int fib_rules_unregister(struct fib_rules_ops *); +extern int fib_rules_register(struct fib_rules_ops *); +extern void fib_rules_unregister(struct fib_rules_ops *); +extern void fib_rules_cleanup_ops(struct fib_rules_ops *); extern int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, diff --git a/include/net/flow.h b/include/net/flow.h index af59fa5cc1f8..ad16e0076c89 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -48,7 +48,6 @@ struct flowi { __u8 proto; __u8 flags; -#define FLOWI_FLAG_MULTIPATHOLDROUTE 0x01 union { struct { __be16 sport; diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 0b95cf031d6e..8cd8185fa2ed 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -10,7 +10,7 @@ struct gnet_dump { spinlock_t * lock; struct sk_buff * skb; - struct rtattr * tail; + struct nlattr * tail; /* Backward compatability */ int compat_tc_stats; @@ -39,11 +39,11 @@ extern int gnet_stats_finish_copy(struct gnet_dump *d); extern int gen_new_estimator(struct gnet_stats_basic *bstats, struct gnet_stats_rate_est *rate_est, - spinlock_t *stats_lock, struct rtattr *opt); + spinlock_t *stats_lock, struct nlattr *opt); extern void gen_kill_estimator(struct gnet_stats_basic *bstats, struct gnet_stats_rate_est *rate_est); extern int gen_replace_estimator(struct gnet_stats_basic *bstats, struct gnet_stats_rate_est *rate_est, - spinlock_t *stats_lock, struct rtattr *opt); + spinlock_t *stats_lock, struct nlattr *opt); #endif diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index d8ae48439f12..285b2adfa648 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -677,7 +677,7 @@ struct ieee80211_probe_request { struct ieee80211_probe_response { struct ieee80211_hdr_3addr header; - u32 time_stamp[2]; + __le32 time_stamp[2]; __le16 beacon_interval; __le16 capability; /* SSID, supported rates, FH params, DS params, @@ -718,8 +718,8 @@ struct ieee80211_txb { u8 encrypted; u8 rts_included; u8 reserved; - __le16 frag_size; - __le16 payload_size; + u16 frag_size; + u16 payload_size; struct sk_buff *fragments[0]; }; diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 448eccb20638..b24508abb850 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -269,18 +269,21 @@ static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf) buf[0] = 0x00; } 
-static inline void ipv6_ib_mc_map(struct in6_addr *addr, char *buf) +static inline void ipv6_ib_mc_map(const struct in6_addr *addr, + const unsigned char *broadcast, char *buf) { + unsigned char scope = broadcast[5] & 0xF; + buf[0] = 0; /* Reserved */ buf[1] = 0xff; /* Multicast QPN */ buf[2] = 0xff; buf[3] = 0xff; buf[4] = 0xff; - buf[5] = 0x12; /* link local scope */ + buf[5] = 0x10 | scope; /* scope from broadcast address */ buf[6] = 0x60; /* IPv6 signature */ buf[7] = 0x1b; - buf[8] = 0; /* P_Key */ - buf[9] = 0; + buf[8] = broadcast[8]; /* P_Key */ + buf[9] = broadcast[9]; memcpy(buf + 10, addr->s6_addr + 6, 10); } #endif diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index de8399a79774..ba33db053854 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -83,9 +83,9 @@ static inline void IP_ECN_clear(struct iphdr *iph) iph->tos &= ~INET_ECN_MASK; } -static inline void ipv4_copy_dscp(struct iphdr *outer, struct iphdr *inner) +static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner) { - u32 dscp = ipv4_get_dsfield(outer) & ~INET_ECN_MASK; + dscp &= ~INET_ECN_MASK; ipv4_change_dsfield(inner, INET_ECN_MASK, dscp); } @@ -104,9 +104,9 @@ static inline void IP6_ECN_clear(struct ipv6hdr *iph) *(__be32*)iph &= ~htonl(INET_ECN_MASK << 20); } -static inline void ipv6_copy_dscp(struct ipv6hdr *outer, struct ipv6hdr *inner) +static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) { - u32 dscp = ipv6_get_dsfield(outer) & ~INET_ECN_MASK; + dscp &= ~INET_ECN_MASK; ipv6_change_dsfield(inner, INET_ECN_MASK, dscp); } diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 954def408975..7374251b9787 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -1,8 +1,20 @@ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ +struct netns_frags { + int nqueues; + atomic_t mem; + struct list_head lru_list; + + /* sysctls */ + int timeout; + int high_thresh; + int low_thresh; +}; + struct inet_frag_queue { struct hlist_node list; + struct netns_frags *net; struct list_head lru_list; /* lru list member */ spinlock_t lock; atomic_t refcnt; @@ -20,23 +32,13 @@ struct inet_frag_queue { #define INETFRAGS_HASHSZ 64 -struct inet_frags_ctl { - int high_thresh; - int low_thresh; - int timeout; - int secret_interval; -}; - struct inet_frags { - struct list_head lru_list; struct hlist_head hash[INETFRAGS_HASHSZ]; rwlock_t lock; u32 rnd; - int nqueues; int qsize; - atomic_t mem; + int secret_interval; struct timer_list secret_timer; - struct inet_frags_ctl *ctl; unsigned int (*hashfn)(struct inet_frag_queue *); void (*constructor)(struct inet_frag_queue *q, @@ -51,12 +53,15 @@ struct inet_frags { void inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); +void inet_frags_init_net(struct netns_frags *nf); +void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); + void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f, int *work); -int inet_frag_evictor(struct inet_frags *f); -struct inet_frag_queue *inet_frag_find(struct inet_frags *f, void *key, - unsigned int hash); +int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f); +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, + struct inet_frags *f, void *key, unsigned int hash); static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) { diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h 
index 37f6cb112127..761bdc01425d 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -264,37 +264,14 @@ static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo) wake_up(&hashinfo->lhash_wait); } -static inline void __inet_hash(struct inet_hashinfo *hashinfo, - struct sock *sk, const int listen_possible) -{ - struct hlist_head *list; - rwlock_t *lock; - - BUG_TRAP(sk_unhashed(sk)); - if (listen_possible && sk->sk_state == TCP_LISTEN) { - list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; - lock = &hashinfo->lhash_lock; - inet_listen_wlock(hashinfo); - } else { - struct inet_ehash_bucket *head; - sk->sk_hash = inet_sk_ehashfn(sk); - head = inet_ehash_bucket(hashinfo, sk->sk_hash); - list = &head->chain; - lock = inet_ehash_lockp(hashinfo, sk->sk_hash); - write_lock(lock); - } - __sk_add_node(sk, list); - sock_prot_inc_use(sk->sk_prot); - write_unlock(lock); - if (listen_possible && sk->sk_state == TCP_LISTEN) - wake_up(&hashinfo->lhash_wait); -} +extern void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk); +extern void __inet_hash_nolisten(struct inet_hashinfo *hinfo, struct sock *sk); static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { local_bh_disable(); - __inet_hash(hashinfo, sk, 1); + __inet_hash(hashinfo, sk); local_bh_enable(); } } @@ -316,7 +293,7 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk) } if (__sk_del_node_init(sk)) - sock_prot_dec_use(sk->sk_prot); + sock_prot_inuse_add(sk->sk_prot, -1); write_unlock_bh(lock); out: if (sk->sk_state == TCP_LISTEN) @@ -397,43 +374,9 @@ typedef __u64 __bitwise __addrpair; * * Local BH must be disabled here. */ -static inline struct sock * - __inet_lookup_established(struct inet_hashinfo *hashinfo, - const __be32 saddr, const __be16 sport, - const __be32 daddr, const u16 hnum, - const int dif) -{ - INET_ADDR_COOKIE(acookie, saddr, daddr) - const __portpair ports = INET_COMBINED_PORTS(sport, hnum); - struct sock *sk; - const struct hlist_node *node; - /* Optimize here for direct hit, only listening connections can - * have wildcards anyways. - */ - unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport); - struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); - rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); - - prefetch(head->chain.first); - read_lock(lock); - sk_for_each(sk, node, &head->chain) { - if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) - goto hit; /* You sunk my battleship! */ - } - - /* Must check for a TIME_WAIT'er before going to listener hash. 
*/ - sk_for_each(sk, node, &head->twchain) { - if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) - goto hit; - } - sk = NULL; -out: - read_unlock(lock); - return sk; -hit: - sock_hold(sk); - goto out; -} +extern struct sock * __inet_lookup_established(struct inet_hashinfo *hashinfo, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const u16 hnum, const int dif); static inline struct sock * inet_lookup_established(struct inet_hashinfo *hashinfo, diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index abaff0597270..67e925065aae 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -193,19 +193,7 @@ static inline __be32 inet_rcv_saddr(const struct sock *sk) inet_sk(sk)->rcv_saddr : inet_twsk(sk)->tw_rcv_saddr; } -static inline void inet_twsk_put(struct inet_timewait_sock *tw) -{ - if (atomic_dec_and_test(&tw->tw_refcnt)) { - struct module *owner = tw->tw_prot->owner; - twsk_destructor((struct sock *)tw); -#ifdef SOCK_REFCNT_DEBUG - printk(KERN_DEBUG "%s timewait_sock %p released\n", - tw->tw_prot->name, tw); -#endif - kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); - module_put(owner); - } -} +extern void inet_twsk_put(struct inet_timewait_sock *tw); extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state); diff --git a/include/net/ip.h b/include/net/ip.h index 840dd91b513b..9f50d4f1f157 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -82,8 +82,6 @@ struct packet_type; struct rtable; struct sockaddr; -extern void ip_mc_dropsocket(struct sock *); -extern void ip_mc_dropdevice(struct net_device *dev); extern int igmp_mc_proc_init(void); /* @@ -102,6 +100,8 @@ extern int ip_mc_output(struct sk_buff *skb); extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); extern int ip_do_nat(struct sk_buff *skb); extern void ip_send_check(struct iphdr *ip); +extern int __ip_local_out(struct sk_buff *skb); +extern int ip_local_out(struct sk_buff *skb); extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok); extern void ip_init(void); extern int ip_append_data(struct sock *sk, @@ -169,7 +169,7 @@ DECLARE_SNMP_STAT(struct linux_mib, net_statistics); #define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd) extern unsigned long snmp_fold_field(void *mib[], int offt); -extern int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); +extern int snmp_mib_init(void *ptr[2], size_t mibsize); extern void snmp_mib_free(void *ptr[2]); extern void inet_get_local_port_range(int *low, int *high); @@ -177,10 +177,7 @@ extern void inet_get_local_port_range(int *low, int *high); extern int sysctl_ip_default_ttl; extern int sysctl_ip_nonlocal_bind; -/* From ip_fragment.c */ -struct inet_frags_ctl; -extern struct inet_frags_ctl ip4_frags_ctl; -extern int sysctl_ipfrag_max_dist; +extern struct ctl_path net_ipv4_ctl_path[]; /* From inetpeer.c */ extern int inet_peer_threshold; @@ -266,20 +263,22 @@ static inline void ip_eth_mc_map(__be32 naddr, char *buf) * Leave P_Key as 0 to be filled in by driver. 
*/ -static inline void ip_ib_mc_map(__be32 naddr, char *buf) +static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf) { __u32 addr; + unsigned char scope = broadcast[5] & 0xF; + buf[0] = 0; /* Reserved */ buf[1] = 0xff; /* Multicast QPN */ buf[2] = 0xff; buf[3] = 0xff; addr = ntohl(naddr); buf[4] = 0xff; - buf[5] = 0x12; /* link local scope */ + buf[5] = 0x10 | scope; /* scope from broadcast address */ buf[6] = 0x40; /* IPv4 signature */ buf[7] = 0x1b; - buf[8] = 0; /* P_Key */ - buf[9] = 0; + buf[8] = broadcast[8]; /* P_Key */ + buf[9] = broadcast[9]; buf[10] = 0; buf[11] = 0; buf[12] = 0; @@ -317,7 +316,7 @@ static __inline__ void inet_reset_saddr(struct sock *sk) extern int ip_call_ra_chain(struct sk_buff *skb); /* - * Functions provided by ip_fragment.o + * Functions provided by ip_fragment.c */ enum ip_defrag_users @@ -332,15 +331,14 @@ enum ip_defrag_users }; int ip_defrag(struct sk_buff *skb, u32 user); -int ip_frag_mem(void); -int ip_frag_nqueues(void); +int ip_frag_mem(struct net *net); +int ip_frag_nqueues(struct net *net); /* * Functions provided by ip_forward.c */ extern int ip_forward(struct sk_buff *skb); -extern int ip_net_unreachable(struct sk_buff *skb); /* * Functions provided by ip_options.c @@ -391,6 +389,4 @@ int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, extern int ip_misc_proc_init(void); #endif -extern struct ctl_table ipv4_table[]; - #endif /* _IP_H */ diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 857821360bb6..d8d85b13364d 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -99,16 +99,21 @@ struct rt6_info u32 rt6i_flags; u32 rt6i_metric; atomic_t rt6i_ref; - struct fib6_table *rt6i_table; - struct rt6key rt6i_dst; - struct rt6key rt6i_src; + /* more non-fragment space at head required */ + unsigned short rt6i_nfheader_len; u8 rt6i_protocol; + struct fib6_table *rt6i_table; + + struct rt6key rt6i_dst; + #ifdef CONFIG_XFRM u32 rt6i_flow_cache_genid; #endif + + struct rt6key rt6i_src; }; static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) @@ -219,10 +224,20 @@ extern void fib6_run_gc(unsigned long dummy); extern void fib6_gc_cleanup(void); -extern void fib6_init(void); +extern int fib6_init(void); -extern void fib6_rules_init(void); +#ifdef CONFIG_IPV6_MULTIPLE_TABLES +extern int fib6_rules_init(void); extern void fib6_rules_cleanup(void); - +#else +static inline int fib6_rules_init(void) +{ + return 0; +} +static inline void fib6_rules_cleanup(void) +{ + return ; +} +#endif #endif #endif diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 5456fdd6d047..faac0eee1ef3 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -43,14 +43,12 @@ extern struct rt6_info ip6_prohibit_entry; extern struct rt6_info ip6_blk_hole_entry; #endif -extern int ip6_rt_gc_interval; - extern void ip6_route_input(struct sk_buff *skb); extern struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl); -extern void ip6_route_init(void); +extern int ip6_route_init(void); extern void ip6_route_cleanup(void); extern int ipv6_route_ioctl(unsigned int cmd, void __user *arg); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ed514bfb61ba..9daa60b544ba 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -125,11 +125,15 @@ struct fib_result_nl { #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) #define FIB_RES_RESET(res) ((res).nh_sel = 0) +#define FIB_TABLE_HASHSZ 2 + #else /* CONFIG_IP_ROUTE_MULTIPATH */ 
#define FIB_RES_NH(res) ((res).fi->fib_nh[0]) #define FIB_RES_RESET(res) +#define FIB_TABLE_HASHSZ 256 + #endif /* CONFIG_IP_ROUTE_MULTIPATH */ #define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res)) @@ -141,6 +145,7 @@ struct fib_table { struct hlist_node tb_hlist; u32 tb_id; unsigned tb_stamp; + int tb_default; int (*tb_lookup)(struct fib_table *tb, const struct flowi *flp, struct fib_result *res); int (*tb_insert)(struct fib_table *, struct fib_config *); int (*tb_delete)(struct fib_table *, struct fib_config *); @@ -155,50 +160,51 @@ struct fib_table { #ifndef CONFIG_IP_MULTIPLE_TABLES -extern struct fib_table *ip_fib_local_table; -extern struct fib_table *ip_fib_main_table; +#define TABLE_LOCAL_INDEX 0 +#define TABLE_MAIN_INDEX 1 -static inline struct fib_table *fib_get_table(u32 id) +static inline struct fib_table *fib_get_table(struct net *net, u32 id) { - if (id != RT_TABLE_LOCAL) - return ip_fib_main_table; - return ip_fib_local_table; -} + struct hlist_head *ptr; -static inline struct fib_table *fib_new_table(u32 id) -{ - return fib_get_table(id); + ptr = id == RT_TABLE_LOCAL ? + &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] : + &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]; + return hlist_entry(ptr->first, struct fib_table, tb_hlist); } -static inline int fib_lookup(const struct flowi *flp, struct fib_result *res) +static inline struct fib_table *fib_new_table(struct net *net, u32 id) { - if (ip_fib_local_table->tb_lookup(ip_fib_local_table, flp, res) && - ip_fib_main_table->tb_lookup(ip_fib_main_table, flp, res)) - return -ENETUNREACH; - return 0; + return fib_get_table(net, id); } -static inline void fib_select_default(const struct flowi *flp, struct fib_result *res) +static inline int fib_lookup(struct net *net, const struct flowi *flp, + struct fib_result *res) { - if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) - ip_fib_main_table->tb_select_default(ip_fib_main_table, flp, res); + struct fib_table *table; + + table = fib_get_table(net, RT_TABLE_LOCAL); + if (!table->tb_lookup(table, flp, res)) + return 0; + + table = fib_get_table(net, RT_TABLE_MAIN); + if (!table->tb_lookup(table, flp, res)) + return 0; + return -ENETUNREACH; } #else /* CONFIG_IP_MULTIPLE_TABLES */ -extern void __init fib4_rules_init(void); +extern int __net_init fib4_rules_init(struct net *net); +extern void __net_exit fib4_rules_exit(struct net *net); #ifdef CONFIG_NET_CLS_ROUTE extern u32 fib_rules_tclass(struct fib_result *res); #endif -#define ip_fib_local_table fib_get_table(RT_TABLE_LOCAL) -#define ip_fib_main_table fib_get_table(RT_TABLE_MAIN) - -extern int fib_lookup(struct flowi *flp, struct fib_result *res); +extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res); -extern struct fib_table *fib_new_table(u32 id); -extern struct fib_table *fib_get_table(u32 id); -extern void fib_select_default(const struct flowi *flp, struct fib_result *res); +extern struct fib_table *fib_new_table(struct net *net, u32 id); +extern struct fib_table *fib_get_table(struct net *net, u32 id); #endif /* CONFIG_IP_MULTIPLE_TABLES */ @@ -207,18 +213,19 @@ extern const struct nla_policy rtm_ipv4_policy[]; extern void ip_fib_init(void); extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, __be32 *spec_dst, u32 *itag); -extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res); - -struct rtentry; +extern void fib_select_default(struct net *net, const struct flowi *flp, + struct fib_result 
*res); /* Exported by fib_semantics.c */ extern int ip_fib_check_default(__be32 gw, struct net_device *dev); extern int fib_sync_down(__be32 local, struct net_device *dev, int force); extern int fib_sync_up(struct net_device *dev); extern __be32 __fib_res_prefsrc(struct fib_result *res); +extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res); -/* Exported by fib_hash.c */ -extern struct fib_table *fib_hash_init(u32 id); +/* Exported by fib_{hash|trie}.c */ +extern void fib_hash_init(void); +extern struct fib_table *fib_hash_table(u32 id); static inline void fib_combine_itag(u32 *itag, struct fib_result *res) { @@ -255,8 +262,8 @@ static inline void fib_res_put(struct fib_result *res) } #ifdef CONFIG_PROC_FS -extern int fib_proc_init(void); -extern void fib_proc_exit(void); +extern int __net_init fib_proc_init(struct net *net); +extern void __net_exit fib_proc_exit(struct net *net); #endif #endif /* _NET_FIB_H */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 8a7d59be8a0d..56f3c94ae620 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -9,6 +9,8 @@ #include <asm/types.h> /* For __uXX types */ #include <linux/types.h> /* For __beXX types in userland */ +#include <linux/sysctl.h> /* For ctl_path */ + #define IP_VS_VERSION_CODE 0x010201 #define NVERSION(version) \ (version >> 16) & 0xFF, \ @@ -676,7 +678,6 @@ extern const char *ip_vs_proto_name(unsigned proto); extern void ip_vs_init_hash_table(struct list_head *table, int rows); #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0])) -#define IP_VS_APP_TYPE_UNSPEC 0 #define IP_VS_APP_TYPE_FTP 1 /* @@ -735,7 +736,6 @@ extern const char * ip_vs_state_name(__u16 proto, int state); extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); extern int ip_vs_check_template(struct ip_vs_conn *ct); -extern void ip_vs_secure_tcp_set(int on); extern void ip_vs_random_dropentry(void); extern int ip_vs_conn_init(void); extern void ip_vs_conn_cleanup(void); @@ -856,6 +856,7 @@ extern int sysctl_ip_vs_expire_quiescent_template; extern int sysctl_ip_vs_sync_threshold[2]; extern int sysctl_ip_vs_nat_icmp_send; extern struct ip_vs_stats ip_vs_stats; +extern struct ctl_path net_vs_ctl_path[]; extern struct ip_vs_service * ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); diff --git a/include/net/ipip.h b/include/net/ipip.h index 7cdc914322f0..549e132bca9c 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -2,6 +2,7 @@ #define __NET_IPIP_H 1 #include <linux/if_tunnel.h> +#include <net/ip.h> /* Keep error state on tunnel for 30 sec */ #define IPTUNNEL_ERR_TIMEO (30*HZ) @@ -30,11 +31,9 @@ struct ip_tunnel int pkt_len = skb->len; \ \ skb->ip_summed = CHECKSUM_NONE; \ - iph->tot_len = htons(skb->len); \ ip_select_ident(iph, &rt->u.dst, NULL); \ - ip_send_check(iph); \ \ - err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output);\ + err = ip_local_out(skb); \ if (net_xmit_eval(err) == 0) { \ stats->tx_bytes += pkt_len; \ stats->tx_packets++; \ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ae328b680ff2..fa80ea48639d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -109,9 +109,10 @@ struct frag_hdr { #include <net/sock.h> /* sysctls */ -extern int sysctl_ipv6_bindv6only; extern int sysctl_mld_max_msf; +extern struct ctl_path net_ipv6_ctl_path[]; + #define _DEVINC(statname, modifier, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ @@ -143,14 +144,6 @@ DECLARE_SNMP_STAT(struct icmpv6msg_mib, 
icmpv6msg_statistics); #define ICMP6_INC_STATS_BH(idev, field) _DEVINC(icmpv6, _BH, idev, field) #define ICMP6_INC_STATS_USER(idev, field) _DEVINC(icmpv6, _USER, idev, field) -#define ICMP6_INC_STATS_OFFSET_BH(idev, field, offset) ({ \ - struct inet6_dev *_idev = idev; \ - __typeof__(offset) _offset = (offset); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS_OFFSET_BH(_idev->stats.icmpv6, field, _offset); \ - SNMP_INC_STATS_OFFSET_BH(icmpv6_statistics, field, _offset); \ -}) - #define ICMP6MSGOUT_INC_STATS(idev, field) \ _DEVINC(icmpv6msg, , idev, field +256) #define ICMP6MSGOUT_INC_STATS_BH(idev, field) \ @@ -164,15 +157,6 @@ DECLARE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics); #define ICMP6MSGIN_INC_STATS_USER(idev, field) \ _DEVINC(icmpv6msg, _USER, idev, field) -DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6); -DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); -#define UDP6_INC_STATS_BH(field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_BH(udplite_stats_in6, field); \ - else SNMP_INC_STATS_BH(udp_stats_in6, field); } while(0) -#define UDP6_INC_STATS_USER(field, is_udplite) do { \ - if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field); \ - else SNMP_INC_STATS_USER(udp_stats_in6, field); } while(0) - struct ip6_ra_chain { struct ip6_ra_chain *next; @@ -236,7 +220,7 @@ extern struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_spac struct ipv6_txoptions * fopt); extern void fl6_free_socklist(struct sock *sk); extern int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen); -extern void ip6_flowlabel_init(void); +extern int ip6_flowlabel_init(void); extern void ip6_flowlabel_cleanup(void); static inline void fl6_sock_release(struct ip6_flowlabel *fl) @@ -261,8 +245,8 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb); -int ip6_frag_nqueues(void); -int ip6_frag_mem(void); +int ip6_frag_nqueues(struct net *net); +int ip6_frag_mem(struct net *net); #define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */ @@ -509,6 +493,9 @@ extern int ip6_forward(struct sk_buff *skb); extern int ip6_input(struct sk_buff *skb); extern int ip6_mc_input(struct sk_buff *skb); +extern int __ip6_local_out(struct sk_buff *skb); +extern int ip6_local_out(struct sk_buff *skb); + /* * Extension header (options) processing */ @@ -559,7 +546,7 @@ extern int compat_ipv6_getsockopt(struct sock *sk, char __user *optval, int __user *optlen); -extern void ipv6_packet_init(void); +extern int ipv6_packet_init(void); extern void ipv6_packet_cleanup(void); @@ -585,9 +572,6 @@ extern int inet6_hash_connect(struct inet_timewait_death_row *death_row, /* * reassembly.c */ -struct inet_frags_ctl; -extern struct inet_frags_ctl ip6_frags_ctl; - extern const struct proto_ops inet6_stream_ops; extern const struct proto_ops inet6_dgram_ops; @@ -602,6 +586,9 @@ extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, int __user *optlen); #ifdef CONFIG_PROC_FS +extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); +extern struct ctl_table *ipv6_route_sysctl_init(struct net *net); + extern int ac6_proc_init(void); extern void ac6_proc_exit(void); extern int raw6_proc_init(void); @@ -631,10 +618,10 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) #endif #ifdef CONFIG_SYSCTL -extern ctl_table ipv6_route_table[]; -extern ctl_table ipv6_icmp_table[]; +extern ctl_table ipv6_route_table_template[]; +extern ctl_table ipv6_icmp_table_template[]; -extern void 
ipv6_sysctl_register(void); +extern int ipv6_sysctl_register(void); extern void ipv6_sysctl_unregister(void); #endif diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h index bca19ca7bdd4..f70e9b39ebaf 100644 --- a/include/net/irda/irda_device.h +++ b/include/net/irda/irda_device.h @@ -228,21 +228,8 @@ static inline int irda_device_txqueue_empty(const struct net_device *dev) int irda_device_set_raw_mode(struct net_device* self, int status); struct net_device *alloc_irdadev(int sizeof_priv); -/* Dongle interface */ -void irda_device_unregister_dongle(struct dongle_reg *dongle); -int irda_device_register_dongle(struct dongle_reg *dongle); -dongle_t *irda_device_dongle_init(struct net_device *dev, int type); -int irda_device_dongle_cleanup(dongle_t *dongle); - void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode); -void irda_task_delete(struct irda_task *task); -struct irda_task *irda_task_execute(void *instance, - IRDA_TASK_CALLBACK function, - IRDA_TASK_CALLBACK finished, - struct irda_task *parent, void *param); -void irda_task_next_state(struct irda_task *task, IRDA_TASK_STATE state); - /* * Function irda_get_mtt (skb) * diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 17b60391fcd6..9083bafb63ca 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -139,17 +139,54 @@ enum ieee80211_phymode { }; /** + * struct ieee80211_ht_info - describing STA's HT capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11n HT capabilities for an STA. + * + * @ht_supported: is HT supported by STA, 0: no, 1: yes + * @cap: HT capabilities map as described in 802.11n spec + * @ampdu_factor: Maximum A-MPDU length factor + * @ampdu_density: Minimum A-MPDU spacing + * @supp_mcs_set: Supported MCS set as described in 802.11n spec + */ +struct ieee80211_ht_info { + u8 ht_supported; + u16 cap; /* use IEEE80211_HT_CAP_ */ + u8 ampdu_factor; + u8 ampdu_density; + u8 supp_mcs_set[16]; +}; + +/** + * struct ieee80211_ht_bss_info - describing BSS's HT characteristics + * + * This structure describes most essential parameters needed + * to describe 802.11n HT characteristics in a BSS + * + * @primary_channel: channel number of primary channel + * @bss_cap: 802.11n's general BSS capabilities (e.g. channel width) + * @bss_op_mode: 802.11n's BSS operation modes (e.g. HT protection) + */ +struct ieee80211_ht_bss_info { + u8 primary_channel; + u8 bss_cap; /* use IEEE80211_HT_IE_CHA_ */ + u8 bss_op_mode; /* use IEEE80211_HT_IE_ */ +}; + +/** * struct ieee80211_hw_mode - PHY mode definition * * This structure describes the capabilities supported by the device * in a single PHY mode.
* + * @list: internal + * @channels: pointer to array of supported channels + * @rates: pointer to array of supported bitrates * @mode: the PHY mode for this definition * @num_channels: number of supported channels - * @channels: pointer to array of supported channels * @num_rates: number of supported bitrates - * @rates: pointer to array of supported bitrates - * @list: internal + * @ht_info: PHY's 802.11n HT abilities for this mode */ struct ieee80211_hw_mode { struct list_head list; @@ -158,6 +195,7 @@ struct ieee80211_hw_mode { enum ieee80211_phymode mode; int num_channels; int num_rates; + struct ieee80211_ht_info ht_info; }; /** @@ -237,11 +275,49 @@ struct ieee80211_low_level_stats { unsigned int dot11RTSSuccessCount; }; +/** + * enum ieee80211_bss_change - BSS change notification flags + * + * These flags are used with the bss_info_changed() callback + * to indicate which BSS parameter changed. + * + * @BSS_CHANGED_ASSOC: association status changed (associated/disassociated), + * also implies a change in the AID. + * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed + * @BSS_CHANGED_ERP_PREAMBLE: preamble changed + */ +enum ieee80211_bss_change { + BSS_CHANGED_ASSOC = 1<<0, + BSS_CHANGED_ERP_CTS_PROT = 1<<1, + BSS_CHANGED_ERP_PREAMBLE = 1<<2, +}; + +/** + * struct ieee80211_bss_conf - holds the BSS's changing parameters + * + * This structure keeps information about a BSS (and an association + * to that BSS) that can change during the lifetime of the BSS. + * + * @assoc: association status + * @aid: association ID number, valid only when @assoc is true + * @use_cts_prot: use CTS protection + * @use_short_preamble: use 802.11b short preamble + */ +struct ieee80211_bss_conf { + /* association related data */ + bool assoc; + u16 aid; + /* erp related data */ + bool use_cts_prot; + bool use_short_preamble; +}; + /* Transmit control fields. This data structure is passed to low-level driver * with each TX frame. The low-level driver is responsible for configuring * the hardware to use given values (depending on what is supported). */ struct ieee80211_tx_control { + struct ieee80211_vif *vif; int tx_rate; /* Transmit rate, given as the hw specific value for the * rate (from struct ieee80211_rate) */ int rts_cts_rate; /* Transmit rate for RTS/CTS frame, given as the hw @@ -269,6 +345,9 @@ struct ieee80211_tx_control { * using the through * set_retry_limit configured * long retry value */ +#define IEEE80211_TXCTL_EAPOL_FRAME (1<<11) /* internal to mac80211 */ +#define IEEE80211_TXCTL_SEND_AFTER_DTIM (1<<12) /* send this frame after DTIM + * beacon */ u32 flags; /* tx control flags defined * above */ u8 key_idx; /* keyidx from hw->set_key(), undefined if @@ -291,7 +370,6 @@ struct ieee80211_tx_control { * packet dropping when probing higher rates, if hw * supports multiple retry rates. -1 = not used */ int type; /* internal */ - int ifindex; /* internal */ }; @@ -312,6 +390,8 @@ struct ieee80211_tx_control { * the frame. * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on * the frame. + * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) + * is valid. 
*/ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = 1<<0, @@ -321,6 +401,7 @@ enum mac80211_rx_flags { RX_FLAG_IV_STRIPPED = 1<<4, RX_FLAG_FAILED_FCS_CRC = 1<<5, RX_FLAG_FAILED_PLCP_CRC = 1<<6, + RX_FLAG_TSFT = 1<<7, }; /** @@ -406,11 +487,12 @@ struct ieee80211_tx_status { * * @IEEE80211_CONF_SHORT_SLOT_TIME: use 802.11g short slot time * @IEEE80211_CONF_RADIOTAP: add radiotap header at receive time (if supported) - * + * @IEEE80211_CONF_SUPPORT_HT_MODE: use 802.11n HT capabilities (if supported) */ enum ieee80211_conf_flags { - IEEE80211_CONF_SHORT_SLOT_TIME = 1<<0, - IEEE80211_CONF_RADIOTAP = 1<<1, + IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0), + IEEE80211_CONF_RADIOTAP = (1<<1), + IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2), }; /** @@ -434,6 +516,8 @@ enum ieee80211_conf_flags { * @antenna_sel_tx: transmit antenna selection, 0: default/diversity, * 1/2: antenna 0/1 * @antenna_sel_rx: receive antenna selection, like @antenna_sel_tx + * @ht_conf: describes current self configuration of 802.11n HT capabilities + * @ht_bss_conf: describes current BSS configuration of 802.11n HT parameters */ struct ieee80211_conf { int channel; /* IEEE 802.11 channel number */ @@ -452,6 +536,9 @@ struct ieee80211_conf { u8 antenna_max; u8 antenna_sel_tx; u8 antenna_sel_rx; + + struct ieee80211_ht_info ht_conf; + struct ieee80211_ht_bss_info ht_bss_conf; }; /** @@ -480,13 +567,27 @@ enum ieee80211_if_types { }; /** + * struct ieee80211_vif - per-interface data + * + * Data in this structure is continually present for driver + * use during the life of a virtual interface. + * + * @type: type of this virtual interface + * @drv_priv: data area for driver use, will always be aligned to + * sizeof(void *). + */ +struct ieee80211_vif { + enum ieee80211_if_types type; + /* must be last */ + u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); +}; + +/** * struct ieee80211_if_init_conf - initial configuration of an interface * - * @if_id: internal interface ID. This number has no particular meaning to - * drivers and the only allowed usage is to pass it to - * ieee80211_beacon_get() and ieee80211_get_buffered_bc() functions. - * This field is not valid for monitor interfaces - * (interfaces of %IEEE80211_IF_TYPE_MNTR type). + * @vif: pointer to a driver-use per-interface structure. The pointer + * itself is also used for various functions including + * ieee80211_beacon_get() and ieee80211_get_buffered_bc(). * @type: one of &enum ieee80211_if_types constants. Determines the type of * added/removed interface. * @mac_addr: pointer to MAC address of the interface. This pointer is valid @@ -503,8 +604,8 @@ enum ieee80211_if_types { * in pure monitor mode. */ struct ieee80211_if_init_conf { - int if_id; enum ieee80211_if_types type; + struct ieee80211_vif *vif; void *mac_addr; }; @@ -597,9 +698,6 @@ struct ieee80211_key_conf { u8 key[0]; }; -#define IEEE80211_SEQ_COUNTER_RX 0 -#define IEEE80211_SEQ_COUNTER_TX 1 - /** * enum set_key_cmd - key command * @@ -710,6 +808,9 @@ enum ieee80211_hw_flags { * @rate_control_algorithm: rate control algorithm for this hardware. * If unset (NULL), the default algorithm will be used. Must be * set before calling ieee80211_register_hw(). + * + * @vif_data_size: size (in bytes) of the drv_priv data area + * within &struct ieee80211_vif.
*/ struct ieee80211_hw { struct ieee80211_conf conf; @@ -720,6 +821,7 @@ struct ieee80211_hw { u32 flags; unsigned int extra_tx_headroom; int channel_change_time; + int vif_data_size; u8 queues; s8 max_rssi; s8 max_signal; @@ -859,19 +961,18 @@ enum ieee80211_filter_flags { }; /** - * enum ieee80211_erp_change_flags - erp change flags + * enum ieee80211_ampdu_mlme_action - A-MPDU actions * - * These flags are used with the erp_ie_changed() callback in - * &struct ieee80211_ops to indicate which parameter(s) changed. - * @IEEE80211_ERP_CHANGE_PROTECTION: protection changed - * @IEEE80211_ERP_CHANGE_PREAMBLE: barker preamble mode changed + * These flags are used with the ampdu_action() callback in + * &struct ieee80211_ops to indicate which action is needed. + * @IEEE80211_AMPDU_RX_START: start Rx aggregation + * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation */ -enum ieee80211_erp_change_flags { - IEEE80211_ERP_CHANGE_PROTECTION = 1<<0, - IEEE80211_ERP_CHANGE_PREAMBLE = 1<<1, +enum ieee80211_ampdu_mlme_action { + IEEE80211_AMPDU_RX_START, + IEEE80211_AMPDU_RX_STOP, }; - /** * struct ieee80211_ops - callbacks from mac80211 to the driver * @@ -927,6 +1028,14 @@ enum ieee80211_erp_change_flags { * @config_interface: Handler for configuration requests related to interfaces * (e.g. BSSID changes.) * + * @bss_info_changed: Handler for configuration requests related to BSS + * parameters that may vary during BSS's lifespan, and may affect low + * level driver (e.g. assoc/disassoc status, erp parameters). + * This function should not be used if no BSS has been set, unless + * for association indication. The @changed parameter indicates which + * of the bss parameters has changed when a call is made. This callback + * has to be atomic. + * * @configure_filter: Configure the device's RX filter. * See the section "Frame filtering" for more information. * This callback must be implemented and atomic. @@ -946,9 +1055,9 @@ enum ieee80211_erp_change_flags { * * @get_stats: return low-level statistics * - * @get_sequence_counter: For devices that have internal sequence counters this - * callback allows mac80211 to access the current value of a counter. - * This callback seems not well-defined, tell us if you need it. + * @get_tkip_seq: If your device implements TKIP encryption in hardware this + * callback should be provided to read the TKIP transmit IVs (both IV32 + * and IV16) for the given key from hardware. * * @set_rts_threshold: Configuration of RTS threshold (if device needs it) * @@ -961,8 +1070,6 @@ enum ieee80211_erp_change_flags { * @sta_notify: Notifies low level driver about addition or removal * of assocaited station or AP. * - * @erp_ie_changed: Handle ERP IE change notifications. Must be atomic. - * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. The @queue parameter uses the * %IEEE80211_TX_QUEUE_* constants. Must be atomic. @@ -997,6 +1104,14 @@ enum ieee80211_erp_change_flags { * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. * This is needed only for IBSS mode and the result of this function is * used to determine whether to reply to Probe Requests. + * + * @conf_ht: Configures low level driver with 802.11n HT data. Must be atomic. + * + * @ampdu_action: Perform a certain A-MPDU action + * The RA/TID combination determines the destination and TID we want + * the ampdu action to be performed for. The action is defined through + * ieee80211_ampdu_mlme_action. 
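Since the ampdu_action() description above only sketches the contract, here is a hedged example of the switch a driver would implement for the two RX actions; mydrv_rx_agg_start()/mydrv_rx_agg_stop() stand in for hardware-specific programming:

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
                              enum ieee80211_ampdu_mlme_action action,
                              const u8 *ra, u16 tid, u16 ssn)
{
        struct mydrv_priv *priv = hw->priv;

        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                /* Program the RA/TID pair and the starting sequence number. */
                return mydrv_rx_agg_start(priv, ra, tid, ssn);
        case IEEE80211_AMPDU_RX_STOP:
                return mydrv_rx_agg_stop(priv, ra, tid);
        default:
                return -EINVAL;
        }
}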
Starting sequence number (@ssn) + * is the first frame we expect to perform the action on. */ struct ieee80211_ops { int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb, @@ -1009,7 +1124,12 @@ struct ieee80211_ops { struct ieee80211_if_init_conf *conf); int (*config)(struct ieee80211_hw *hw, struct ieee80211_conf *conf); int (*config_interface)(struct ieee80211_hw *hw, - int if_id, struct ieee80211_if_conf *conf); + struct ieee80211_vif *vif, + struct ieee80211_if_conf *conf); + void (*bss_info_changed)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed); void (*configure_filter)(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, @@ -1021,17 +1141,14 @@ struct ieee80211_ops { int (*hw_scan)(struct ieee80211_hw *hw, u8 *ssid, size_t len); int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); - int (*get_sequence_counter)(struct ieee80211_hw *hw, - u8* addr, u8 keyidx, u8 txrx, - u32* iv32, u16* iv16); + void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, + u32 *iv32, u16 *iv16); int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value); int (*set_retry_limit)(struct ieee80211_hw *hw, u32 short_retry, u32 long_retr); - void (*sta_notify)(struct ieee80211_hw *hw, int if_id, + void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd, const u8 *addr); - void (*erp_ie_changed)(struct ieee80211_hw *hw, u8 changes, - int cts_protection, int preamble); int (*conf_tx)(struct ieee80211_hw *hw, int queue, const struct ieee80211_tx_queue_params *params); int (*get_tx_stats)(struct ieee80211_hw *hw, @@ -1042,6 +1159,10 @@ struct ieee80211_ops { struct sk_buff *skb, struct ieee80211_tx_control *control); int (*tx_last_beacon)(struct ieee80211_hw *hw); + int (*conf_ht)(struct ieee80211_hw *hw, struct ieee80211_conf *conf); + int (*ampdu_action)(struct ieee80211_hw *hw, + enum ieee80211_ampdu_mlme_action action, + const u8 *ra, u16 tid, u16 ssn); }; /** @@ -1073,6 +1194,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw); extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); +extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); #endif /** * ieee80211_get_tx_led_name - get name of TX LED @@ -1112,6 +1234,16 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) #endif } +/** + * ieee80211_get_assoc_led_name - get name of association LED + * + * mac80211 creates a association LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. + * + * @hw: the hardware to get the LED trigger name for + */ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) { #ifdef CONFIG_MAC80211_LEDS @@ -1121,6 +1253,24 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) #endif } +/** + * ieee80211_get_radio_led_name - get name of radio LED + * + * mac80211 creates a radio change LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. 
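Taken together, the ieee80211_ops changes in this hunk swap the integer if_id for a struct ieee80211_vif pointer and rename two callbacks. A sketch of how a converted driver's ops table could look; every mydrv_* handler is a placeholder:

static const struct ieee80211_ops mydrv_ops = {
        .tx                 = mydrv_tx,
        .add_interface      = mydrv_add_interface,
        .remove_interface   = mydrv_remove_interface,
        .config             = mydrv_config,
        .config_interface   = mydrv_config_interface,   /* now takes a vif */
        .bss_info_changed   = mydrv_bss_info_changed,   /* replaces erp_ie_changed */
        .get_tkip_seq       = mydrv_get_tkip_seq,       /* replaces get_sequence_counter */
        .sta_notify         = mydrv_sta_notify,         /* now takes a vif */
        .ampdu_action       = mydrv_ampdu_action,
};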
+ * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. + * + * @hw: the hardware to get the LED trigger name for + */ +static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_get_radio_led_name(hw); +#else + return NULL; +#endif +} /* Register a new hardware PHYMODE capability to the stack. */ int ieee80211_register_hwmode(struct ieee80211_hw *hw, @@ -1210,7 +1360,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, /** * ieee80211_beacon_get - beacon generation function * @hw: pointer obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @control: will be filled with information needed to send this beacon. * * If the beacon frames are generated by the host system (i.e., not in @@ -1221,13 +1371,13 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, * is responsible of freeing it. */ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, - int if_id, + struct ieee80211_vif *vif, struct ieee80211_tx_control *control); /** * ieee80211_rts_get - RTS frame generation function * @hw: pointer obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @frame: pointer to the frame that is going to be protected by the RTS. * @frame_len: the frame length (in octets). * @frame_txctl: &struct ieee80211_tx_control of the frame. @@ -1238,7 +1388,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, * the next RTS frame from the 802.11 code. The low-level is responsible * for calling this function before and RTS frame is needed. */ -void ieee80211_rts_get(struct ieee80211_hw *hw, int if_id, +void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_control *frame_txctl, struct ieee80211_rts *rts); @@ -1246,7 +1396,7 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, int if_id, /** * ieee80211_rts_duration - Get the duration field for an RTS frame * @hw: pointer obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @frame_len: the length of the frame that is going to be protected by the RTS. * @frame_txctl: &struct ieee80211_tx_control of the frame. * @@ -1254,14 +1404,14 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, int if_id, * the duration field, the low-level driver uses this function to receive * the duration field value in little-endian byteorder. */ -__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, int if_id, - size_t frame_len, +__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, size_t frame_len, const struct ieee80211_tx_control *frame_txctl); /** * ieee80211_ctstoself_get - CTS-to-self frame generation function * @hw: pointer obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @frame: pointer to the frame that is going to be protected by the CTS-to-self. * @frame_len: the frame length (in octets). * @frame_txctl: &struct ieee80211_tx_control of the frame. 
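For the new radio LED trigger, a driver would typically feed the returned name into a struct led_classdev as its default trigger. A sketch, assuming a hypothetical priv->radio_led member:

#include <linux/leds.h>
#include <net/mac80211.h>

static int mydrv_register_radio_led(struct ieee80211_hw *hw,
                                    struct device *parent)
{
        struct mydrv_priv *priv = hw->priv;

        priv->radio_led.name = "mydrv::radio";
        /* NULL when mac80211 was built without CONFIG_MAC80211_LEDS. */
        priv->radio_led.default_trigger = ieee80211_get_radio_led_name(hw);
        return led_classdev_register(parent, &priv->radio_led);
}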
@@ -1272,7 +1422,8 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, int if_id, * the next CTS-to-self frame from the 802.11 code. The low-level is responsible * for calling this function before and CTS-to-self frame is needed. */ -void ieee80211_ctstoself_get(struct ieee80211_hw *hw, int if_id, +void ieee80211_ctstoself_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_control *frame_txctl, struct ieee80211_cts *cts); @@ -1280,7 +1431,7 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw, int if_id, /** * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame * @hw: pointer obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @frame_len: the length of the frame that is going to be protected by the CTS-to-self. * @frame_txctl: &struct ieee80211_tx_control of the frame. * @@ -1288,28 +1439,30 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw, int if_id, * the duration field, the low-level driver uses this function to receive * the duration field value in little-endian byteorder. */ -__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, int if_id, +__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, size_t frame_len, const struct ieee80211_tx_control *frame_txctl); /** * ieee80211_generic_frame_duration - Calculate the duration field for a frame * @hw: pointer obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @frame_len: the length of the frame. * @rate: the rate (in 100kbps) at which the frame is going to be transmitted. * * Calculate the duration field of some generic frame, given its * length and transmission rate (in 100kbps). */ -__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, int if_id, +__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, size_t frame_len, int rate); /** * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames * @hw: pointer as obtained from ieee80211_alloc_hw(). - * @if_id: interface ID from &struct ieee80211_if_init_conf. + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. * @control: will be filled with information needed to send returned frame. * * Function for accessing buffered broadcast and multicast frames. If @@ -1328,7 +1481,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, int if_id, * use common code for all beacons. */ struct sk_buff * -ieee80211_get_buffered_bc(struct ieee80211_hw *hw, int if_id, +ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tx_control *control); /** @@ -1406,4 +1559,19 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw); */ void ieee80211_scan_completed(struct ieee80211_hw *hw); +/** + * ieee80211_iterate_active_interfaces - iterate active interfaces + * + * This function iterates over the interfaces associated with a given + * hardware that are currently active and calls the callback for them. 
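The beacon and buffered-broadcast helpers now take the vif pointer instead of if_id. A hedged sketch of the usual DTIM path that chains them; mydrv_tx_frame() stands in for the driver's real frame submission routine:

static void mydrv_beacon_update(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
{
        struct ieee80211_tx_control control;
        struct sk_buff *skb;

        skb = ieee80211_beacon_get(hw, vif, &control);
        if (!skb)
                return;
        mydrv_tx_frame(hw->priv, skb, &control);

        /* After a DTIM beacon, also flush frames buffered for PS stations. */
        while ((skb = ieee80211_get_buffered_bc(hw, vif, &control)) != NULL)
                mydrv_tx_frame(hw->priv, skb, &control);
}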
+ * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data); + #endif /* MAC80211_H */ diff --git a/include/net/neighbour.h b/include/net/neighbour.h index a4f26187fc1a..ebbfb509822e 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -26,6 +26,10 @@ #include <linux/sysctl.h> #include <net/rtnetlink.h> +/* + * NUD stands for "neighbor unreachability detection" + */ + #define NUD_IN_TIMER (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE) #define NUD_VALID (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY) #define NUD_CONNECTED (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE) @@ -34,6 +38,7 @@ struct neighbour; struct neigh_parms { + struct net *net; struct net_device *dev; struct neigh_parms *next; int (*neigh_setup)(struct neighbour *); @@ -126,7 +131,8 @@ struct neigh_ops struct pneigh_entry { struct pneigh_entry *next; - struct net_device *dev; + struct net *net; + struct net_device *dev; u8 flags; u8 key[0]; }; @@ -187,6 +193,7 @@ extern struct neighbour * neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev); extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl, + struct net *net, const void *pkey); extern struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey, @@ -206,13 +213,12 @@ extern struct neighbour *neigh_event_ns(struct neigh_table *tbl, extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl); extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); -extern void neigh_parms_destroy(struct neigh_parms *parms); extern unsigned long neigh_rand_reach_time(unsigned long base); extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, struct sk_buff *skb); -extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *key, struct net_device *dev, int creat); -extern int pneigh_delete(struct neigh_table *tbl, const void *key, struct net_device *dev); +extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat); +extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); extern void neigh_app_ns(struct neighbour *n); extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); @@ -220,6 +226,7 @@ extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct n extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); struct neigh_seq_state { + struct seq_net_private p; struct neigh_table *tbl; void *(*neigh_sub_iter)(struct neigh_seq_state *state, struct neighbour *n, loff_t *pos); @@ -246,12 +253,6 @@ static inline void __neigh_parms_put(struct neigh_parms *parms) atomic_dec(&parms->refcnt); } -static inline void neigh_parms_put(struct neigh_parms *parms) -{ - if (atomic_dec_and_test(&parms->refcnt)) - neigh_parms_destroy(parms); -} - static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms) { atomic_inc(&parms->refcnt); @@ -288,10 +289,6 @@ static inline int neigh_is_connected(struct neighbour *neigh) return neigh->nud_state&NUD_CONNECTED; } -static inline int 
neigh_is_valid(struct neighbour *neigh) -{ - return neigh->nud_state&NUD_VALID; -} static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 5dd6d90b37eb..b8c1d60ba9e4 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -8,8 +8,16 @@ #include <linux/workqueue.h> #include <linux/list.h> +#include <net/netns/unix.h> +#include <net/netns/packet.h> +#include <net/netns/ipv4.h> +#include <net/netns/ipv6.h> + struct proc_dir_entry; struct net_device; +struct sock; +struct ctl_table_header; + struct net { atomic_t count; /* To decided when the network * namespace should be freed. @@ -24,11 +32,30 @@ struct net { struct proc_dir_entry *proc_net_stat; struct proc_dir_entry *proc_net_root; + struct list_head sysctl_table_headers; + struct net_device *loopback_dev; /* The loopback */ struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; + + /* core fib_rules */ + struct list_head rules_ops; + spinlock_t rules_mod_lock; + + struct sock *rtnl; /* rtnetlink socket */ + + /* core sysctls */ + struct ctl_table_header *sysctl_core_hdr; + int sysctl_somaxconn; + + struct netns_packet packet; + struct netns_unix unx; + struct netns_ipv4 ipv4; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct netns_ipv6 ipv6; +#endif }; #ifdef CONFIG_NET @@ -137,4 +164,11 @@ extern void unregister_pernet_subsys(struct pernet_operations *); extern int register_pernet_device(struct pernet_operations *); extern void unregister_pernet_device(struct pernet_operations *); +struct ctl_path; +struct ctl_table; +struct ctl_table_header; +extern struct ctl_table_header *register_net_sysctl_table(struct net *net, + const struct ctl_path *path, struct ctl_table *table); +extern void unregister_net_sysctl_table(struct ctl_table_header *header); + #endif /* __NET_NET_NAMESPACE_H */ diff --git a/include/net/netevent.h b/include/net/netevent.h index e5d216241423..e82b7bab3ff3 100644 --- a/include/net/netevent.h +++ b/include/net/netevent.h @@ -12,7 +12,7 @@ */ #ifdef __KERNEL__ -#include <net/dst.h> +struct dst_entry; struct netevent_redirect { struct dst_entry *old; diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index f703533fb4db..abc55ad75c2b 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -16,6 +16,8 @@ extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, int (*okfn)(struct sk_buff *)); struct inet_frags_ctl; -extern struct inet_frags_ctl nf_frags_ctl; + +#include <linux/sysctl.h> +extern struct ctl_table nf_ct_ipv6_sysctl_table[]; #endif /* _NF_CONNTRACK_IPV6_H*/ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4ac5ab187c2a..857d89951790 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -223,8 +223,6 @@ extern void nf_conntrack_tcp_update(struct sk_buff *skb, /* Fake conntrack entry for untracked connections */ extern struct nf_conn nf_conntrack_untracked; -extern int nf_ct_no_defrag; - /* Iterate over all conntracks: if iter returns true, it's deleted. 
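register_net_sysctl_table(), declared in the net_namespace.h hunk above, is the per-namespace entry point for sysctl registration. A sketch of registering a single knob against init_net; the "my_subsys" path, the table and the backing variable are invented for the example:

#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int mydrv_somevalue = 1;
static struct ctl_table_header *mydrv_hdr;

static struct ctl_path mydrv_path[] = {
        { .procname = "net", },
        { .procname = "my_subsys", },
        { }
};

static struct ctl_table mydrv_table[] = {
        {
                .procname       = "somevalue",
                .data           = &mydrv_somevalue,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { }
};

static int __init mydrv_sysctl_init(void)
{
        mydrv_hdr = register_net_sysctl_table(&init_net, mydrv_path,
                                              mydrv_table);
        return mydrv_hdr ? 0 : -ENOMEM;
}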
*/ extern void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data); @@ -264,10 +262,5 @@ do { \ local_bh_enable(); \ } while (0) -extern int -nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size); -extern void -nf_conntrack_unregister_cache(u_int32_t features); - #endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_H */ diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index a532e7b5ed6a..7ad0828f05cf 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -30,16 +30,6 @@ extern void nf_conntrack_cleanup(void); extern int nf_conntrack_proto_init(void); extern void nf_conntrack_proto_fini(void); -extern int nf_conntrack_helper_init(void); -extern void nf_conntrack_helper_fini(void); - -struct nf_conntrack_l3proto; -extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf); -/* Like above, but you already have conntrack read lock. */ -extern struct nf_conntrack_l3proto *__nf_ct_find_l3proto(u_int16_t l3proto); - -struct nf_conntrack_l4proto; - extern int nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, @@ -76,8 +66,6 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) return ret; } -extern void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb); - int print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index b47c04f12dbe..6c3fd254c28e 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -73,8 +73,8 @@ void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); nf_ct_expect_related. You will have to call put afterwards. */ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me); void nf_ct_expect_init(struct nf_conntrack_expect *, int, - union nf_conntrack_address *, - union nf_conntrack_address *, + union nf_inet_addr *, + union nf_inet_addr *, u_int8_t, __be16 *, __be16 *); void nf_ct_expect_put(struct nf_conntrack_expect *exp); int nf_ct_expect_related(struct nf_conntrack_expect *expect); diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index d7b2d5483a71..2f3af00643cf 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -58,4 +58,8 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) { return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); } + +extern int nf_conntrack_helper_init(void); +extern void nf_conntrack_helper_fini(void); + #endif /*_NF_CONNTRACK_HELPER_H*/ diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index 15888fc7b72d..d5526bcce147 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -42,9 +42,6 @@ struct nf_conntrack_l3proto int (*print_tuple)(struct seq_file *s, const struct nf_conntrack_tuple *); - /* Print out the private part of the conntrack. */ - int (*print_conntrack)(struct seq_file *s, const struct nf_conn *); - /* Returns verdict for packet, or -1 for invalid. 
*/ int (*packet)(struct nf_conn *conntrack, const struct sk_buff *skb, @@ -73,7 +70,7 @@ struct nf_conntrack_l3proto #ifdef CONFIG_SYSCTL struct ctl_table_header *ctl_table_header; - struct ctl_table *ctl_table_path; + struct ctl_path *ctl_table_path; struct ctl_table *ctl_table; #endif /* CONFIG_SYSCTL */ diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index c48e390f4b0f..45cb17cdcfd0 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -10,6 +10,7 @@ #ifndef _NF_CONNTRACK_TUPLE_H #define _NF_CONNTRACK_TUPLE_H +#include <linux/netfilter/x_tables.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> /* A `tuple' is a structure containing the information to uniquely @@ -20,15 +21,7 @@ "non-manipulatable" lines, for the benefit of the NAT code. */ -#define NF_CT_TUPLE_L3SIZE 4 - -/* The l3 protocol-specific manipulable parts of the tuple: always in - network order! */ -union nf_conntrack_address { - u_int32_t all[NF_CT_TUPLE_L3SIZE]; - __be32 ip; - __be32 ip6[4]; -}; +#define NF_CT_TUPLE_L3SIZE ARRAY_SIZE(((union nf_inet_addr *)NULL)->all) /* The protocol-specific manipulable parts of the tuple: always in network order! */ @@ -57,7 +50,7 @@ union nf_conntrack_man_proto /* The manipulable part of the tuple. */ struct nf_conntrack_man { - union nf_conntrack_address u3; + union nf_inet_addr u3; union nf_conntrack_man_proto u; /* Layer 3 protocol */ u_int16_t l3num; @@ -70,7 +63,7 @@ struct nf_conntrack_tuple /* These are the parts of the tuple which are fixed. */ struct { - union nf_conntrack_address u3; + union nf_inet_addr u3; union { /* Add other protocols here. */ __be16 all; @@ -103,7 +96,7 @@ struct nf_conntrack_tuple struct nf_conntrack_tuple_mask { struct { - union nf_conntrack_address u3; + union nf_inet_addr u3; union nf_conntrack_man_proto u; } src; }; diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h new file mode 100644 index 000000000000..037e82403f91 --- /dev/null +++ b/include/net/netfilter/nf_log.h @@ -0,0 +1,59 @@ +#ifndef _NF_LOG_H +#define _NF_LOG_H + +/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will + * disappear once iptables is replaced with pkttables. Please DO NOT use them + * for any new code! */ +#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ +#define NF_LOG_TCPOPT 0x02 /* Log TCP options */ +#define NF_LOG_IPOPT 0x04 /* Log IP options */ +#define NF_LOG_UID 0x08 /* Log UID owning local socket */ +#define NF_LOG_MASK 0x0f + +#define NF_LOG_TYPE_LOG 0x01 +#define NF_LOG_TYPE_ULOG 0x02 + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +typedef void nf_logfn(unsigned int pf, + unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *li, + const char *prefix); + +struct nf_logger { + struct module *me; + nf_logfn *logfn; + char *name; +}; + +/* Function to register/unregister log function. 
*/ +int nf_log_register(int pf, const struct nf_logger *logger); +void nf_log_unregister(const struct nf_logger *logger); +void nf_log_unregister_pf(int pf); + +/* Calls the registered backend logging function */ +void nf_log_packet(int pf, + unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *li, + const char *fmt, ...); + +#endif /* _NF_LOG_H */ diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index 6ae52f7c9f55..9dc1039ff78b 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -12,7 +12,8 @@ enum nf_nat_manip_type }; /* SRC manip occurs POST_ROUTING or LOCAL_IN */ -#define HOOK2MANIP(hooknum) ((hooknum) != NF_IP_POST_ROUTING && (hooknum) != NF_IP_LOCAL_IN) +#define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \ + (hooknum) != NF_INET_LOCAL_IN) #define IP_NAT_RANGE_MAP_IPS 1 #define IP_NAT_RANGE_PROTO_SPECIFIED 2 @@ -79,7 +80,7 @@ struct nf_conn_nat /* Set up the info structure to map into this range. */ extern unsigned int nf_nat_setup_info(struct nf_conn *ct, const struct nf_nat_range *range, - unsigned int hooknum); + enum nf_nat_manip_type maniptype); /* Is this tuple already taken? (not by us)*/ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h index 04578bfe23e1..4aa0edbb5b96 100644 --- a/include/net/netfilter/nf_nat_protocol.h +++ b/include/net/netfilter/nf_nat_protocol.h @@ -46,21 +46,21 @@ struct nf_nat_protocol }; /* Protocol registration. */ -extern int nf_nat_protocol_register(struct nf_nat_protocol *proto); -extern void nf_nat_protocol_unregister(struct nf_nat_protocol *proto); +extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto); +extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto); -extern struct nf_nat_protocol *nf_nat_proto_find_get(u_int8_t protocol); -extern void nf_nat_proto_put(struct nf_nat_protocol *proto); +extern const struct nf_nat_protocol *nf_nat_proto_find_get(u_int8_t protocol); +extern void nf_nat_proto_put(const struct nf_nat_protocol *proto); /* Built-in protocols. */ -extern struct nf_nat_protocol nf_nat_protocol_tcp; -extern struct nf_nat_protocol nf_nat_protocol_udp; -extern struct nf_nat_protocol nf_nat_protocol_icmp; -extern struct nf_nat_protocol nf_nat_unknown_protocol; +extern const struct nf_nat_protocol nf_nat_protocol_tcp; +extern const struct nf_nat_protocol nf_nat_protocol_udp; +extern const struct nf_nat_protocol nf_nat_protocol_icmp; +extern const struct nf_nat_protocol nf_nat_unknown_protocol; extern int init_protocols(void) __init; extern void cleanup_protocols(void); -extern struct nf_nat_protocol *find_nat_proto(u_int16_t protonum); +extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum); extern int nf_nat_port_range_to_nlattr(struct sk_buff *skb, const struct nf_nat_range *range); diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h new file mode 100644 index 000000000000..d030044e9235 --- /dev/null +++ b/include/net/netfilter/nf_queue.h @@ -0,0 +1,34 @@ +#ifndef _NF_QUEUE_H +#define _NF_QUEUE_H + +/* Each queued (to userspace) skbuff has one of these. 
*/ +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + + struct nf_hook_ops *elem; + int pf; + unsigned int hook; + struct net_device *indev; + struct net_device *outdev; + int (*okfn)(struct sk_buff *); +}; + +#define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) + +/* Packet queuing */ +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *entry, + unsigned int queuenum); + char *name; +}; + +extern int nf_register_queue_handler(int pf, + const struct nf_queue_handler *qh); +extern int nf_unregister_queue_handler(int pf, + const struct nf_queue_handler *qh); +extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); +extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); + +#endif /* _NF_QUEUE_H */ diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h new file mode 100644 index 000000000000..65d594dffbff --- /dev/null +++ b/include/net/netfilter/xt_rateest.h @@ -0,0 +1,17 @@ +#ifndef _XT_RATEEST_H +#define _XT_RATEEST_H + +struct xt_rateest { + struct hlist_node list; + char name[IFNAMSIZ]; + unsigned int refcnt; + spinlock_t lock; + struct gnet_estimator params; + struct gnet_stats_rate_est rstats; + struct gnet_stats_basic bstats; +}; + +extern struct xt_rateest *xt_rateest_lookup(const char *name); +extern void xt_rateest_put(struct xt_rateest *est); + +#endif /* _XT_RATEEST_H */ diff --git a/include/net/netlabel.h b/include/net/netlabel.h index 2e5b2f6f9fa0..b3213c7c5309 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h @@ -67,7 +67,11 @@ * NetLabel NETLINK protocol */ -#define NETLBL_PROTO_VERSION 1 +/* NetLabel NETLINK protocol version + * 1: initial version + * 2: added static labels for unlabeled connections + */ +#define NETLBL_PROTO_VERSION 2 /* NetLabel NETLINK types/families */ #define NETLBL_NLTYPE_NONE 0 @@ -105,17 +109,49 @@ struct netlbl_dom_map; /* Domain mapping operations */ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info); -/* LSM security attributes */ +/* + * LSM security attributes + */ + +/** + * struct netlbl_lsm_cache - NetLabel LSM security attribute cache + * @refcount: atomic reference counter + * @free: LSM supplied function to free the cache data + * @data: LSM supplied cache data + * + * Description: + * This structure is provided for LSMs which wish to make use of the NetLabel + * caching mechanism to store LSM specific data/attributes in the NetLabel + * cache. If the LSM has to perform a lot of translation from the NetLabel + * security attributes into it's own internal representation then the cache + * mechanism can provide a way to eliminate some or all of that translation + * overhead on a cache hit. + * + */ struct netlbl_lsm_cache { atomic_t refcount; void (*free) (const void *data); void *data; }; -/* The catmap bitmap field MUST be a power of two in length and large + +/** + * struct netlbl_lsm_secattr_catmap - NetLabel LSM secattr category bitmap + * @startbit: the value of the lowest order bit in the bitmap + * @bitmap: the category bitmap + * @next: pointer to the next bitmap "node" or NULL + * + * Description: + * This structure is used to represent category bitmaps. Due to the large + * number of categories supported by most labeling protocols it is not + * practical to transfer a full bitmap internally so NetLabel adopts a sparse + * bitmap structure modeled after SELinux's ebitmap structure. 
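A queue handler built on the new <net/netfilter/nf_queue.h> stores each nf_queue_entry until a verdict is available and then hands it back with nf_reinject(). A hedged sketch; the list, the lock and the verdict path are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

static LIST_HEAD(mydrv_queue);
static DEFINE_SPINLOCK(mydrv_queue_lock);

static int mydrv_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        spin_lock_bh(&mydrv_queue_lock);
        list_add_tail(&entry->list, &mydrv_queue);
        spin_unlock_bh(&mydrv_queue_lock);
        return 0;
}

static const struct nf_queue_handler mydrv_qh = {
        .outfn  = mydrv_outfn,
        .name   = "mydrv_queue",
};

/* Later, once a verdict (e.g. NF_ACCEPT or NF_DROP) is known: */
static void mydrv_verdict(struct nf_queue_entry *entry, unsigned int verdict)
{
        spin_lock_bh(&mydrv_queue_lock);
        list_del(&entry->list);
        spin_unlock_bh(&mydrv_queue_lock);
        nf_reinject(entry, verdict);
}

/* Registration, e.g. from module init:
 *      nf_register_queue_handler(PF_INET, &mydrv_qh);
 */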
+ * The catmap bitmap field MUST be a power of two in length and large * enough to hold at least 240 bits. Special care (i.e. check the code!) * should be used when changing these values as the LSM implementation * probably has functions which rely on the sizes of these types to speed - * processing. */ + * processing. + * + */ #define NETLBL_CATMAP_MAPTYPE u64 #define NETLBL_CATMAP_MAPCNT 4 #define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8) @@ -127,22 +163,48 @@ struct netlbl_lsm_secattr_catmap { NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT]; struct netlbl_lsm_secattr_catmap *next; }; + +/** + * struct netlbl_lsm_secattr - NetLabel LSM security attributes + * @flags: indicate which attributes are contained in this structure + * @type: indicate the NLTYPE of the attributes + * @domain: the NetLabel LSM domain + * @cache: NetLabel LSM specific cache + * @attr.mls: MLS sensitivity label + * @attr.mls.cat: MLS category bitmap + * @attr.mls.lvl: MLS sensitivity level + * @attr.secid: LSM specific secid token + * + * Description: + * This structure is used to pass security attributes between NetLabel and the + * LSM modules. The flags field is used to specify which fields within the + * struct are valid and valid values can be created by bitwise OR'ing the + * NETLBL_SECATTR_* defines. The domain field is typically set by the LSM to + * specify domain specific configuration settings and is not usually used by + * NetLabel itself when returning security attributes to the LSM. + * + */ #define NETLBL_SECATTR_NONE 0x00000000 #define NETLBL_SECATTR_DOMAIN 0x00000001 #define NETLBL_SECATTR_CACHE 0x00000002 #define NETLBL_SECATTR_MLS_LVL 0x00000004 #define NETLBL_SECATTR_MLS_CAT 0x00000008 +#define NETLBL_SECATTR_SECID 0x00000010 #define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \ - NETLBL_SECATTR_MLS_CAT) + NETLBL_SECATTR_MLS_CAT | \ + NETLBL_SECATTR_SECID) struct netlbl_lsm_secattr { u32 flags; - + u32 type; char *domain; - - u32 mls_lvl; - struct netlbl_lsm_secattr_catmap *mls_cat; - struct netlbl_lsm_cache *cache; + union { + struct { + struct netlbl_lsm_secattr_catmap *cat; + u32 lvl; + } mls; + u32 secid; + } attr; }; /* @@ -231,10 +293,7 @@ static inline void netlbl_secattr_catmap_free( */ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr) { - secattr->flags = 0; - secattr->domain = NULL; - secattr->mls_cat = NULL; - secattr->cache = NULL; + memset(secattr, 0, sizeof(*secattr)); } /** @@ -248,11 +307,11 @@ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr) */ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) { - if (secattr->cache) - netlbl_secattr_cache_free(secattr->cache); kfree(secattr->domain); - if (secattr->mls_cat) - netlbl_secattr_catmap_free(secattr->mls_cat); + if (secattr->flags & NETLBL_SECATTR_CACHE) + netlbl_secattr_cache_free(secattr->cache); + if (secattr->flags & NETLBL_SECATTR_MLS_CAT) + netlbl_secattr_catmap_free(secattr->attr.mls.cat); } /** @@ -300,7 +359,7 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap, gfp_t flags); /* - * LSM protocol operations + * LSM protocol operations (NetLabel LSM/kernel API) */ int netlbl_enabled(void); int netlbl_sock_setattr(struct sock *sk, @@ -308,6 +367,7 @@ int netlbl_sock_setattr(struct sock *sk, int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr); int netlbl_skbuff_getattr(const struct sk_buff *skb, + u16 family, struct netlbl_lsm_secattr *secattr); void netlbl_skbuff_err(struct 
sk_buff *skb, int error); @@ -360,6 +420,7 @@ static inline int netlbl_sock_getattr(struct sock *sk, return -ENOSYS; } static inline int netlbl_skbuff_getattr(const struct sk_buff *skb, + u16 family, struct netlbl_lsm_secattr *secattr) { return -ENOSYS; diff --git a/include/net/netlink.h b/include/net/netlink.h index 9298218c07f9..a5506c42f03c 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -91,6 +91,7 @@ * nla_reserve_nohdr(skb, len) reserve room for an attribute w/o hdr * nla_put(skb, type, len, data) add attribute to skb * nla_put_nohdr(skb, len, data) add attribute w/o hdr + * nla_append(skb, len, data) append data to skb * * Attribute Construction for Basic Types: * nla_put_u8(skb, type, value) add u8 attribute to skb @@ -217,6 +218,7 @@ struct nla_policy { */ struct nl_info { struct nlmsghdr *nlh; + struct net *nl_net; u32 pid; }; @@ -253,6 +255,8 @@ extern int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data); extern int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); +extern int nla_append(struct sk_buff *skb, int attrlen, + const void *data); /************************************************************************** * Netlink Messages @@ -862,7 +866,7 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, #define NLA_PUT(skb, attrtype, attrlen, data) \ do { \ - if (nla_put(skb, attrtype, attrlen, data) < 0) \ + if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \ goto nla_put_failure; \ } while(0) @@ -881,6 +885,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, #define NLA_PUT_LE16(skb, attrtype, value) \ NLA_PUT_TYPE(skb, __le16, attrtype, value) +#define NLA_PUT_BE16(skb, attrtype, value) \ + NLA_PUT_TYPE(skb, __be16, attrtype, value) + #define NLA_PUT_U32(skb, attrtype, value) \ NLA_PUT_TYPE(skb, u32, attrtype, value) @@ -927,6 +934,15 @@ static inline u16 nla_get_u16(struct nlattr *nla) } /** + * nla_get_be16 - return payload of __be16 attribute + * @nla: __be16 netlink attribute + */ +static inline __be16 nla_get_be16(struct nlattr *nla) +{ + return *(__be16 *) nla_data(nla); +} + +/** * nla_get_le16 - return payload of __le16 attribute * @nla: __le16 netlink attribute */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h new file mode 100644 index 000000000000..15a0b052df22 --- /dev/null +++ b/include/net/netns/ipv4.h @@ -0,0 +1,31 @@ +/* + * ipv4 in net namespaces + */ + +#ifndef __NETNS_IPV4_H__ +#define __NETNS_IPV4_H__ + +#include <net/inet_frag.h> + +struct ctl_table_header; +struct ipv4_devconf; +struct fib_rules_ops; +struct hlist_head; +struct sock; + +struct netns_ipv4 { +#ifdef CONFIG_SYSCTL + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; +#endif + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; +#ifdef CONFIG_IP_MULTIPLE_TABLES + struct fib_rules_ops *rules_ops; +#endif + struct hlist_head *fib_table_hash; + struct sock *fibnl; + + struct netns_frags frags; +}; +#endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h new file mode 100644 index 000000000000..187c4248df22 --- /dev/null +++ b/include/net/netns/ipv6.h @@ -0,0 +1,35 @@ +/* + * ipv6 in net namespaces + */ + +#include <net/inet_frag.h> + +#ifndef __NETNS_IPV6_H__ +#define __NETNS_IPV6_H__ + +struct ctl_table_header; + +struct netns_sysctl_ipv6 { +#ifdef CONFIG_SYSCTL + struct ctl_table_header *table; + struct ctl_table_header *frags_hdr; +#endif + int bindv6only; + int flush_delay; + int ip6_rt_max_size; + int 
ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + int icmpv6_time; +}; + +struct netns_ipv6 { + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct netns_frags frags; +}; +#endif diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h new file mode 100644 index 000000000000..637daf698884 --- /dev/null +++ b/include/net/netns/packet.h @@ -0,0 +1,15 @@ +/* + * Packet network namespace + */ +#ifndef __NETNS_PACKET_H__ +#define __NETNS_PACKET_H__ + +#include <linux/list.h> +#include <linux/spinlock.h> + +struct netns_packet { + rwlock_t sklist_lock; + struct hlist_head sklist; +}; + +#endif /* __NETNS_PACKET_H__ */ diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h new file mode 100644 index 000000000000..284649d4dfb4 --- /dev/null +++ b/include/net/netns/unix.h @@ -0,0 +1,13 @@ +/* + * Unix network namespace + */ +#ifndef __NETNS_UNIX_H__ +#define __NETNS_UNIX_H__ + +struct ctl_table_header; +struct netns_unix { + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +#endif /* __NETNS_UNIX_H__ */ diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f285de69c615..8716eb757d51 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -2,7 +2,6 @@ #define __NET_PKT_CLS_H #include <linux/pkt_cls.h> -#include <net/net_namespace.h> #include <net/sch_generic.h> #include <net/act_api.h> @@ -130,8 +129,8 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, return 0; } -extern int tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb, - struct rtattr *rate_tlv, struct tcf_exts *exts, +extern int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, struct tcf_ext_map *map); extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, @@ -248,7 +247,7 @@ struct tcf_ematch_ops extern int tcf_em_register(struct tcf_ematch_ops *); extern int tcf_em_unregister(struct tcf_ematch_ops *); -extern int tcf_em_tree_validate(struct tcf_proto *, struct rtattr *, +extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, struct tcf_ematch_tree *); extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); @@ -336,10 +335,12 @@ static inline int tcf_valid_offset(const struct sk_buff *skb, } #ifdef CONFIG_NET_CLS_IND +#include <net/net_namespace.h> + static inline int -tcf_change_indev(struct tcf_proto *tp, char *indev, struct rtattr *indev_tlv) +tcf_change_indev(struct tcf_proto *tp, char *indev, struct nlattr *indev_tlv) { - if (rtattr_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) + if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) return -EINVAL; return 0; } diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index ab61809a9616..46fb4d80c74a 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -77,7 +77,7 @@ extern int unregister_qdisc(struct Qdisc_ops *qops); extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, - struct rtattr *tab); + struct nlattr *tab); extern void qdisc_put_rtab(struct qdisc_rate_table *tab); extern void 
__qdisc_run(struct net_device *dev); diff --git a/include/net/protocol.h b/include/net/protocol.h index 1166ffb4b3ec..ad8c584233a6 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -102,7 +102,7 @@ extern void inet_unregister_protosw(struct inet_protosw *p); #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) extern int inet6_add_protocol(struct inet6_protocol *prot, unsigned char num); extern int inet6_del_protocol(struct inet6_protocol *prot, unsigned char num); -extern void inet6_register_protosw(struct inet_protosw *p); +extern int inet6_register_protosw(struct inet_protosw *p); extern void inet6_unregister_protosw(struct inet_protosw *p); #endif diff --git a/include/net/raw.h b/include/net/raw.h index e4af59781949..cca81d8b2d8b 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -22,27 +22,39 @@ extern struct proto raw_prot; -extern void raw_err(struct sock *, struct sk_buff *, u32 info); -extern int raw_rcv(struct sock *, struct sk_buff *); - -/* Note: v4 ICMP wants to get at this stuff, if you change the - * hashing mechanism, make sure you update icmp.c as well. - */ -#define RAWV4_HTABLE_SIZE MAX_INET_PROTOS -extern struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE]; - -extern rwlock_t raw_v4_lock; +void raw_icmp_error(struct sk_buff *, int, u32); +int raw_local_deliver(struct sk_buff *, int); +extern int raw_rcv(struct sock *, struct sk_buff *); -extern struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num, - __be32 raddr, __be32 laddr, - int dif); +#define RAW_HTABLE_SIZE MAX_INET_PROTOS -extern int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash); +struct raw_hashinfo { + rwlock_t lock; + struct hlist_head ht[RAW_HTABLE_SIZE]; +}; #ifdef CONFIG_PROC_FS extern int raw_proc_init(void); extern void raw_proc_exit(void); + +struct raw_iter_state { + struct seq_net_private p; + int bucket; + unsigned short family; + struct raw_hashinfo *h; +}; + +#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private) +void *raw_seq_start(struct seq_file *seq, loff_t *pos); +void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos); +void raw_seq_stop(struct seq_file *seq, void *v); +int raw_seq_open(struct inode *ino, struct file *file, struct raw_hashinfo *h, + unsigned short family); + #endif +void raw_hash_sk(struct sock *sk, struct raw_hashinfo *h); +void raw_unhash_sk(struct sock *sk, struct raw_hashinfo *h); + #endif /* _RAW_H */ diff --git a/include/net/rawv6.h b/include/net/rawv6.h index a5819891d525..8a22599f26ba 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -5,26 +5,13 @@ #include <net/protocol.h> -#define RAWV6_HTABLE_SIZE MAX_INET_PROTOS -extern struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE]; -extern rwlock_t raw_v6_lock; - -extern int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr); - -extern struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num, - struct in6_addr *loc_addr, struct in6_addr *rmt_addr, - int dif); +void raw6_icmp_error(struct sk_buff *, int nexthdr, + int type, int code, int inner_offset, __be32); +int raw6_local_deliver(struct sk_buff *, int); extern int rawv6_rcv(struct sock *sk, struct sk_buff *skb); - -extern void rawv6_err(struct sock *sk, - struct sk_buff *skb, - struct inet6_skb_parm *opt, - int type, int code, - int offset, __be32 info); - #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) int rawv6_mh_filter_register(int (*filter)(struct sock *sock, struct sk_buff *skb)); diff --git a/include/net/route.h b/include/net/route.h index 
59b0b19205a2..4eabf008413b 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -33,6 +33,7 @@ #include <linux/ip.h> #include <linux/cache.h> #include <linux/security.h> +#include <net/sock.h> #ifndef __KERNEL__ #warning This file is not supposed to be used outside of kernel. @@ -110,16 +111,17 @@ extern int ip_rt_init(void); extern void ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw, __be32 src, struct net_device *dev); extern void rt_cache_flush(int how); -extern int __ip_route_output_key(struct rtable **, const struct flowi *flp); -extern int ip_route_output_key(struct rtable **, struct flowi *flp); -extern int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags); +extern int __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp); +extern int ip_route_output_key(struct net *, struct rtable **, struct flowi *flp); +extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags); extern int ip_route_input(struct sk_buff*, __be32 dst, __be32 src, u8 tos, struct net_device *devin); -extern unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu); +extern unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, unsigned short new_mtu); extern void ip_rt_send_redirect(struct sk_buff *skb); -extern unsigned inet_addr_type(__be32 addr); +extern unsigned inet_addr_type(struct net *net, __be32 addr); +extern unsigned inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); extern void ip_rt_multicast_event(struct in_device *); -extern int ip_rt_ioctl(unsigned int cmd, void __user *arg); +extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); extern void ip_rt_get_source(u8 *src, struct rtable *rt); extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb); @@ -156,8 +158,9 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst, .dport = dport } } }; int err; + struct net *net = sk->sk_net; if (!dst || !src) { - err = __ip_route_output_key(rp, &fl); + err = __ip_route_output_key(net, rp, &fl); if (err) return err; fl.fl4_dst = (*rp)->rt_dst; @@ -166,7 +169,7 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst, *rp = NULL; } security_sk_classify_flow(sk, &fl); - return ip_route_output_flow(rp, &fl, sk, flags); + return ip_route_output_flow(net, rp, &fl, sk, flags); } static inline int ip_route_newports(struct rtable **rp, u8 protocol, @@ -183,7 +186,7 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol, ip_rt_put(*rp); *rp = NULL; security_sk_classify_flow(sk, &fl); - return ip_route_output_flow(rp, &fl, sk, 0); + return ip_route_output_flow(sk->sk_net, rp, &fl, sk, 0); } return 0; } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4c3b35153c37..ab502ec1c61c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -66,7 +66,7 @@ struct Qdisc_class_ops unsigned long (*get)(struct Qdisc *, u32 classid); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32, u32, - struct rtattr **, unsigned long *); + struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker * arg); @@ -86,7 +86,7 @@ struct Qdisc_class_ops struct Qdisc_ops { struct Qdisc_ops *next; - struct Qdisc_class_ops *cl_ops; + const struct Qdisc_class_ops *cl_ops; char id[IFNAMSIZ]; int priv_size; @@ -95,10 +95,10 @@ struct Qdisc_ops int 
(*requeue)(struct sk_buff *, struct Qdisc *); unsigned int (*drop)(struct Qdisc *); - int (*init)(struct Qdisc *, struct rtattr *arg); + int (*init)(struct Qdisc *, struct nlattr *arg); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); - int (*change)(struct Qdisc *, struct rtattr *arg); + int (*change)(struct Qdisc *, struct nlattr *arg); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); @@ -126,7 +126,7 @@ struct tcf_proto_ops unsigned long (*get)(struct tcf_proto*, u32 handle); void (*put)(struct tcf_proto*, unsigned long); int (*change)(struct tcf_proto*, unsigned long, - u32 handle, struct rtattr **, + u32 handle, struct nlattr **, unsigned long *); int (*delete)(struct tcf_proto*, unsigned long); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h new file mode 100644 index 000000000000..ba75c67cb992 --- /dev/null +++ b/include/net/sctp/checksum.h @@ -0,0 +1,78 @@ +/* SCTP kernel reference Implementation + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001-2003 International Business Machines, Corp. + * + * This file is part of the SCTP kernel reference Implementation + * + * SCTP Checksum functions + * + * The SCTP reference implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * The SCTP reference implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <lksctp-developers@lists.sourceforge.net> + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * Dinakaran Joseph + * Jon Grimm <jgrimm@us.ibm.com> + * Sridhar Samudrala <sri@us.ibm.com> + * + * Rewritten to use libcrc32c by: + * Vlad Yasevich <vladislav.yasevich@hp.com> + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ + +#include <linux/types.h> +#include <net/sctp/sctp.h> +#include <linux/crc32c.h> + +static inline __u32 sctp_start_cksum(__u8 *buffer, __u16 length) +{ + __u32 crc = ~(__u32) 0; + __u8 zero[sizeof(__u32)] = {0}; + + /* Optimize this routine to be SCTP specific, knowing how + * to skip the checksum field of the SCTP header. + */ + + /* Calculate CRC up to the checksum. */ + crc = crc32c(crc, buffer, sizeof(struct sctphdr) - sizeof(__u32)); + + /* Skip checksum field of the header. */ + crc = crc32c(crc, zero, sizeof(__u32)); + + /* Calculate the rest of the CRC. 
*/ + crc = crc32c(crc, &buffer[sizeof(struct sctphdr)], + length - sizeof(struct sctphdr)); + return crc; +} + +static inline __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32) +{ + return crc32c(crc32, buffer, length); +} + +static inline __u32 sctp_end_cksum(__u32 crc32) +{ + return ntohl(~crc32); +} diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 05f22a6afbcd..fefcba67bd1e 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -365,36 +365,12 @@ typedef enum { * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP * addresses. */ -#define IS_IPV4_UNUSABLE_ADDRESS(a) \ - ((htonl(INADDR_BROADCAST) == *a) || \ - (MULTICAST(*a)) || \ - (((unsigned char *)(a))[0] == 0) || \ - ((((unsigned char *)(a))[0] == 198) && \ - (((unsigned char *)(a))[1] == 18) && \ - (((unsigned char *)(a))[2] == 0)) || \ - ((((unsigned char *)(a))[0] == 192) && \ - (((unsigned char *)(a))[1] == 88) && \ - (((unsigned char *)(a))[2] == 99))) - -/* IPv4 Link-local addresses: 169.254.0.0/16. */ -#define IS_IPV4_LINK_ADDRESS(a) \ - ((((unsigned char *)(a))[0] == 169) && \ - (((unsigned char *)(a))[1] == 254)) - -/* RFC 1918 "Address Allocation for Private Internets" defines the IPv4 - * private address space as the following: - * - * 10.0.0.0 - 10.255.255.255 (10/8 prefix) - * 172.16.0.0.0 - 172.31.255.255 (172.16/12 prefix) - * 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) - */ -#define IS_IPV4_PRIVATE_ADDRESS(a) \ - ((((unsigned char *)(a))[0] == 10) || \ - ((((unsigned char *)(a))[0] == 172) && \ - (((unsigned char *)(a))[1] >= 16) && \ - (((unsigned char *)(a))[1] < 32)) || \ - ((((unsigned char *)(a))[0] == 192) && \ - (((unsigned char *)(a))[1] == 168))) +#define IS_IPV4_UNUSABLE_ADDRESS(a) \ + ((htonl(INADDR_BROADCAST) == a) || \ + ipv4_is_multicast(a) || \ + ipv4_is_zeronet(a) || \ + ipv4_is_test_198(a) || \ + ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ #define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 34318a33a94c..4977b0a81535 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -150,13 +150,6 @@ int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg); int sctp_primitive_ASCONF(struct sctp_association *, void *arg); /* - * sctp/crc32c.c - */ -__u32 sctp_start_cksum(__u8 *ptr, __u16 count); -__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum); -__u32 sctp_end_cksum(__u32 cksum); - -/* * sctp/input.c */ int sctp_rcv(struct sk_buff *skb); @@ -470,8 +463,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) skb->destructor = sctp_sock_rfree; atomic_add(event->rmem_len, &sk->sk_rmem_alloc); /* - * This mimics the behavior of - * sk_stream_set_owner_r + * This mimics the behavior of skb_set_owner_r */ sk->sk_forward_alloc -= event->rmem_len; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index bb965742b64e..4d591bfce452 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -451,6 +451,7 @@ union sctp_params { struct sctp_random_param *random; struct sctp_chunks_param *chunks; struct sctp_hmac_algo_param *hmac_algo; + struct sctp_addip_param *addip; }; /* RFC 2960. Section 3.3.5 Heartbeat. @@ -743,6 +744,7 @@ struct sctp_chunk { __u8 tsn_missing_report; /* Data chunk missing counter. 
*/ __u8 data_accepted; /* At least 1 chunk in this packet accepted */ __u8 auth; /* IN: was auth'ed | OUT: needs auth */ + __u8 has_asconf; /* IN: have seen an asconf before */ }; void sctp_chunk_hold(struct sctp_chunk *); @@ -758,12 +760,18 @@ void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *, union sctp_addr *); const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); +enum { + SCTP_ADDR_NEW, /* new address added to assoc/ep */ + SCTP_ADDR_SRC, /* address can be used as source */ + SCTP_ADDR_DEL, /* address about to be deleted */ +}; + /* This is a structure for holding either an IPv6 or an IPv4 address. */ struct sctp_sockaddr_entry { struct list_head list; struct rcu_head rcu; union sctp_addr a; - __u8 use_as_src; + __u8 state; __u8 valid; }; @@ -1188,10 +1196,12 @@ int sctp_bind_addr_dup(struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, gfp_t gfp); int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, - __u8 use_as_src, gfp_t gfp); + __u8 addr_state, gfp_t gfp); int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, struct sctp_sock *); +int sctp_bind_addr_state(const struct sctp_bind_addr *bp, + const union sctp_addr *addr); union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, const union sctp_addr *addrs, int addrcnt, @@ -1784,20 +1794,16 @@ struct sctp_association { */ struct sctp_chunk *addip_last_asconf; - /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. + /* ADDIP Section 5.2 Upon reception of an ASCONF Chunk. * - * IMPLEMENTATION NOTE: As an optimization a receiver may wish - * to save the last ASCONF-ACK for some predetermined period - * of time and instead of re-processing the ASCONF (with the - * same serial number) it may just re-transmit the - * ASCONF-ACK. It may wish to use the arrival of a new serial - * number to discard the previously saved ASCONF-ACK or any - * other means it may choose to expire the saved ASCONF-ACK. + * This is needed to implement itmes E1 - E4 of the updated + * spec. Here is the justification: * - * [This is our saved ASCONF-ACK. We invalidate it when a new - * ASCONF serial number arrives.] + * Since the peer may bundle multiple ASCONF chunks toward us, + * we now need the ability to cache multiple ACKs. The section + * describes in detail how they are cached and cleaned up. */ - struct sctp_chunk *addip_last_asconf_ack; + struct list_head asconf_ack_list; /* These ASCONF chunks are waiting to be sent. 
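
The structs.h hunk above replaces the single saved ASCONF-ACK with asconf_ack_list, so ACKs for several outstanding serial numbers can be kept and simply retransmitted when a peer repeats an ASCONF. A minimal sketch of the lookup-by-serial idea follows; the cached_asconf_ack structure is hypothetical, standing in for the kernel's list of sctp_chunk entries.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cached-ACK entry; the kernel links struct sctp_chunk
 * entries into asconf_ack_list instead. */
struct cached_asconf_ack {
	struct cached_asconf_ack *next;
	uint32_t serial;	/* ASCONF serial number this ACK answers */
};

/* Return the cached ACK for a given serial, or NULL if none is cached. */
static struct cached_asconf_ack *
lookup_asconf_ack(struct cached_asconf_ack *head, uint32_t serial)
{
	for (; head; head = head->next)
		if (head->serial == serial)
			return head;
	return NULL;
}

int main(void)
{
	struct cached_asconf_ack a2 = { NULL, 2 };
	struct cached_asconf_ack a1 = { &a2, 1 };

	/* A retransmitted ASCONF with serial 2 finds its cached ACK. */
	printf("serial 2 %s cached\n",
	       lookup_asconf_ack(&a1, 2) ? "is" : "is not");
	return 0;
}
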
* @@ -1938,12 +1944,19 @@ void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned); void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned); void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); +void sctp_assoc_del_nonprimary_peers(struct sctp_association *, + struct sctp_transport *); int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, gfp_t); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, gfp_t gfp); int sctp_assoc_set_id(struct sctp_association *, gfp_t); +void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc); +struct sctp_chunk *sctp_assoc_lookup_asconf_ack( + const struct sctp_association *asoc, + __be32 serial); + int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2); diff --git a/include/net/snmp.h b/include/net/snmp.h index ea206bff0dc4..ce2f48507510 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -23,6 +23,7 @@ #include <linux/cache.h> #include <linux/snmp.h> +#include <linux/smp.h> /* * Mibs are stored in array of unsigned long. @@ -117,6 +118,11 @@ struct linux_mib { unsigned long mibs[LINUX_MIB_MAX]; }; +/* Linux Xfrm */ +#define LINUX_MIB_XFRMMAX __LINUX_MIB_XFRMMAX +struct linux_xfrm_mib { + unsigned long mibs[LINUX_MIB_XFRMMAX]; +}; /* * FIXME: On x86 and some other CPUs the split into user and softirq parts @@ -134,17 +140,27 @@ struct linux_mib { #define SNMP_INC_STATS_BH(mib, field) \ (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++) -#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \ - (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++) #define SNMP_INC_STATS_USER(mib, field) \ - (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++) + do { \ + per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \ + put_cpu(); \ + } while (0) #define SNMP_INC_STATS(mib, field) \ - (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++) + do { \ + per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \ + put_cpu(); \ + } while (0) #define SNMP_DEC_STATS(mib, field) \ - (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--) + do { \ + per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \ + put_cpu(); \ + } while (0) #define SNMP_ADD_STATS_BH(mib, field, addend) \ (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend) #define SNMP_ADD_STATS_USER(mib, field, addend) \ - (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend) + do { \ + per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \ + put_cpu(); \ + } while (0) #endif diff --git a/include/net/sock.h b/include/net/sock.h index 6e1542da33a1..902324488d0f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -47,6 +47,7 @@ #include <linux/module.h> #include <linux/lockdep.h> #include <linux/netdevice.h> +#include <linux/pcounter.h> #include <linux/skbuff.h> /* struct sk_buff */ #include <linux/mm.h> #include <linux/security.h> @@ -56,7 +57,6 @@ #include <asm/atomic.h> #include <net/dst.h> #include <net/checksum.h> -#include <net/net_namespace.h> /* * This structure really needs to be cleaned up. 
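
The snmp.h rework above swaps raw_smp_processor_id() for a get_cpu()/put_cpu() pair in the user-context macros, so the task cannot be preempted and migrated between choosing its per-CPU slot and doing the increment. The userspace sketch below imitates that shape with stub get_cpu()/put_cpu() helpers and a fold-on-read pass; it is an analogue of the pattern, not kernel code.

#include <stdio.h>

#define NR_CPUS 4

struct mib { unsigned long mibs[8]; };
static struct mib percpu_mib[NR_CPUS];

/* Stand-ins for the kernel primitives: in the kernel, get_cpu() disables
 * preemption and returns the current CPU; put_cpu() re-enables it. */
static int current_cpu;
static int get_cpu(void)  { return current_cpu; }
static void put_cpu(void) { }

/* Same shape as the reworked SNMP_INC_STATS(): the slot is chosen and
 * bumped while the CPU is pinned, then released. */
#define STATS_INC(mib, field)				\
	do {						\
		(mib)[get_cpu()].mibs[(field)]++;	\
		put_cpu();				\
	} while (0)

static unsigned long stats_fold(int field)
{
	unsigned long sum = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_mib[cpu].mibs[field];
	return sum;
}

int main(void)
{
	STATS_INC(percpu_mib, 2);
	current_cpu = 3;	/* pretend we were migrated to another CPU */
	STATS_INC(percpu_mib, 2);
	printf("field 2 total: %lu\n", stats_fold(2));
	return 0;
}
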
@@ -94,6 +94,7 @@ typedef struct { struct sock; struct proto; +struct net; /** * struct sock_common - minimal network layer representation of sockets @@ -145,7 +146,8 @@ struct sock_common { * @sk_forward_alloc: space allocated forward * @sk_allocation: allocation mode * @sk_sndbuf: size of send buffer in bytes - * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings + * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, + * %SO_OOBINLINE settings * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) @@ -153,9 +155,12 @@ struct sock_common { * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct * @sk_error_queue: rarely used - * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance) + * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, + * IPV6_ADDRFORM for instance) * @sk_err: last error - * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out' + * @sk_err_soft: errors that don't cause failure but are the cause of a + * persistent failure not just 'timed out' + * @sk_drops: raw drops counter * @sk_ack_backlog: current listen backlog * @sk_max_ack_backlog: listen backlog set in listen() * @sk_priority: %SO_PRIORITY setting @@ -239,6 +244,7 @@ struct sock { rwlock_t sk_callback_lock; int sk_err, sk_err_soft; + atomic_t sk_drops; unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; @@ -439,7 +445,7 @@ static inline int sk_acceptq_is_full(struct sock *sk) */ static inline int sk_stream_min_wspace(struct sock *sk) { - return sk->sk_wmem_queued / 2; + return sk->sk_wmem_queued >> 1; } static inline int sk_stream_wspace(struct sock *sk) @@ -454,25 +460,6 @@ static inline int sk_stream_memory_free(struct sock *sk) return sk->sk_wmem_queued < sk->sk_sndbuf; } -extern void sk_stream_rfree(struct sk_buff *skb); - -static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk) -{ - skb->sk = sk; - skb->destructor = sk_stream_rfree; - atomic_add(skb->truesize, &sk->sk_rmem_alloc); - sk->sk_forward_alloc -= skb->truesize; -} - -static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb) -{ - skb_truesize_check(skb); - sock_set_flag(sk, SOCK_QUEUE_SHRUNK); - sk->sk_wmem_queued -= skb->truesize; - sk->sk_forward_alloc += skb->truesize; - __kfree_skb(skb); -} - /* The per-socket spinlock must be held here. */ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) { @@ -560,14 +547,11 @@ struct proto { void (*unhash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); -#ifdef CONFIG_SMP /* Keeping track of sockets in use */ - void (*inuse_add)(struct proto *prot, int inc); - int (*inuse_getval)(const struct proto *prot); - int *inuse_ptr; -#else - int inuse; +#ifdef CONFIG_PROC_FS + struct pcounter inuse; #endif + /* Memory pressure */ void (*enter_memory_pressure)(void); atomic_t *memory_allocated; /* Current allocated memory. */ @@ -575,7 +559,7 @@ struct proto { /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. - * All the sk_stream_mem_schedule() is of this nature: accounting + * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. 
*/ int *memory_pressure; @@ -602,36 +586,6 @@ struct proto { #endif }; -/* - * Special macros to let protos use a fast version of inuse{get|add} - * using a static percpu variable per proto instead of an allocated one, - * saving one dereference. - * This might be changed if/when dynamic percpu vars become fast. - */ -#ifdef CONFIG_SMP -# define DEFINE_PROTO_INUSE(NAME) \ -static DEFINE_PER_CPU(int, NAME##_inuse); \ -static void NAME##_inuse_add(struct proto *prot, int inc) \ -{ \ - __get_cpu_var(NAME##_inuse) += inc; \ -} \ - \ -static int NAME##_inuse_getval(const struct proto *prot)\ -{ \ - int res = 0, cpu; \ - \ - for_each_possible_cpu(cpu) \ - res += per_cpu(NAME##_inuse, cpu); \ - return res; \ -} -# define REF_PROTO_INUSE(NAME) \ - .inuse_add = NAME##_inuse_add, \ - .inuse_getval = NAME##_inuse_getval, -#else -# define DEFINE_PROTO_INUSE(NAME) -# define REF_PROTO_INUSE(NAME) -#endif - extern int proto_register(struct proto *prot, int alloc_slab); extern void proto_unregister(struct proto *prot); @@ -660,33 +614,42 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ + +#ifdef CONFIG_PROC_FS +# define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME) +# define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse) /* Called with local bh disabled */ -static __inline__ void sock_prot_inc_use(struct proto *prot) +static inline void sock_prot_inuse_add(struct proto *prot, int inc) { -#ifdef CONFIG_SMP - prot->inuse_add(prot, 1); -#else - prot->inuse++; -#endif + pcounter_add(&prot->inuse, inc); } - -static __inline__ void sock_prot_dec_use(struct proto *prot) +static inline int sock_prot_inuse_init(struct proto *proto) { -#ifdef CONFIG_SMP - prot->inuse_add(prot, -1); -#else - prot->inuse--; -#endif + return pcounter_alloc(&proto->inuse); } - -static __inline__ int sock_prot_inuse(struct proto *proto) +static inline int sock_prot_inuse_get(struct proto *proto) { -#ifdef CONFIG_SMP - return proto->inuse_getval(proto); + return pcounter_getval(&proto->inuse); +} +static inline void sock_prot_inuse_free(struct proto *proto) +{ + pcounter_free(&proto->inuse); +} #else - return proto->inuse; -#endif +# define DEFINE_PROTO_INUSE(NAME) +# define REF_PROTO_INUSE(NAME) +static void inline sock_prot_inuse_add(struct proto *prot, int inc) +{ +} +static int inline sock_prot_inuse_init(struct proto *proto) +{ + return 0; } +static void inline sock_prot_inuse_free(struct proto *proto) +{ +} +#endif + /* With per-bucket locks this operation is not-atomic, so that * this version is not worse. 
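
sock.h above retires the per-protocol DEFINE_PROTO_INUSE machinery in favor of the generic pcounter helpers: cheap per-CPU increments on the write side, a fold over all CPUs when /proc wants the total. The sketch below models that striped counter with a fixed slot array and an alloc/add/getval/free API loosely shaped after pcounter_alloc(), pcounter_add(), pcounter_getval() and pcounter_free(); it is a userspace illustration only.

#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 4	/* stands in for the number of possible CPUs */

/* Striped counter: one slot per CPU, summed only when the value is read. */
struct pcounter_demo {
	int *per_slot;
};

static int pcounter_demo_alloc(struct pcounter_demo *pc)
{
	pc->per_slot = calloc(NR_SLOTS, sizeof(*pc->per_slot));
	return pc->per_slot ? 0 : -1;
}

static void pcounter_demo_free(struct pcounter_demo *pc)
{
	free(pc->per_slot);
	pc->per_slot = NULL;
}

/* Cheap on the write side: touch only the caller's slot. */
static void pcounter_demo_add(struct pcounter_demo *pc, int slot, int inc)
{
	pc->per_slot[slot] += inc;
}

/* Slower but rare: fold all slots into one value, as a /proc reader does. */
static int pcounter_demo_getval(const struct pcounter_demo *pc)
{
	int sum = 0;
	for (int i = 0; i < NR_SLOTS; i++)
		sum += pc->per_slot[i];
	return sum;
}

int main(void)
{
	struct pcounter_demo inuse;

	if (pcounter_demo_alloc(&inuse))
		return 1;
	pcounter_demo_add(&inuse, 0, 1);	/* socket created on CPU 0 */
	pcounter_demo_add(&inuse, 3, 1);	/* another on CPU 3 */
	pcounter_demo_add(&inuse, 0, -1);	/* first one closed */
	printf("sockets in use: %d\n", pcounter_demo_getval(&inuse));
	pcounter_demo_free(&inuse);
	return 0;
}
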
@@ -750,32 +713,81 @@ static inline struct inode *SOCK_INODE(struct socket *socket) return &container_of(socket, struct socket_alloc, socket)->vfs_inode; } -extern void __sk_stream_mem_reclaim(struct sock *sk); -extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind); +/* + * Functions for memory accounting + */ +extern int __sk_mem_schedule(struct sock *sk, int size, int kind); +extern void __sk_mem_reclaim(struct sock *sk); -#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE) +#define SK_MEM_QUANTUM ((int)PAGE_SIZE) +#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) +#define SK_MEM_SEND 0 +#define SK_MEM_RECV 1 -static inline int sk_stream_pages(int amt) +static inline int sk_mem_pages(int amt) { - return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM); + return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; } -static inline void sk_stream_mem_reclaim(struct sock *sk) +static inline int sk_has_account(struct sock *sk) { - if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) - __sk_stream_mem_reclaim(sk); + /* return true if protocol supports memory accounting */ + return !!sk->sk_prot->memory_allocated; } -static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb) +static inline int sk_wmem_schedule(struct sock *sk, int size) { - return (int)skb->truesize <= sk->sk_forward_alloc || - sk_stream_mem_schedule(sk, skb->truesize, 1); + if (!sk_has_account(sk)) + return 1; + return size <= sk->sk_forward_alloc || + __sk_mem_schedule(sk, size, SK_MEM_SEND); } -static inline int sk_stream_wmem_schedule(struct sock *sk, int size) +static inline int sk_rmem_schedule(struct sock *sk, int size) { + if (!sk_has_account(sk)) + return 1; return size <= sk->sk_forward_alloc || - sk_stream_mem_schedule(sk, size, 0); + __sk_mem_schedule(sk, size, SK_MEM_RECV); +} + +static inline void sk_mem_reclaim(struct sock *sk) +{ + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) + __sk_mem_reclaim(sk); +} + +static inline void sk_mem_reclaim_partial(struct sock *sk) +{ + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc > SK_MEM_QUANTUM) + __sk_mem_reclaim(sk); +} + +static inline void sk_mem_charge(struct sock *sk, int size) +{ + if (!sk_has_account(sk)) + return; + sk->sk_forward_alloc -= size; +} + +static inline void sk_mem_uncharge(struct sock *sk, int size) +{ + if (!sk_has_account(sk)) + return; + sk->sk_forward_alloc += size; +} + +static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) +{ + skb_truesize_check(skb); + sock_set_flag(sk, SOCK_QUEUE_SHRUNK); + sk->sk_wmem_queued -= skb->truesize; + sk_mem_uncharge(sk, skb->truesize); + __kfree_skb(skb); } /* Used by processes to "lock" a socket state, so that @@ -812,14 +824,14 @@ do { \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) -extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass)); +extern void lock_sock_nested(struct sock *sk, int subclass); static inline void lock_sock(struct sock *sk) { lock_sock_nested(sk, 0); } -extern void FASTCALL(release_sock(struct sock *sk)); +extern void release_sock(struct sock *sk); /* BH context may only use the following locking interface. 
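
The new sk_mem_* helpers above generalize the old sk_stream_* accounting: the protocol grants credit in SK_MEM_QUANTUM (page-sized) units into sk_forward_alloc, queued buffers charge against that credit, frees uncharge it, and surplus quanta are handed back by sk_mem_reclaim(). The self-contained model below walks one buffer through that cycle; the names are simplified and the protocol-wide limits and pressure handling of __sk_mem_schedule() are left out.

#include <stdio.h>

#define QUANTUM 4096	/* SK_MEM_QUANTUM is one page in the kernel */

struct sock_demo {
	int forward_alloc;	/* pre-approved credit, in bytes */
	int allocated_pages;	/* what was requested from the "protocol" */
};

/* Round a byte count up to whole quanta, like sk_mem_pages(). */
static int mem_pages(int amt)
{
	return (amt + QUANTUM - 1) / QUANTUM;
}

/* Grant more credit when a buffer does not fit, like __sk_mem_schedule(). */
static int mem_schedule(struct sock_demo *sk, int size)
{
	int pages = mem_pages(size);

	sk->allocated_pages += pages;
	sk->forward_alloc += pages * QUANTUM;
	return 1;	/* the real helper may refuse under memory pressure */
}

static int wmem_schedule(struct sock_demo *sk, int size)
{
	return size <= sk->forward_alloc || mem_schedule(sk, size);
}

static void mem_charge(struct sock_demo *sk, int size)
{
	sk->forward_alloc -= size;
}

static void mem_uncharge(struct sock_demo *sk, int size)
{
	sk->forward_alloc += size;
}

/* Hand surplus quanta back, like sk_mem_reclaim(). */
static void mem_reclaim(struct sock_demo *sk)
{
	if (sk->forward_alloc >= QUANTUM) {
		int pages = sk->forward_alloc / QUANTUM;

		sk->allocated_pages -= pages;
		sk->forward_alloc -= pages * QUANTUM;
	}
}

int main(void)
{
	struct sock_demo sk = { 0, 0 };

	if (wmem_schedule(&sk, 1500))
		mem_charge(&sk, 1500);		/* queue a 1500-byte buffer */
	mem_uncharge(&sk, 1500);		/* buffer freed */
	mem_reclaim(&sk);
	printf("credit=%d pages=%d\n", sk.forward_alloc, sk.allocated_pages);
	return 0;
}
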
*/ #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) @@ -1113,12 +1125,6 @@ static inline int sk_can_gso(const struct sock *sk) extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); -static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb) -{ - sk->sk_wmem_queued += skb->truesize; - sk->sk_forward_alloc -= skb->truesize; -} - static inline int skb_copy_to_page(struct sock *sk, char __user *from, struct sk_buff *skb, struct page *page, int off, int copy) @@ -1138,7 +1144,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from, skb->data_len += copy; skb->truesize += copy; sk->sk_wmem_queued += copy; - sk->sk_forward_alloc -= copy; + sk_mem_charge(sk, copy); return 0; } @@ -1164,6 +1170,7 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) skb->sk = sk; skb->destructor = sock_rfree; atomic_add(skb->truesize, &sk->sk_rmem_alloc); + sk_mem_charge(sk, skb->truesize); } extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, @@ -1225,45 +1232,12 @@ static inline void sk_wake_async(struct sock *sk, int how, int band) static inline void sk_stream_moderate_sndbuf(struct sock *sk) { if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { - sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2); + sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); } } -static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, - int size, int mem, - gfp_t gfp) -{ - struct sk_buff *skb; - - /* The TCP header must be at least 32-bit aligned. */ - size = ALIGN(size, 4); - - skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); - if (skb) { - skb->truesize += mem; - if (sk_stream_wmem_schedule(sk, skb->truesize)) { - /* - * Make sure that we have exactly size bytes - * available to the caller, no more, no less. 
- */ - skb_reserve(skb, skb_tailroom(skb) - size); - return skb; - } - __kfree_skb(skb); - } else { - sk->sk_prot->enter_memory_pressure(); - sk_stream_moderate_sndbuf(sk); - } - return NULL; -} - -static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, - int size, - gfp_t gfp) -{ - return sk_stream_alloc_pskb(sk, size, 0, gfp); -} +struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); static inline struct page *sk_stream_alloc_page(struct sock *sk) { @@ -1282,7 +1256,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) */ static inline int sock_writeable(const struct sock *sk) { - return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); + return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); } static inline gfp_t gfp_any(void) @@ -1391,23 +1365,11 @@ extern int net_msg_warn; lock_sock(sk); \ } -static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) -{ - if (valbool) - sock_set_flag(sk, bit); - else - sock_reset_flag(sk, bit); -} - extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; extern void sk_init(void); -#ifdef CONFIG_SYSCTL -extern struct ctl_table core_table[]; -#endif - extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; diff --git a/include/net/tcp.h b/include/net/tcp.h index cb5b033e0e59..7de4ea3a04d9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -309,6 +309,9 @@ extern int tcp_twsk_unique(struct sock *sk, extern void tcp_twsk_destructor(struct sock *sk); +extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags); + static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@ -575,10 +578,6 @@ struct tcp_skb_cb { #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) -#define TCPCB_URG 0x20 /* Urgent pointer advanced here */ - -#define TCPCB_AT_TAIL (TCPCB_URG) - __u16 urg_ptr; /* Valid w/URG flags is set. 
*/ __u32 ack_seq; /* Sequence number ACK'd */ }; @@ -649,7 +648,7 @@ struct tcp_congestion_ops { /* lower bound for congestion window (optional) */ u32 (*min_cwnd)(const struct sock *sk); /* do new cwnd calculation (required) */ - void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack); + void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight); /* call before changing ca_state (optional) */ void (*set_state)(struct sock *sk, u8 new_state); /* call when cwnd event occurs (optional) */ @@ -680,7 +679,7 @@ extern void tcp_slow_start(struct tcp_sock *tp); extern struct tcp_congestion_ops tcp_init_congestion_ops; extern u32 tcp_reno_ssthresh(struct sock *sk); -extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag); +extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight); extern u32 tcp_reno_min_cwnd(const struct sock *sk); extern struct tcp_congestion_ops tcp_reno; @@ -782,26 +781,12 @@ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) return 3; } -/* RFC2861 Check whether we are limited by application or congestion window - * This is the inverse of cwnd check in tcp_tso_should_defer - */ -static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) +/* Returns end sequence number of the receiver's advertised window */ +static inline u32 tcp_wnd_end(const struct tcp_sock *tp) { - const struct tcp_sock *tp = tcp_sk(sk); - u32 left; - - if (in_flight >= tp->snd_cwnd) - return 1; - - if (!sk_can_gso(sk)) - return 0; - - left = tp->snd_cwnd - in_flight; - if (sysctl_tcp_tso_win_divisor) - return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd; - else - return left <= tcp_max_burst(tp); + return tp->snd_una + tp->snd_wnd; } +extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss, const struct sk_buff *skb) @@ -921,40 +906,7 @@ static const char *statename[]={ "Close Wait","Last ACK","Listen","Closing" }; #endif - -static inline void tcp_set_state(struct sock *sk, int state) -{ - int oldstate = sk->sk_state; - - switch (state) { - case TCP_ESTABLISHED: - if (oldstate != TCP_ESTABLISHED) - TCP_INC_STATS(TCP_MIB_CURRESTAB); - break; - - case TCP_CLOSE: - if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) - TCP_INC_STATS(TCP_MIB_ESTABRESETS); - - sk->sk_prot->unhash(sk); - if (inet_csk(sk)->icsk_bind_hash && - !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) - inet_put_port(&tcp_hashinfo, sk); - /* fall through */ - default: - if (oldstate==TCP_ESTABLISHED) - TCP_DEC_STATS(TCP_MIB_CURRESTAB); - } - - /* Change state AFTER socket is unhashed to avoid closed - * socket sitting in hash tables. 
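
tcp_wnd_end() above reduces "end of the receiver's advertised window" to snd_una + snd_wnd, with tcp_is_cwnd_limited() moving out of line. The small example below shows the check a sender would make before transmitting a segment; sequence-number wrap-around is ignored here (the kernel compares with its after()/before() helpers), so treat it as an illustration of the arithmetic only.

#include <stdint.h>
#include <stdio.h>

struct tcp_demo {
	uint32_t snd_una;	/* oldest unacknowledged sequence number */
	uint32_t snd_wnd;	/* receiver's advertised window, in bytes */
};

/* End of the advertised window, as in tcp_wnd_end(). */
static uint32_t wnd_end(const struct tcp_demo *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

int main(void)
{
	struct tcp_demo tp = { .snd_una = 1000, .snd_wnd = 4000 };
	uint32_t seg_seq = 4500, seg_len = 1000;

	/* Wrap-around is ignored in this comparison. */
	int fits = seg_seq + seg_len <= wnd_end(&tp);
	printf("segment %s within the advertised window\n",
	       fits ? "is" : "is not");
	return 0;
}
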
- */ - sk->sk_state = state; - -#ifdef STATE_TRACE - SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]); -#endif -} +extern void tcp_set_state(struct sock *sk, int state); extern void tcp_done(struct sock *sk); @@ -1078,7 +1030,6 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) { tcp_clear_retrans_hints_partial(tp); - tp->fastpath_skb_hint = NULL; } /* MD5 Signature */ @@ -1153,7 +1104,8 @@ extern int tcp_v4_calc_md5_hash(char *md5_hash, struct dst_entry *dst, struct request_sock *req, struct tcphdr *th, - int protocol, int tcplen); + int protocol, + unsigned int tcplen); extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, struct sock *addr_sk); @@ -1193,8 +1145,8 @@ static inline void tcp_write_queue_purge(struct sock *sk) struct sk_buff *skb; while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) - sk_stream_free_skb(sk, skb); - sk_stream_mem_reclaim(sk); + sk_wmem_free_skb(sk, skb); + sk_mem_reclaim(sk); } static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) @@ -1227,6 +1179,11 @@ static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_bu for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\ skb = skb->next) +#define tcp_for_write_queue_from_safe(skb, tmp, sk) \ + for (tmp = skb->next; \ + (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ + skb = tmp, tmp = skb->next) + static inline struct sk_buff *tcp_send_head(struct sock *sk) { return sk->sk_send_head; @@ -1234,14 +1191,9 @@ static inline struct sk_buff *tcp_send_head(struct sock *sk) static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) { - struct tcp_sock *tp = tcp_sk(sk); - sk->sk_send_head = skb->next; if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) sk->sk_send_head = NULL; - /* Don't override Nagle indefinately with F-RTO */ - if (tp->frto_counter == 2) - tp->frto_counter = 3; } static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) @@ -1265,8 +1217,12 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb __tcp_add_write_queue_tail(sk, skb); /* Queue it, remembering where we must start sending. */ - if (sk->sk_send_head == NULL) + if (sk->sk_send_head == NULL) { sk->sk_send_head = skb; + + if (tcp_sk(sk)->highest_sack == NULL) + tcp_sk(sk)->highest_sack = skb; + } } static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) @@ -1309,6 +1265,45 @@ static inline int tcp_write_queue_empty(struct sock *sk) return skb_queue_empty(&sk->sk_write_queue); } +/* Start sequence of the highest skb with SACKed bit, valid only if + * sacked > 0 or when the caller has ensured validity by itself. + */ +static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp) +{ + if (!tp->sacked_out) + return tp->snd_una; + + if (tp->highest_sack == NULL) + return tp->snd_nxt; + + return TCP_SKB_CB(tp->highest_sack)->seq; +} + +static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) +{ + tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? 
NULL : + tcp_write_queue_next(sk, skb); +} + +static inline struct sk_buff *tcp_highest_sack(struct sock *sk) +{ + return tcp_sk(sk)->highest_sack; +} + +static inline void tcp_highest_sack_reset(struct sock *sk) +{ + tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); +} + +/* Called when old skb is about to be deleted (to be combined with new skb) */ +static inline void tcp_highest_sack_combine(struct sock *sk, + struct sk_buff *old, + struct sk_buff *new) +{ + if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) + tcp_sk(sk)->highest_sack = new; +} + /* /proc */ enum tcp_seq_states { TCP_SEQ_STATE_LISTENING, @@ -1359,7 +1354,8 @@ struct tcp_sock_af_ops { struct dst_entry *dst, struct request_sock *req, struct tcphdr *th, - int protocol, int len); + int protocol, + unsigned int len); int (*md5_add) (struct sock *sk, struct sock *addr_sk, u8 *newkey, diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 409da3a9a455..27394e0447d8 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -17,16 +17,20 @@ extern struct proto tcpv6_prot; struct flowi; /* extention headers */ -extern void ipv6_rthdr_init(void); -extern void ipv6_frag_init(void); -extern void ipv6_nodata_init(void); -extern void ipv6_destopt_init(void); +extern int ipv6_exthdrs_init(void); +extern void ipv6_exthdrs_exit(void); +extern int ipv6_frag_init(void); +extern void ipv6_frag_exit(void); /* transport protocols */ -extern void rawv6_init(void); -extern void udpv6_init(void); -extern void udplitev6_init(void); -extern void tcpv6_init(void); +extern int rawv6_init(void); +extern void rawv6_exit(void); +extern int udpv6_init(void); +extern void udpv6_exit(void); +extern int udplitev6_init(void); +extern void udplitev6_exit(void); +extern int tcpv6_init(void); +extern void tcpv6_exit(void); extern int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, diff --git a/include/net/udp.h b/include/net/udp.h index 98755ebaf163..c6669c0a74c7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -65,6 +65,13 @@ extern rwlock_t udp_hash_lock; extern struct proto udp_prot; +extern atomic_t udp_memory_allocated; + +/* sysctl variables for udp */ +extern int sysctl_udp_mem[3]; +extern int sysctl_udp_rmem_min; +extern int sysctl_udp_wmem_min; + struct sk_buff; /* @@ -108,7 +115,7 @@ static inline void udp_lib_unhash(struct sock *sk) write_lock_bh(&udp_hash_lock); if (sk_del_node_init(sk)) { inet_sk(sk)->num = 0; - sock_prot_dec_use(sk->sk_prot); + sock_prot_inuse_add(sk->sk_prot, -1); } write_unlock_bh(&udp_hash_lock); } @@ -139,6 +146,12 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, int (*push_pending_frames)(struct sock *)); DECLARE_SNMP_STAT(struct udp_mib, udp_statistics); +DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6); + +/* UDP-Lite does not have a standardized MIB yet, so we inherit from UDP */ +DECLARE_SNMP_STAT(struct udp_mib, udplite_statistics); +DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); + /* * SNMP statistics for UDP and UDP-Lite */ @@ -149,6 +162,25 @@ DECLARE_SNMP_STAT(struct udp_mib, udp_statistics); if (is_udplite) SNMP_INC_STATS_BH(udplite_statistics, field); \ else SNMP_INC_STATS_BH(udp_statistics, field); } while(0) +#define UDP6_INC_STATS_BH(field, is_udplite) do { \ + if (is_udplite) SNMP_INC_STATS_BH(udplite_stats_in6, field); \ + else SNMP_INC_STATS_BH(udp_stats_in6, field); } while(0) +#define UDP6_INC_STATS_USER(field, is_udplite) do { \ + if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field); \ + else 
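
The highest_sack helpers above cache a pointer to the highest SACKed segment in the write queue so its start sequence can be read without rescanning, falling back to snd_una when nothing is SACKed and to snd_nxt when the cache is unset, and moving the pointer when an old skb is collapsed into a new one. A compact model of that logic, with a plain struct in place of sk_buff:

#include <stdint.h>
#include <stdio.h>

struct seg {
	uint32_t seq;		/* start sequence of this queued segment */
};

struct tcp_sack_demo {
	uint32_t snd_una;	/* nothing below this is outstanding */
	uint32_t snd_nxt;	/* next new sequence to send */
	uint32_t sacked_out;	/* how many segments the peer has SACKed */
	struct seg *highest_sack; /* highest segment known to be SACKed */
};

/* Mirror of tcp_highest_sack_seq(): fall back to snd_una when nothing is
 * SACKed, to snd_nxt when the cached pointer is unset, otherwise use the
 * cached segment's start sequence. */
static uint32_t highest_sack_seq(const struct tcp_sack_demo *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;
	if (!tp->highest_sack)
		return tp->snd_nxt;
	return tp->highest_sack->seq;
}

/* Mirror of tcp_highest_sack_combine(): when 'old' is merged into a new
 * segment, keep the cached pointer valid by moving it along. */
static void highest_sack_combine(struct tcp_sack_demo *tp,
				 struct seg *old, struct seg *new_seg)
{
	if (tp->sacked_out && tp->highest_sack == old)
		tp->highest_sack = new_seg;
}

int main(void)
{
	struct seg s1 = { 3000 }, s2 = { 2000 };
	struct tcp_sack_demo tp = {
		.snd_una = 1000, .snd_nxt = 5000,
		.sacked_out = 1, .highest_sack = &s1,
	};

	highest_sack_combine(&tp, &s1, &s2);	/* s1 collapsed into s2 */
	printf("highest SACKed seq: %u\n", highest_sack_seq(&tp));
	return 0;
}
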
SNMP_INC_STATS_USER(udp_stats_in6, field); } while(0) + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#define UDPX_INC_STATS_BH(sk, field) \ + do { \ + if ((sk)->sk_family == AF_INET) \ + UDP_INC_STATS_BH(field, 0); \ + else \ + UDP6_INC_STATS_BH(field, 0); \ + } while (0); +#else +#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(field, 0) +#endif + /* /proc */ struct udp_seq_afinfo { struct module *owner; @@ -173,4 +205,6 @@ extern void udp_proc_unregister(struct udp_seq_afinfo *afinfo); extern int udp4_proc_init(void); extern void udp4_proc_exit(void); #endif + +extern void udp_init(void); #endif /* _UDP_H */ diff --git a/include/net/udplite.h b/include/net/udplite.h index 635b0eafca95..b76b2e377af4 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -13,9 +13,6 @@ extern struct proto udplite_prot; extern struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; -/* UDP-Lite does not have a standardized MIB yet, so we inherit from UDP */ -DECLARE_SNMP_STAT(struct udp_mib, udplite_statistics); - /* * Checksum computation is all in software, hence simpler getfrag. */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 1dd20cf17982..5ebb9ba479b1 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -19,6 +19,9 @@ #include <net/route.h> #include <net/ipv6.h> #include <net/ip6_fib.h> +#ifdef CONFIG_XFRM_STATISTICS +#include <net/snmp.h> +#endif #define XFRM_PROTO_ESP 50 #define XFRM_PROTO_AH 51 @@ -34,6 +37,17 @@ #define MODULE_ALIAS_XFRM_TYPE(family, proto) \ MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto)) +#ifdef CONFIG_XFRM_STATISTICS +DECLARE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); +#define XFRM_INC_STATS(field) SNMP_INC_STATS(xfrm_statistics, field) +#define XFRM_INC_STATS_BH(field) SNMP_INC_STATS_BH(xfrm_statistics, field) +#define XFRM_INC_STATS_USER(field) SNMP_INC_STATS_USER(xfrm_statistics, field) +#else +#define XFRM_INC_STATS(field) +#define XFRM_INC_STATS_BH(field) +#define XFRM_INC_STATS_USER(field) +#endif + extern struct sock *xfrm_nl; extern u32 sysctl_xfrm_aevent_etime; extern u32 sysctl_xfrm_aevent_rseqth; @@ -183,7 +197,7 @@ struct xfrm_state struct timer_list timer; /* Last used time */ - u64 lastused; + unsigned long lastused; /* Reference to data common to all the instances of this * transformer. 
*/ @@ -227,22 +241,26 @@ struct km_event u32 event; }; +struct net_device; struct xfrm_type; struct xfrm_dst; struct xfrm_policy_afinfo { unsigned short family; struct dst_ops *dst_ops; void (*garbage_collect)(void); - int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl); + struct dst_entry *(*dst_lookup)(int tos, xfrm_address_t *saddr, + xfrm_address_t *daddr); int (*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr); struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy); - int (*bundle_create)(struct xfrm_policy *policy, - struct xfrm_state **xfrm, - int nx, - struct flowi *fl, - struct dst_entry **dst_p); void (*decode_session)(struct sk_buff *skb, - struct flowi *fl); + struct flowi *fl, + int reverse); + int (*get_tos)(struct flowi *fl); + int (*init_path)(struct xfrm_dst *path, + struct dst_entry *dst, + int nfheader_len); + int (*fill_dst)(struct xfrm_dst *xdst, + struct net_device *dev); }; extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); @@ -257,6 +275,8 @@ extern int __xfrm_state_delete(struct xfrm_state *x); struct xfrm_state_afinfo { unsigned int family; + unsigned int proto; + unsigned int eth_proto; struct module *owner; struct xfrm_type *type_map[IPPROTO_MAX]; struct xfrm_mode *mode_map[XFRM_MODE_MAX]; @@ -267,6 +287,12 @@ struct xfrm_state_afinfo { int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); int (*output)(struct sk_buff *skb); + int (*extract_input)(struct xfrm_state *x, + struct sk_buff *skb); + int (*extract_output)(struct xfrm_state *x, + struct sk_buff *skb); + int (*transport_finish)(struct sk_buff *skb, + int async); }; extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); @@ -282,6 +308,8 @@ struct xfrm_type __u8 flags; #define XFRM_TYPE_NON_FRAGMENT 1 #define XFRM_TYPE_REPLAY_PROT 2 +#define XFRM_TYPE_LOCAL_COADDR 4 +#define XFRM_TYPE_REMOTE_COADDR 8 int (*init_state)(struct xfrm_state *x); void (*destructor)(struct xfrm_state *); @@ -289,8 +317,6 @@ struct xfrm_type int (*output)(struct xfrm_state *, struct sk_buff *pskb); int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *); int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); - xfrm_address_t *(*local_addr)(struct xfrm_state *, xfrm_address_t *); - xfrm_address_t *(*remote_addr)(struct xfrm_state *, xfrm_address_t *); /* Estimate maximal size of result of transformation of a dgram */ u32 (*get_mtu)(struct xfrm_state *, int size); }; @@ -299,6 +325,27 @@ extern int xfrm_register_type(struct xfrm_type *type, unsigned short family); extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family); struct xfrm_mode { + /* + * Remove encapsulation header. + * + * The IP header will be moved over the top of the encapsulation + * header. + * + * On entry, the transport header shall point to where the IP header + * should be and the network header shall be set to where the IP + * header currently is. skb->data shall point to the start of the + * payload. + */ + int (*input2)(struct xfrm_state *x, struct sk_buff *skb); + + /* + * This is the actual input entry point. + * + * For transport mode and equivalent this would be identical to + * input2 (which does not need to be set). While tunnel mode + * and equivalent would set this to the tunnel encapsulation function + * xfrm4_prepare_input that would in turn call input2. 
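
The comment block above describes the split of xfrm mode input handling into input2 (the header removal proper) and input (the actual entry point, which for tunnel modes is a preparation step such as xfrm4_prepare_input() that then calls input2). The sketch below shows that dispatch shape with ordinary function pointers; it illustrates the wiring the comment describes, not the kernel's implementation, and the output path described next is organized the same way.

#include <stdio.h>

struct pkt { int dummy; };

/* Per-mode operations: 'input' is the entry point, 'input2' does the
 * header removal proper, mirroring the split described above. */
struct mode_ops {
	int (*input2)(struct pkt *p);
	int (*input)(struct pkt *p);
};

static int transport_input2(struct pkt *p)
{
	printf("transport: strip encapsulation header\n");
	return 0;
}

static int tunnel_input2(struct pkt *p)
{
	printf("tunnel: strip outer header, reveal inner packet\n");
	return 0;
}

/* Stand-in for a prepare step such as xfrm4_prepare_input(): set up the
 * header pointers, then hand off to input2. */
static int tunnel_prepare_input(struct pkt *p)
{
	printf("tunnel: prepare header pointers\n");
	return tunnel_input2(p);
}

static const struct mode_ops transport_mode = {
	.input2	= transport_input2,
	.input	= transport_input2,	/* no separate prepare step needed */
};

static const struct mode_ops tunnel_mode = {
	.input2	= tunnel_input2,
	.input	= tunnel_prepare_input,
};

int main(void)
{
	struct pkt p;

	transport_mode.input(&p);
	tunnel_mode.input(&p);
	return 0;
}
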
+ */ int (*input)(struct xfrm_state *x, struct sk_buff *skb); /* @@ -312,7 +359,18 @@ struct xfrm_mode { * header. The value of the network header will always point * to the top IP header while skb->data will point to the payload. */ - int (*output)(struct xfrm_state *x,struct sk_buff *skb); + int (*output2)(struct xfrm_state *x,struct sk_buff *skb); + + /* + * This is the actual output entry point. + * + * For transport mode and equivalent this would be identical to + * output2 (which does not need to be set). While tunnel mode + * and equivalent would set this to a tunnel encapsulation function + * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn + * call output2. + */ + int (*output)(struct xfrm_state *x, struct sk_buff *skb); struct xfrm_state_afinfo *afinfo; struct module *owner; @@ -454,6 +512,51 @@ struct xfrm_skb_cb { #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0])) +/* + * This structure is used by the afinfo prepare_input/prepare_output functions + * to transmit header information to the mode input/output functions. + */ +struct xfrm_mode_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + + /* Copied from header for IPv4, always set to zero and DF for IPv6. */ + __be16 id; + __be16 frag_off; + + /* TOS for IPv4, class for IPv6. */ + u8 tos; + + /* TTL for IPv4, hop limitfor IPv6. */ + u8 ttl; + + /* Protocol for IPv4, NH for IPv6. */ + u8 protocol; + + /* Used by IPv6 only, zero for IPv4. */ + u8 flow_lbl[3]; +}; + +#define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0])) + +/* + * This structure is used by the input processing to locate the SPI and + * related information. + */ +struct xfrm_spi_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + + unsigned int daddroff; + unsigned int family; +}; + +#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) + /* Audit Information */ struct xfrm_audit { @@ -462,41 +565,59 @@ struct xfrm_audit }; #ifdef CONFIG_AUDITSYSCALL -static inline struct audit_buffer *xfrm_audit_start(u32 auid, u32 sid) +static inline struct audit_buffer *xfrm_audit_start(const char *op) { struct audit_buffer *audit_buf = NULL; - char *secctx; - u32 secctx_len; + if (audit_enabled == 0) + return NULL; audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, - AUDIT_MAC_IPSEC_EVENT); + AUDIT_MAC_IPSEC_EVENT); if (audit_buf == NULL) return NULL; + audit_log_format(audit_buf, "op=%s", op); + return audit_buf; +} - audit_log_format(audit_buf, "auid=%u", auid); +static inline void xfrm_audit_helper_usrinfo(u32 auid, u32 secid, + struct audit_buffer *audit_buf) +{ + char *secctx; + u32 secctx_len; - if (sid != 0 && - security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) { + audit_log_format(audit_buf, " auid=%u", auid); + if (secid != 0 && + security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) { audit_log_format(audit_buf, " subj=%s", secctx); security_release_secctx(secctx, secctx_len); } else audit_log_task_context(audit_buf); - return audit_buf; } extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, - u32 auid, u32 sid); + u32 auid, u32 secid); extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, - u32 auid, u32 sid); + u32 auid, u32 secid); extern void xfrm_audit_state_add(struct xfrm_state *x, int result, - u32 auid, u32 sid); + u32 auid, u32 secid); extern void xfrm_audit_state_delete(struct xfrm_state *x, int result, - u32 auid, u32 sid); + u32 auid, u32 
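
XFRM_MODE_SKB_CB and XFRM_SPI_SKB_CB above are overlays cast onto skb->cb[0], the per-skb scratch area. The sketch below shows that control-buffer overlay pattern with a made-up mode_cb structure; the one hard rule, checked with an assert here, is that the overlay must fit inside the cb array.

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>

/* Stand-in for struct sk_buff with its scratch control buffer. */
struct skb_demo {
	char cb[48];
};

/* One possible overlay, in the spirit of struct xfrm_mode_skb_cb. */
struct mode_cb {
	uint8_t tos;
	uint8_t ttl;
	uint8_t protocol;
};

#define MODE_CB(skb) ((struct mode_cb *)&((skb)->cb[0]))

int main(void)
{
	struct skb_demo skb;

	/* The overlay must fit inside the control buffer. */
	assert(sizeof(struct mode_cb) <= sizeof(skb.cb));

	memset(&skb, 0, sizeof(skb));
	MODE_CB(&skb)->ttl = 64;
	MODE_CB(&skb)->protocol = 50;	/* ESP */

	printf("ttl=%u proto=%u\n",
	       MODE_CB(&skb)->ttl, MODE_CB(&skb)->protocol);
	return 0;
}
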
secid); +extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x, + struct sk_buff *skb); +extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family); +extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, + __be32 net_spi, __be32 net_seq); +extern void xfrm_audit_state_icvfail(struct xfrm_state *x, + struct sk_buff *skb, u8 proto); #else #define xfrm_audit_policy_add(x, r, a, s) do { ; } while (0) #define xfrm_audit_policy_delete(x, r, a, s) do { ; } while (0) #define xfrm_audit_state_add(x, r, a, s) do { ; } while (0) #define xfrm_audit_state_delete(x, r, a, s) do { ; } while (0) +#define xfrm_audit_state_replay_overflow(x, s) do { ; } while (0) +#define xfrm_audit_state_notfound_simple(s, f) do { ; } while (0) +#define xfrm_audit_state_notfound(s, f, sp, sq) do { ; } while (0) +#define xfrm_audit_state_icvfail(x, s, p) do { ; } while (0) #endif /* CONFIG_AUDITSYSCALL */ static inline void xfrm_pol_hold(struct xfrm_policy *policy) @@ -505,12 +626,12 @@ static inline void xfrm_pol_hold(struct xfrm_policy *policy) atomic_inc(&policy->refcnt); } -extern void __xfrm_policy_destroy(struct xfrm_policy *policy); +extern void xfrm_policy_destroy(struct xfrm_policy *policy); static inline void xfrm_pol_put(struct xfrm_policy *policy) { if (atomic_dec_and_test(&policy->refcnt)) - __xfrm_policy_destroy(policy); + xfrm_policy_destroy(policy); } #ifdef CONFIG_XFRM_SUB_POLICY @@ -757,17 +878,25 @@ xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short } #ifdef CONFIG_XFRM - extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family); -static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) +static inline int __xfrm_policy_check2(struct sock *sk, int dir, + struct sk_buff *skb, + unsigned int family, int reverse) { + int ndir = dir | (reverse ? 
XFRM_POLICY_MASK + 1 : 0); + if (sk && sk->sk_policy[XFRM_POLICY_IN]) - return __xfrm_policy_check(sk, dir, skb, family); + return __xfrm_policy_check(sk, ndir, skb, family); return (!xfrm_policy_count[dir] && !skb->sp) || (skb->dst->flags & DST_NOPOLICY) || - __xfrm_policy_check(sk, dir, skb, family); + __xfrm_policy_check(sk, ndir, skb, family); +} + +static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) +{ + return __xfrm_policy_check2(sk, dir, skb, family, 0); } static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb) @@ -780,7 +909,34 @@ static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *s return xfrm_policy_check(sk, dir, skb, AF_INET6); } -extern int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family); +static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir, + struct sk_buff *skb) +{ + return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1); +} + +static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, + struct sk_buff *skb) +{ + return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1); +} + +extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, + unsigned int family, int reverse); + +static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, + unsigned int family) +{ + return __xfrm_decode_session(skb, fl, family, 0); +} + +static inline int xfrm_decode_session_reverse(struct sk_buff *skb, + struct flowi *fl, + unsigned int family) +{ + return __xfrm_decode_session(skb, fl, family, 1); +} + extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family) @@ -841,6 +997,22 @@ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *sk { return 1; } +static inline int xfrm_decode_session_reverse(struct sk_buff *skb, + struct flowi *fl, + unsigned int family) +{ + return -ENOSYS; +} +static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir, + struct sk_buff *skb) +{ + return 1; +} +static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, + struct sk_buff *skb) +{ + return 1; +} #endif static __inline__ @@ -981,12 +1153,27 @@ struct xfrm6_tunnel { extern void xfrm_init(void); extern void xfrm4_init(void); -extern void xfrm6_init(void); -extern void xfrm6_fini(void); extern void xfrm_state_init(void); extern void xfrm4_state_init(void); -extern void xfrm6_state_init(void); +#ifdef CONFIG_XFRM +extern int xfrm6_init(void); +extern void xfrm6_fini(void); +extern int xfrm6_state_init(void); extern void xfrm6_state_fini(void); +#else +static inline int xfrm6_init(void) +{ + return 0; +} +static inline void xfrm6_fini(void) +{ + ; +} +#endif + +#ifdef CONFIG_XFRM_STATISTICS +extern int xfrm_proc_init(void); +#endif extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *); extern struct xfrm_state *xfrm_state_alloc(void); @@ -1045,14 +1232,23 @@ extern int xfrm_state_delete(struct xfrm_state *x); extern int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info); extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si); extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si); -extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq); +extern int xfrm_replay_check(struct xfrm_state *x, + struct sk_buff *skb, __be32 seq); extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); extern void 
xfrm_replay_notify(struct xfrm_state *x, int event); extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); extern int xfrm_init_state(struct xfrm_state *x); +extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); +extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type); +extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr); +extern int xfrm_output_resume(struct sk_buff *skb, int err); extern int xfrm_output(struct sk_buff *skb); +extern int xfrm4_extract_header(struct sk_buff *skb); +extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); +extern int xfrm4_transport_finish(struct sk_buff *skb, int async); extern int xfrm4_rcv(struct sk_buff *skb); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) @@ -1060,10 +1256,15 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) return xfrm4_rcv_encap(skb, nexthdr, spi, 0); } +extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); +extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_output(struct sk_buff *skb); extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); +extern int xfrm6_extract_header(struct sk_buff *skb); +extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); +extern int xfrm6_transport_finish(struct sk_buff *skb, int async); extern int xfrm6_rcv(struct sk_buff *skb); extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto); @@ -1072,6 +1273,8 @@ extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr); extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr); extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr); +extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); +extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_output(struct sk_buff *skb); extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr); @@ -1079,7 +1282,6 @@ extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, #ifdef CONFIG_XFRM extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen); -extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family); #else static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) { @@ -1092,11 +1294,6 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); return 0; } - -static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family) -{ - return -EINVAL; -} #endif struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); @@ -1113,11 +1310,9 @@ extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create, unsigned short family); -extern int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info); extern int 
xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst, struct flowi *fl, int family, int strict); -extern void xfrm_init_pmtu(struct dst_entry *dst); #ifdef CONFIG_XFRM_MIGRATE extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type, @@ -1214,4 +1409,9 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n) } #endif +static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) +{ + return skb->sp->xvec[skb->sp->len - 1]; +} + #endif /* _NET_XFRM_H */ diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 8ec3799e42e1..7228c056b9e9 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -230,7 +230,9 @@ struct ib_class_port_info * @seg_count: The number of RMPP segments allocated for this send. * @seg_size: Size of each RMPP segment. * @timeout_ms: Time to wait for a response. - * @retries: Number of times to retry a request for a response. + * @retries: Number of times to retry a request for a response. For MADs + * using RMPP, this applies per window. On completion, returns the number + * of retries needed to complete the transfer. * * Users are responsible for initializing the MAD buffer itself, with the * exception of any RMPP header. Additional segment buffer space allocated diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 11f39606e7d9..cfbd38fe2998 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1026,7 +1026,7 @@ struct ib_device { struct module *owner; struct class_device class_dev; - struct kobject ports_parent; + struct kobject *ports_parent; struct list_head port_list; enum { diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index 9749c1b34d00..c55705460b87 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h @@ -60,7 +60,8 @@ enum { RDMA_USER_CM_CMD_SET_OPTION, RDMA_USER_CM_CMD_NOTIFY, RDMA_USER_CM_CMD_JOIN_MCAST, - RDMA_USER_CM_CMD_LEAVE_MCAST + RDMA_USER_CM_CMD_LEAVE_MCAST, + RDMA_USER_CM_CMD_MIGRATE_ID }; /* @@ -230,4 +231,14 @@ struct rdma_ucm_set_option { __u32 optlen; }; +struct rdma_ucm_migrate_id { + __u64 response; + __u32 id; + __u32 fd; +}; + +struct rdma_ucm_migrate_resp { + __u32 events_reported; +}; + #endif /* RDMA_USER_CM_H */ diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 50e907f42048..e19e58423166 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -49,12 +49,15 @@ enum iscsi_uevent_e { ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16, + ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17, /* up events */ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2, ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3, ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, + ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5, + ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6, }; enum iscsi_tgt_dscvr { @@ -156,6 +159,10 @@ struct iscsi_uevent { uint32_t sid; uint32_t cid; } c_conn_ret; + struct msg_unbind_session { + uint32_t sid; + uint32_t host_no; + } unbind_session; struct msg_recv_req { uint32_t sid; uint32_t cid; @@ -236,6 +243,13 @@ enum iscsi_param { ISCSI_PARAM_PASSWORD, ISCSI_PARAM_PASSWORD_IN, + ISCSI_PARAM_FAST_ABORT, + ISCSI_PARAM_ABORT_TMO, + ISCSI_PARAM_LU_RESET_TMO, + ISCSI_PARAM_HOST_RESET_TMO, + + ISCSI_PARAM_PING_TMO, + ISCSI_PARAM_RECV_TMO, /* must always be last */ ISCSI_PARAM_MAX, }; @@ -266,6 +280,12 @@ enum iscsi_param { #define 
ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN) #define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD) #define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN) +#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT) +#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO) +#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO) +#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO) +#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO) +#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO) /* iSCSI HBA params */ enum iscsi_host_param { diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index 8d1e4e8026fe..318a909e7ae1 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h @@ -21,13 +21,15 @@ #ifndef ISCSI_PROTO_H #define ISCSI_PROTO_H +#include <linux/types.h> + #define ISCSI_DRAFT20_VERSION 0x00 /* default iSCSI listen port for incoming connections */ #define ISCSI_LISTEN_PORT 3260 /* Padding word length */ -#define PAD_WORD_LEN 4 +#define ISCSI_PAD_LEN 4 /* * useful common(control and data pathes) macro @@ -147,6 +149,14 @@ struct iscsi_rlength_ahdr { __be32 read_length; }; +/* Extended CDB AHS */ +struct iscsi_ecdb_ahdr { + __be16 ahslength; /* CDB length - 15, including reserved byte */ + uint8_t ahstype; + uint8_t reserved; + uint8_t ecdb[260 - 16]; /* 4-byte aligned extended CDB spillover */ +}; + /* SCSI Response Header */ struct iscsi_cmd_rsp { uint8_t opcode; @@ -600,6 +610,8 @@ struct iscsi_reject { #define ISCSI_MIN_MAX_BURST_LEN 512 #define ISCSI_MAX_MAX_BURST_LEN 16777215 +#define ISCSI_DEF_TIME2WAIT 2 + /************************* RFC 3720 End *****************************/ #endif /* ISCSI_PROTO_H */ diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index b4b31132618b..889f51fabab9 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -57,11 +57,14 @@ struct iscsi_nopin; #define ISCSI_MAX_CMD_PER_LUN 128 /* Task Mgmt states */ -#define TMABORT_INITIAL 0x0 -#define TMABORT_SUCCESS 0x1 -#define TMABORT_FAILED 0x2 -#define TMABORT_TIMEDOUT 0x3 -#define TMABORT_NOT_FOUND 0x4 +enum { + TMF_INITIAL, + TMF_QUEUED, + TMF_SUCCESS, + TMF_FAILED, + TMF_TIMEDOUT, + TMF_NOT_FOUND, +}; /* Connection suspend "bit" */ #define ISCSI_SUSPEND_BIT 1 @@ -74,6 +77,13 @@ struct iscsi_nopin; #define ISCSI_ADDRESS_BUF_LEN 64 +enum { + /* this is the maximum possible storage for AHSs */ + ISCSI_MAX_AHS_SIZE = sizeof(struct iscsi_ecdb_ahdr) + + sizeof(struct iscsi_rlength_ahdr), + ISCSI_DIGEST_SIZE = sizeof(__u32), +}; + struct iscsi_mgmt_task { /* * Becuae LLDs allocate their hdr differently, this is a pointer to @@ -91,15 +101,17 @@ enum { ISCSI_TASK_COMPLETED, ISCSI_TASK_PENDING, ISCSI_TASK_RUNNING, - ISCSI_TASK_ABORTING, }; struct iscsi_cmd_task { /* - * Becuae LLDs allocate their hdr differently, this is a pointer to - * that storage. It must be setup at session creation time. + * Because LLDs allocate their hdr differently, this is a pointer + * and length to that storage. It must be setup at session + * creation time. 
*/ struct iscsi_cmd *hdr; + unsigned short hdr_max; + unsigned short hdr_len; /* accumulated size of hdr used */ int itt; /* this ITT */ uint32_t unsol_datasn; @@ -110,7 +122,6 @@ struct iscsi_cmd_task { unsigned data_count; /* remaining Data-Out */ struct scsi_cmnd *sc; /* associated SCSI cmd*/ struct iscsi_conn *conn; /* used connection */ - struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ /* state set/tested under session->lock */ int state; @@ -119,6 +130,11 @@ struct iscsi_cmd_task { void *dd_data; /* driver/transport data */ }; +static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask) +{ + return (void*)ctask->hdr + ctask->hdr_len; +} + struct iscsi_conn { struct iscsi_cls_conn *cls_conn; /* ptr to class connection */ void *dd_data; /* iscsi_transport data */ @@ -132,6 +148,12 @@ struct iscsi_conn { * conn_stop() flag: stop to recover, stop to terminate */ int stop_stage; + struct timer_list transport_timer; + unsigned long last_recv; + unsigned long last_ping; + int ping_timeout; + int recv_timeout; + struct iscsi_mgmt_task *ping_mtask; /* iSCSI connection-wide sequencing */ uint32_t exp_statsn; @@ -152,10 +174,11 @@ struct iscsi_conn { struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ /* xmit */ - struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ + struct list_head mgmtqueue; /* mgmt (control) xmit queue */ struct list_head mgmt_run_list; /* list of control tasks */ struct list_head xmitqueue; /* data-path cmd queue */ struct list_head run_list; /* list of cmds in progress */ + struct list_head requeue; /* tasks needing another run */ struct work_struct xmitwork; /* per-conn. xmit workqueue */ unsigned long suspend_tx; /* suspend Tx */ unsigned long suspend_rx; /* suspend Rx */ @@ -163,8 +186,8 @@ struct iscsi_conn { /* abort */ wait_queue_head_t ehwait; /* used in eh_abort() */ struct iscsi_tm tmhdr; - struct timer_list tmabort_timer; - int tmabort_state; /* see TMABORT_INITIAL, etc.*/ + struct timer_list tmf_timer; + int tmf_state; /* see TMF_INITIAL, etc.*/ /* negotiated params */ unsigned max_recv_dlength; /* initiator_max_recv_dsl*/ @@ -198,7 +221,7 @@ struct iscsi_conn { uint32_t eh_abort_cnt; }; -struct iscsi_queue { +struct iscsi_pool { struct kfifo *queue; /* FIFO Queue */ void **pool; /* Pool of elements */ int max; /* Max number of elements */ @@ -221,6 +244,8 @@ struct iscsi_session { uint32_t queued_cmdsn; /* configuration */ + int abort_timeout; + int lu_reset_timeout; int initial_r2t_en; unsigned max_r2t; int imm_data_en; @@ -231,6 +256,7 @@ struct iscsi_session { int pdu_inorder_en; int dataseq_inorder_en; int erl; + int fast_abort; int tpgt; char *username; char *username_in; @@ -256,10 +282,10 @@ struct iscsi_session { int cmds_max; /* size of cmds array */ struct iscsi_cmd_task **cmds; /* Original Cmds arr */ - struct iscsi_queue cmdpool; /* PDU's pool */ + struct iscsi_pool cmdpool; /* PDU's pool */ int mgmtpool_max; /* size of mgmt array */ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */ - struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */ + struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */ }; /* @@ -268,6 +294,7 @@ struct iscsi_session { extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth); extern int iscsi_eh_abort(struct scsi_cmnd *sc); extern int iscsi_eh_host_reset(struct scsi_cmnd *sc); +extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); extern int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)); @@ -326,11 +353,32 @@ extern int __iscsi_complete_pdu(struct 
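
struct iscsi_cmd_task now carries hdr_len and hdr_max alongside hdr, and iscsi_next_hdr() above returns the first unused byte of that header area so additional header segments can be appended incrementally. The sketch below models that accumulation with a hypothetical task_demo structure and a 48-byte basic header already in place.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical task with a preallocated header area; mirrors the idea of
 * hdr / hdr_len / hdr_max in the reworked struct iscsi_cmd_task. */
struct task_demo {
	uint8_t hdr[128];
	unsigned short hdr_len;		/* bytes of hdr already used */
	unsigned short hdr_max;		/* capacity of hdr */
};

/* Like iscsi_next_hdr(): where the next additional header segment goes. */
static void *next_hdr(struct task_demo *t)
{
	return t->hdr + t->hdr_len;
}

/* Append an AHS-style blob if it fits, growing hdr_len. */
static int add_hdr(struct task_demo *t, const void *seg, unsigned short len)
{
	if (t->hdr_len + len > t->hdr_max)
		return -1;
	memcpy(next_hdr(t), seg, len);
	t->hdr_len += len;
	return 0;
}

int main(void)
{
	struct task_demo t = { .hdr_len = 48, .hdr_max = sizeof(t.hdr) };
	uint8_t ahs[12] = { 0 };

	if (add_hdr(&t, ahs, sizeof(ahs)) == 0)
		printf("header now %u bytes\n", t.hdr_len);
	return 0;
}
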
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index a466c2cb8955..3ffd6b582a97 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -91,8 +91,6 @@ enum discover_event {

 /* ---------- Expander Devices ---------- */

-#define ETASK 0xFA
-
 #define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj)
 #define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\
					attr)
@@ -122,8 +120,8 @@ struct ex_phy {
 	u8   attached_sata_dev:1;
 	u8   attached_sata_ps:1;

-	enum sas_proto attached_tproto;
-	enum sas_proto attached_iproto;
+	enum sas_protocol attached_tproto;
+	enum sas_protocol attached_iproto;

 	u8   attached_sas_addr[SAS_ADDR_SIZE];
 	u8   attached_phy_id;
@@ -191,8 +189,8 @@ struct domain_device {

	struct list_head dev_list_node;

-	enum sas_proto    iproto;
-	enum sas_proto    tproto;
+	enum sas_protocol    iproto;
+	enum sas_protocol    tproto;

	struct sas_rphy *rphy;

@@ -245,8 +243,8 @@ struct asd_sas_port {
 	enum sas_class class;
 	u8 sas_addr[SAS_ADDR_SIZE];
 	u8 attached_sas_addr[SAS_ADDR_SIZE];
-	enum sas_proto   iproto;
-	enum sas_proto   tproto;
+	enum sas_protocol   iproto;
+	enum sas_protocol   tproto;

 	enum sas_oob_mode oob_mode;

@@ -289,8 +287,8 @@ struct asd_sas_phy {
 	int            id;	/* must be set */
 	enum sas_class class;
-	enum sas_proto iproto;
-	enum sas_proto tproto;
+	enum sas_protocol iproto;
+	enum sas_protocol tproto;

 	enum sas_phy_type  type;
 	enum sas_phy_role  role;
@@ -537,7 +535,7 @@ struct sas_task {
 	spinlock_t   task_state_lock;
 	unsigned     task_state_flags;

-	enum sas_proto      task_proto;
+	enum sas_protocol      task_proto;

 	/* Used by the discovery code. */
	struct timer_list     timer;
@@ -563,7 +561,7 @@ struct sas_task {
	struct work_struct abort_work;
 };

-
+extern struct kmem_cache *sas_task_cache;

 #define SAS_TASK_STATE_PENDING      1
 #define SAS_TASK_STATE_DONE         2
@@ -573,7 +571,6 @@ struct sas_task {
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
-	extern struct kmem_cache *sas_task_cache;
 	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

 	if (task) {
@@ -590,7 +587,6 @@ static inline struct sas_task *sas_alloc_task(gfp_t flags)
 static inline void sas_free_task(struct sas_task *task)
 {
 	if (task) {
-		extern struct kmem_cache *sas_task_cache;
		BUG_ON(!list_empty(&task->list));
		kmem_cache_free(sas_task_cache, task);
	}
@@ -676,4 +672,8 @@ extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
 extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			   struct request *req);
+
+extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
+				  struct ssp_response_iu *iu);
+
 #endif /* _SASLIB_H_ */
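Illustrative only, not part of the commit: sas_task_cache is now declared once at file scope, so the usual allocate/fill/submit/free pairing for a sas_task looks like the sketch below. The completion callback and submit hook are hypothetical stand-ins for whatever the LLDD provides, and most fields are omitted.

#include <scsi/libsas.h>

static void my_task_done(struct sas_task *task);	/* hypothetical */
static int my_lldd_execute_task(struct sas_task *task);	/* hypothetical */

static int submit_one_task(struct domain_device *dev)
{
	struct sas_task *task = sas_alloc_task(GFP_KERNEL);

	if (!task)
		return -ENOMEM;

	task->dev = dev;
	task->task_proto = dev->tproto;		/* now an enum sas_protocol */
	task->task_done = my_task_done;

	if (my_lldd_execute_task(task)) {
		sas_free_task(task);
		return -EIO;
	}
	return 0;
}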
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index 2f4b6afa34fc..e9fd02281381 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -102,13 +102,12 @@ enum sas_dev_type {
 	SATA_PM_PORT= 8,
 };

-/* Partly from IDENTIFY address frame. */
-enum sas_proto {
-	SATA_PROTO    = 1,
-	SAS_PROTO_SMP = 2,	/* protocol */
-	SAS_PROTO_STP = 4,	/* protocol */
-	SAS_PROTO_SSP = 8,	/* protocol */
-	SAS_PROTO_ALL = 0xE,
+enum sas_protocol {
+	SAS_PROTOCOL_SATA	= 0x01,
+	SAS_PROTOCOL_SMP	= 0x02,
+	SAS_PROTOCOL_STP	= 0x04,
+	SAS_PROTOCOL_SSP	= 0x08,
+	SAS_PROTOCOL_ALL	= 0x0E,
 };

 /* From the spec; local phys only */
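Illustrative only, not part of the commit: old SAS_PROTO_* users map one-to-one onto the SAS_PROTOCOL_* names, which keep the same bit values, e.g. a simple dispatch on the renamed enum:

#include <scsi/sas.h>

static const char *sas_proto_name(enum sas_protocol proto)
{
	switch (proto) {
	case SAS_PROTOCOL_SATA:	return "SATA";
	case SAS_PROTOCOL_SMP:	return "SMP";
	case SAS_PROTOCOL_STP:	return "STP";
	case SAS_PROTOCOL_SSP:	return "SSP";
	default:		return "unknown";	/* masks such as SAS_PROTOCOL_ALL */
	}
}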
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 702fcfeb37f1..82251575a9b4 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,6 +11,25 @@
 #include <linux/types.h>

 /*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation. We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
+ * minimum value is 32
+ */
+#define SCSI_MAX_SG_SEGMENTS	128
+
+/*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+ * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
+ */
+#ifdef ARCH_HAS_SG_CHAIN
+#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
+#else
+#define SCSI_MAX_SG_CHAIN_SEGMENTS	SCSI_MAX_SG_SEGMENTS
+#endif
+
+/*
  * SCSI command lengths
  */

@@ -83,6 +102,7 @@ extern const unsigned char scsi_command_size[8];
 #define READ_TOC              0x43
 #define LOG_SELECT            0x4c
 #define LOG_SENSE             0x4d
+#define XDWRITEREAD_10        0x53
 #define MODE_SELECT_10        0x55
 #define RESERVE_10            0x56
 #define RELEASE_10            0x57
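Illustrative only, not part of the commit: a driver that cannot chain scatterlists should stay at or below SCSI_MAX_SG_SEGMENTS, while one built for an arch with ARCH_HAS_SG_CHAIN may advertise up to SCSI_MAX_SG_CHAIN_SEGMENTS. A hypothetical, abbreviated host template showing where the limit typically ends up:

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

static int my_hba_queuecommand(struct scsi_cmnd *cmd,
			       void (*done)(struct scsi_cmnd *));	/* hypothetical */

static struct scsi_host_template my_hba_template = {
	.module		= THIS_MODULE,
	.name		= "my_hba",			/* hypothetical driver */
	.queuecommand	= my_hba_queuecommand,
	.this_id	= -1,
	.sg_tablesize	= SCSI_MAX_SG_SEGMENTS,		/* or SCSI_MAX_SG_CHAIN_SEGMENTS */
	.cmd_per_lun	= 1,
	.use_clustering	= ENABLE_CLUSTERING,
};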
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 3f47e522a1ec..de28aab820b0 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -2,16 +2,20 @@
 #define _SCSI_SCSI_CMND_H

 #include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>

-struct request;
-struct scatterlist;
 struct Scsi_Host;
 struct scsi_device;

+struct scsi_data_buffer {
+	struct sg_table table;
+	unsigned length;
+	int resid;
+};

 /* embedded in scsi_cmnd */
 struct scsi_pointer {
@@ -62,15 +66,11 @@ struct scsi_cmnd {
 	/* These elements define the operation we are about to perform */
 #define MAX_COMMAND_SIZE	16
 	unsigned char cmnd[MAX_COMMAND_SIZE];
-	unsigned request_bufflen;	/* Actual request size */

 	struct timer_list eh_timeout;	/* Used to time out the command. */
-	void *request_buffer;		/* Actual requested buffer */

 	/* These elements define the operation we ultimately want to perform */
-	unsigned short use_sg;	/* Number of pieces of scatter-gather */
-	unsigned short __use_sg;
-
+	struct scsi_data_buffer sdb;
 	unsigned underflow;	/* Return error if less than
				   this amount is transferred */

@@ -80,15 +80,11 @@ struct scsi_cmnd {
				   reconnects.   Probably == sector size */
-	int resid;		/* Number of bytes requested to be
-				   transferred less actual number
-				   transferred (0 if not supported) */
-
 	struct request *request;	/* The command we are working on */

 #define SCSI_SENSE_BUFFERSIZE	96
-	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
+	unsigned char *sense_buffer;
				/* obtained by REQUEST SENSE when
				 * CHECK CONDITION is received on original
				 * command (auto-sense) */
@@ -128,27 +124,55 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
				 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);

-extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scsi_cmnd *);
+extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
+extern void scsi_release_buffers(struct scsi_cmnd *cmd);

 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);

-#define scsi_sg_count(cmd) ((cmd)->use_sg)
-#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
-#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
+{
+	return cmd->sdb.table.nents;
+}
+
+static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
+{
+	return cmd->sdb.table.sgl;
+}
+
+static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
+{
+	return cmd->sdb.length;
+}

 static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
 {
-	cmd->resid = resid;
+	cmd->sdb.resid = resid;
 }

 static inline int scsi_get_resid(struct scsi_cmnd *cmd)
 {
-	return cmd->resid;
+	return cmd->sdb.resid;
 }

 #define scsi_for_each_sg(cmd, sg, nseg, __i)	\
	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)

+static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
+{
+	return blk_bidi_rq(cmd->request) &&
+		(cmd->request->next_rq->special != NULL);
+}
+
+static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
+{
+	return scsi_bidi_cmnd(cmd) ?
+		cmd->request->next_rq->special : &cmd->sdb;
+}
+
+static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
+{
+	return &cmd->sdb;
+}
+
 #endif /* _SCSI_SCSI_CMND_H */
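Illustrative only, not part of the commit: with request_buffer/use_sg/resid gone from struct scsi_cmnd, a low-level driver walks the data through the accessors instead. A minimal, hypothetical mapping helper:

#include <scsi/scsi_cmnd.h>

static void my_hba_map_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	unsigned int mapped = 0;
	int i;

	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
		mapped += sg->length;	/* a real driver would program DMA here */

	if (mapped < scsi_bufflen(cmd))
		scsi_set_resid(cmd, scsi_bufflen(cmd) - mapped);
}

For bidirectional commands, scsi_in() and scsi_out() select the per-direction scsi_data_buffer, so the same accessors apply to either side.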
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6c2d80b36aa1..ab7acbe80960 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -122,9 +122,6 @@ struct scsi_device {
 	unsigned tagged_supported:1;	/* Supports SCSI-II tagged queuing */
 	unsigned simple_tags:1;	/* simple queue tag messages are enabled */
 	unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
-	unsigned single_lun:1;	/* Indicates we should only allow I/O to
-				 * one of the luns for the device at a
-				 * time. */
 	unsigned was_reset:1;	/* There was a bus reset on the bus for
				 * this device */
 	unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
@@ -142,6 +139,7 @@ struct scsi_device {
 	unsigned fix_capacity:1;	/* READ_CAPACITY is too high by 1 */
 	unsigned guess_capacity:1;	/* READ_CAPACITY might be too high by 1 */
 	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */
+	unsigned last_sector_bug:1;	/* Always read last sector in a 1 sector read */

 	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
 	struct list_head event_list;	/* asserted events */
@@ -202,6 +200,9 @@ struct scsi_target {
 	unsigned int		id; /* target id ... replace
				     * scsi_device.id eventually */
 	unsigned int		create:1; /* signal that it needs to be added */
+	unsigned int		single_lun:1;	/* Indicates we should only
+						 * allow I/O to one of the luns
+						 * for the device at a time. */
 	unsigned int		pdt_1f_for_no_lun;	/* PDT = 0x1f */
						/* means no lun present */

@@ -295,7 +296,7 @@ extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
			    struct scsi_mode_data *data,
			    struct scsi_sense_hdr *);
 extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
-				int retries);
+				int retries, struct scsi_sense_hdr *sshdr);
 extern int scsi_device_set_state(struct scsi_device *sdev,
				 enum scsi_device_state state);
 extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
@@ -386,6 +387,10 @@ static inline int scsi_device_qas(struct scsi_device *sdev)
		return 0;
 	return sdev->inquiry[56] & 0x02;
 }
+static inline int scsi_device_enclosure(struct scsi_device *sdev)
+{
+	return sdev->inquiry[6] & (1<<6);
+}

 #define MODULE_ALIAS_SCSI_DEVICE(type) \
	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d21b8913ceb3..25071d5d9bf8 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -68,16 +68,15 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
 extern int scsi_reset_provider(struct scsi_device *, int);

 struct scsi_eh_save {
+	/* saved state */
 	int result;
 	enum dma_data_direction data_direction;
 	unsigned char cmd_len;
 	unsigned char cmnd[MAX_COMMAND_SIZE];
+	struct scsi_data_buffer sdb;
+	struct request *next_rq;

-	void *buffer;
-	unsigned bufflen;
-	unsigned short use_sg;
-	int resid;
-
+	/* new command support */
 	struct scatterlist sense_sgl;
 };

diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 0fd4746ee39d..5c58d594126a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -39,9 +39,6 @@ struct blk_queue_tags;
 #define DISABLE_CLUSTERING 0
 #define ENABLE_CLUSTERING  1

-#define DISABLE_SG_CHAINING 0
-#define ENABLE_SG_CHAINING  1
-
 enum scsi_eh_timer_return {
 	EH_NOT_HANDLED,
 	EH_HANDLED,
@@ -136,9 +133,9 @@ struct scsi_host_template {
	 * the done callback is invoked.
	 *
	 * This is called to inform the LLD to transfer
-	 * cmd->request_bufflen bytes. The cmd->use_sg speciefies the
+	 * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) speciefies the
	 * number of scatterlist entried in the command and
-	 * cmd->request_buffer contains the scatterlist.
+	 * scsi_sglist(cmd) returns the scatterlist.
	 *
	 * return values: see queuecommand
	 *
@@ -446,15 +443,6 @@ struct scsi_host_template {
 	unsigned ordered_tag:1;

 	/*
-	 * true if the low-level driver can support sg chaining. this
-	 * will be removed eventually when all the drivers are
-	 * converted to support sg chaining.
-	 *
-	 * Status: OBSOLETE
-	 */
-	unsigned use_sg_chaining:1;
-
-	/*
	 * Countdown for host blocking with no commands outstanding
	 */
 	unsigned int max_host_blocked;
@@ -598,7 +586,6 @@ struct Scsi_Host {
 	unsigned unchecked_isa_dma:1;
 	unsigned use_clustering:1;
 	unsigned use_blk_tcq:1;
-	unsigned use_sg_chaining:1;

 	/*
	 * Host has requested that no further requests come through for the
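Illustrative only, not part of the commit: the scsi_device.h hunk above adds a sense-header argument to scsi_test_unit_ready(), so callers that care about why a TUR failed can inspect the decoded sense themselves. Timeout and retry values below are arbitrary; kernel context is assumed.

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int my_check_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int ret;

	ret = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (ret && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return 0;	/* e.g. media change; caller may still treat device as usable */

	return ret;
}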
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 7ff6199cbd55..404f11d331d6 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -118,7 +118,7 @@ struct iscsi_transport {
			 char *data, uint32_t data_size);
 	void (*get_stats) (struct iscsi_cls_conn *conn,
			   struct iscsi_stats *stats);
-	void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
+	int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
 	void (*init_mgmt_task) (struct iscsi_conn *conn,
				struct iscsi_mgmt_task *mtask);
 	int (*xmit_cmd_task) (struct iscsi_conn *conn,
@@ -176,6 +176,7 @@ struct iscsi_cls_conn {
 #define ISCSI_STATE_TERMINATE		4
 #define ISCSI_STATE_IN_RECOVERY		5
 #define ISCSI_STATE_RECOVERY_FAILED	6
+#define ISCSI_STATE_LOGGING_OUT		7

 struct iscsi_cls_session {
 	struct list_head sess_list;		/* item in session_list */
@@ -185,6 +186,7 @@ struct iscsi_cls_session {
 	/* recovery fields */
 	int recovery_tmo;
 	struct delayed_work recovery_work;
+	struct work_struct unbind_work;

 	int target_id;

@@ -205,6 +207,8 @@ struct iscsi_cls_session {
 struct iscsi_host {
 	struct list_head sessions;
 	struct mutex mutex;
+	struct workqueue_struct *unbind_workq;
+	char unbind_workq_name[KOBJ_NAME_LEN];
 };

 /*
@@ -214,8 +218,8 @@ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
					struct iscsi_transport *transport);
 extern int iscsi_add_session(struct iscsi_cls_session *session,
			     unsigned int target_id);
-extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
-extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
+extern int iscsi_session_event(struct iscsi_cls_session *session,
+			       enum iscsi_uevent_e event);
 extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
						struct iscsi_transport *t,
						unsigned int target_id);
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index abdfd2e27dd7..09125fa95b93 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -4,23 +4,17 @@
 #include <linux/transport_class.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
+#include <scsi/sas.h>

 struct scsi_transport_template;
 struct sas_rphy;
 struct request;

 enum sas_device_type {
-	SAS_PHY_UNUSED,
-	SAS_END_DEVICE,
-	SAS_EDGE_EXPANDER_DEVICE,
-	SAS_FANOUT_EXPANDER_DEVICE,
-};
-
-enum sas_protocol {
-	SAS_PROTOCOL_SATA		= 0x01,
-	SAS_PROTOCOL_SMP		= 0x02,
-	SAS_PROTOCOL_STP		= 0x04,
-	SAS_PROTOCOL_SSP		= 0x08,
+	SAS_PHY_UNUSED = 0,
+	SAS_END_DEVICE = 1,
+	SAS_EDGE_EXPANDER_DEVICE = 2,
+	SAS_FANOUT_EXPANDER_DEVICE = 3,
 };

 static inline int sas_protocol_ata(enum sas_protocol proto)
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
index f7513313ef0d..8ea9f7358ac1 100644
--- a/include/scsi/sd.h
+++ b/include/scsi/sd.h
@@ -41,6 +41,7 @@ struct scsi_disk {
 	u32		index;
 	u8		media_present;
 	u8		write_prot;
+	unsigned	previous_state : 1;
 	unsigned	WCE : 1;	/* state of disk WCE bit */
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
 	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
diff --git a/include/xen/page.h b/include/xen/page.h
index c0c8fcb27899..031ef22a971e 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -156,16 +156,16 @@ static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
 static inline unsigned long long pte_val_ma(pte_t x)
 {
-	return ((unsigned long long)x.pte_high << 32) | x.pte_low;
+	return x.pte;
 }
 #define pmd_val_ma(v) ((v).pmd)
 #define pud_val_ma(v) ((v).pgd.pgd)
-#define __pte_ma(x)	((pte_t) { .pte_low = (x), .pte_high = (x)>>32 } )
+#define __pte_ma(x)	((pte_t) { .pte = (x) })
 #else  /* !X86_PAE */
 #define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
 #define mfn_pte(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pte_val_ma(x)	((x).pte_low)
+#define pte_val_ma(x)	((x).pte)
 #define pmd_val_ma(v) ((v).pud.pgd.pgd)
 #define __pte_ma(x) ((pte_t) { (x) } )
 #endif	/* CONFIG_X86_PAE */
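A last illustrative note, not part of the commit: under PAE the xen/page.h machine-pte helpers now operate on the single 64-bit pte field, so building a machine PTE and reading it back is a plain identity round trip rather than a high/low split. The values below are made up; Xen kernel context is assumed.

#include <xen/page.h>

static void pte_ma_roundtrip_check(void)
{
	unsigned long long maddr = (0x12345ULL << PAGE_SHIFT) |
				   pgprot_val(PAGE_KERNEL);
	pte_t pte = __pte_ma(maddr);		/* now simply { .pte = maddr } */

	BUG_ON(pte_val_ma(pte) != maddr);	/* identity, no high/low recombination */
}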